Diffstat:
-rw-r--r-- Documentation/devicetree/bindings/net/nixge.txt | 32
-rw-r--r-- Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt | 1
-rw-r--r-- Documentation/networking/ice.txt | 39
-rw-r--r-- Documentation/ptp/ptp.txt | 5
-rw-r--r-- MAINTAINERS | 1
-rw-r--r-- drivers/infiniband/core/cma.c | 1
-rw-r--r-- drivers/infiniband/core/roce_gid_mgmt.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx5/cq.c | 8
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 14
-rw-r--r-- drivers/infiniband/hw/qedr/main.c | 4
-rw-r--r-- drivers/infiniband/hw/qedr/qedr_hsi_rdma.h | 4
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/usnic/usnic_ib_main.c | 28
-rw-r--r-- drivers/net/Kconfig | 1
-rw-r--r-- drivers/net/bonding/bond_main.c | 1
-rw-r--r-- drivers/net/bonding/bond_procfs.c | 2
-rw-r--r-- drivers/net/bonding/bond_sysfs.c | 73
-rw-r--r-- drivers/net/bonding/bond_sysfs_slave.c | 4
-rw-r--r-- drivers/net/caif/caif_serial.c | 32
-rw-r--r-- drivers/net/caif/caif_spi.c | 16
-rw-r--r-- drivers/net/caif/caif_virtio.c | 16
-rw-r--r-- drivers/net/can/at91_can.c | 3
-rw-r--r-- drivers/net/can/cc770/cc770.c | 4
-rw-r--r-- drivers/net/can/cc770/cc770_isa.c | 16
-rw-r--r-- drivers/net/can/grcan.c | 4
-rw-r--r-- drivers/net/can/janz-ican3.c | 6
-rw-r--r-- drivers/net/can/sja1000/sja1000_isa.c | 14
-rw-r--r-- drivers/net/can/softing/softing_main.c | 4
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 2
-rw-r--r-- drivers/net/can/usb/esd_usb2.c | 6
-rw-r--r-- drivers/net/can/vcan.c | 2
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 50
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.h | 13
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_atu.c | 12
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_vtu.c | 11
-rw-r--r-- drivers/net/dsa/mv88e6xxx/serdes.c | 15
-rw-r--r-- drivers/net/dsa/mv88e6xxx/serdes.h | 8
-rw-r--r-- drivers/net/ethernet/8390/apne.c | 2
-rw-r--r-- drivers/net/ethernet/8390/lib8390.c | 2
-rw-r--r-- drivers/net/ethernet/8390/ne.c | 2
-rw-r--r-- drivers/net/ethernet/8390/ne2k-pci.c | 2
-rw-r--r-- drivers/net/ethernet/8390/smc-ultra.c | 2
-rw-r--r-- drivers/net/ethernet/8390/stnic.c | 2
-rw-r--r-- drivers/net/ethernet/8390/wd.c | 2
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_main.c | 6
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_com.c | 8
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_eth_com.h | 8
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 5
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 9
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/sb1250-mac.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 6
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 2
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 10
-rw-r--r-- drivers/net/ethernet/cavium/common/cavium_ptp.c | 13
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 28
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 29
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 8
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/request_manager.c | 3
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 112
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 10
-rw-r--r-- drivers/net/ethernet/ec_bhf.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/fman/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 27
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 7
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 2
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 14
-rw-r--r-- drivers/net/ethernet/intel/Makefile | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 68
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 23
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 99
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 104
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/Makefile | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 312
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 1352
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 2233
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 86
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_controlq.c | 1066
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_controlq.h | 94
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devids.h | 19
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 940
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 266
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 473
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 5495
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_nvm.c | 236
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_osdep.h | 73
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.c | 1659
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.h | 43
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_status.h | 32
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 1883
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.h | 161
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 1782
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 192
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 394
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_hwmon.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 8
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 363
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 139
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 383
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 132
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 64
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 74
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 293
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 105
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 98
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 1
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 32
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 22
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/match.c | 73
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 15
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 6
-rw-r--r-- drivers/net/ethernet/ni/Kconfig | 27
-rw-r--r-- drivers/net/ethernet/ni/Makefile | 1
-rw-r--r-- drivers/net/ethernet/ni/nixge.c | 1310
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 415
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 28
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1617
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 103
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 7
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 338
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 219
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 56
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_selftest.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qla3xxx.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 30
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge.h | 16
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_debug.c | 2
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 192
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 7
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 545
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 30
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 11
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 77
-rw-r--r-- drivers/net/ethernet/sfc/farch.c | 80
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_mon.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 16
-rw-r--r-- drivers/net/ethernet/sfc/nic.h | 5
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 119
-rw-r--r-- drivers/net/ethernet/sfc/siena.c | 1
-rw-r--r-- drivers/net/ethernet/socionext/sni_ave.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 26
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 10
-rw-r--r-- drivers/net/geneve.c | 1
-rw-r--r-- drivers/net/gtp.c | 1
-rw-r--r-- drivers/net/hamradio/bpqether.c | 3
-rw-r--r-- drivers/net/hamradio/yam.c | 2
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r-- drivers/net/ieee802154/at86rf230.c | 2
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 1
-rw-r--r-- drivers/net/loopback.c | 1
-rw-r--r-- drivers/net/netdevsim/Makefile | 4
-rw-r--r-- drivers/net/netdevsim/devlink.c | 294
-rw-r--r-- drivers/net/netdevsim/fib.c | 263
-rw-r--r-- drivers/net/netdevsim/netdev.c | 12
-rw-r--r-- drivers/net/netdevsim/netdevsim.h | 43
-rw-r--r-- drivers/net/phy/sfp.c | 41
-rw-r--r-- drivers/net/phy/spi_ks8995.c | 2
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 3
-rw-r--r-- drivers/net/ppp/pppoe.c | 3
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 12
-rw-r--r-- drivers/net/usb/hso.c | 8
-rw-r--r-- drivers/net/vrf.c | 1
-rw-r--r-- drivers/net/vxlan.c | 1
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 11
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 4
-rw-r--r-- drivers/net/xen-netfront.c | 6
-rw-r--r-- fs/afs/internal.h | 1
-rw-r--r-- fs/afs/rxrpc.c | 12
-rw-r--r-- fs/lockd/svc.c | 1
-rw-r--r-- fs/nfs/inode.c | 1
-rw-r--r-- fs/nfs_common/grace.c | 1
-rw-r--r-- fs/proc/proc_net.c | 1
-rw-r--r-- include/linux/mlx5/cq.h | 6
-rw-r--r-- include/linux/mlx5/device.h | 4
-rw-r--r-- include/linux/mlx5/fs.h | 7
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 116
-rw-r--r-- include/linux/mlx5/port.h | 6
-rw-r--r-- include/linux/mlx5/transobj.h | 1
-rw-r--r-- include/linux/mlx5/vport.h | 3
-rw-r--r-- include/linux/mroute.h | 33
-rw-r--r-- include/linux/mroute6.h | 10
-rw-r--r-- include/linux/mroute_base.h | 128
-rw-r--r-- include/linux/netdevice.h | 68
-rw-r--r-- include/linux/qed/common_hsi.h | 2
-rw-r--r-- include/linux/qed/eth_common.h | 2
-rw-r--r-- include/linux/qed/iscsi_common.h | 4
-rw-r--r-- include/linux/qed/qed_if.h | 19
-rw-r--r-- include/linux/qed/rdma_common.h | 2
-rw-r--r-- include/linux/qed/roce_common.h | 3
-rw-r--r-- include/linux/rtnetlink.h | 3
-rw-r--r-- include/net/af_rxrpc.h | 11
-rw-r--r-- include/net/cfg80211.h | 76
-rw-r--r-- include/net/mac80211.h | 3
-rw-r--r-- include/net/net_namespace.h | 23
-rw-r--r-- include/net/netns/ipv6.h | 2
-rw-r--r-- include/net/regulatory.h | 28
-rw-r--r-- include/net/sctp/structs.h | 8
-rw-r--r-- include/rdma/ib_verbs.h | 4
-rw-r--r-- include/trace/events/afs.h | 69
-rw-r--r-- include/trace/events/rxrpc.h | 206
-rw-r--r-- include/uapi/linux/ethtool.h | 4
-rw-r--r-- include/uapi/linux/nl80211.h | 46
-rw-r--r-- kernel/audit.c | 1
-rw-r--r-- lib/kobject_uevent.c | 1
-rw-r--r-- lib/test_bpf.c | 2
-rw-r--r-- net/8021q/vlan.c | 1
-rw-r--r-- net/8021q/vlanproc.c | 6
-rw-r--r-- net/appletalk/atalk_proc.c | 8
-rw-r--r-- net/atm/atm_sysfs.c | 12
-rw-r--r-- net/atm/clip.c | 2
-rw-r--r-- net/atm/lec.c | 2
-rw-r--r-- net/atm/proc.c | 2
-rw-r--r-- net/ax25/af_ax25.c | 6
-rw-r--r-- net/bluetooth/rfcomm/tty.c | 4
-rw-r--r-- net/bridge/br.c | 1
-rw-r--r-- net/bridge/br_netfilter_hooks.c | 1
-rw-r--r-- net/bridge/br_sysfs_br.c | 2
-rw-r--r-- net/bridge/br_sysfs_if.c | 36
-rw-r--r-- net/bridge/netfilter/ebtable_broute.c | 1
-rw-r--r-- net/bridge/netfilter/ebtable_filter.c | 1
-rw-r--r-- net/bridge/netfilter/ebtable_nat.c | 1
-rw-r--r-- net/bridge/netfilter/nf_log_bridge.c | 1
-rw-r--r-- net/caif/caif_dev.c | 1
-rw-r--r-- net/can/af_can.c | 3
-rw-r--r-- net/can/bcm.c | 1
-rw-r--r-- net/can/gw.c | 3
-rw-r--r-- net/ceph/ceph_common.c | 2
-rw-r--r-- net/core/dev.c | 32
-rw-r--r-- net/core/ethtool.c | 6
-rw-r--r-- net/core/fib_notifier.c | 13
-rw-r--r-- net/core/fib_rules.c | 7
-rw-r--r-- net/core/net-procfs.c | 8
-rw-r--r-- net/core/net-sysfs.c | 12
-rw-r--r-- net/core/net_namespace.c | 95
-rw-r--r-- net/core/pktgen.c | 1
-rw-r--r-- net/core/rtnetlink.c | 12
-rw-r--r-- net/core/sock.c | 4
-rw-r--r-- net/core/sock_diag.c | 1
-rw-r--r-- net/core/sysctl_net_core.c | 1
-rw-r--r-- net/dccp/ipv4.c | 1
-rw-r--r-- net/dccp/ipv6.c | 1
-rw-r--r-- net/decnet/af_decnet.c | 2
-rw-r--r-- net/decnet/dn_dev.c | 2
-rw-r--r-- net/decnet/dn_neigh.c | 2
-rw-r--r-- net/decnet/dn_route.c | 2
-rw-r--r-- net/dns_resolver/dns_key.c | 2
-rw-r--r-- net/ieee802154/6lowpan/reassembly.c | 1
-rw-r--r-- net/ieee802154/core.c | 1
-rw-r--r-- net/ipv4/af_inet.c | 2
-rw-r--r-- net/ipv4/arp.c | 3
-rw-r--r-- net/ipv4/devinet.c | 1
-rw-r--r-- net/ipv4/esp4.c | 2
-rw-r--r-- net/ipv4/esp4_offload.c | 2
-rw-r--r-- net/ipv4/fib_frontend.c | 1
-rw-r--r-- net/ipv4/fib_trie.c | 33
-rw-r--r-- net/ipv4/fou.c | 1
-rw-r--r-- net/ipv4/icmp.c | 1
-rw-r--r-- net/ipv4/igmp.c | 5
-rw-r--r-- net/ipv4/ip_fragment.c | 1
-rw-r--r-- net/ipv4/ip_gre.c | 3
-rw-r--r-- net/ipv4/ip_vti.c | 1
-rw-r--r-- net/ipv4/ipconfig.c | 2
-rw-r--r-- net/ipv4/ipip.c | 1
-rw-r--r-- net/ipv4/ipmr.c | 111
-rw-r--r-- net/ipv4/ipmr_base.c | 42
-rw-r--r-- net/ipv4/netfilter/arp_tables.c | 1
-rw-r--r-- net/ipv4/netfilter/arptable_filter.c | 1
-rw-r--r-- net/ipv4/netfilter/ip_tables.c | 1
-rw-r--r-- net/ipv4/netfilter/ipt_CLUSTERIP.c | 3
-rw-r--r-- net/ipv4/netfilter/iptable_filter.c | 1
-rw-r--r-- net/ipv4/netfilter/iptable_mangle.c | 1
-rw-r--r-- net/ipv4/netfilter/iptable_nat.c | 1
-rw-r--r-- net/ipv4/netfilter/iptable_raw.c | 1
-rw-r--r-- net/ipv4/netfilter/iptable_security.c | 1
-rw-r--r-- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 1
-rw-r--r-- net/ipv4/netfilter/nf_defrag_ipv4.c | 1
-rw-r--r-- net/ipv4/netfilter/nf_log_arp.c | 1
-rw-r--r-- net/ipv4/netfilter/nf_log_ipv4.c | 1
-rw-r--r-- net/ipv4/ping.c | 3
-rw-r--r-- net/ipv4/proc.c | 7
-rw-r--r-- net/ipv4/raw.c | 3
-rw-r--r-- net/ipv4/route.c | 8
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 1
-rw-r--r-- net/ipv4/tcp_ipv4.c | 4
-rw-r--r-- net/ipv4/tcp_metrics.c | 1
-rw-r--r-- net/ipv4/udp.c | 4
-rw-r--r-- net/ipv4/udplite.c | 1
-rw-r--r-- net/ipv4/xfrm4_policy.c | 1
-rw-r--r-- net/ipv6/addrconf.c | 32
-rw-r--r-- net/ipv6/addrlabel.c | 1
-rw-r--r-- net/ipv6/af_inet6.c | 1
-rw-r--r-- net/ipv6/anycast.c | 2
-rw-r--r-- net/ipv6/esp6_offload.c | 2
-rw-r--r-- net/ipv6/fib6_rules.c | 1
-rw-r--r-- net/ipv6/icmp.c | 1
-rw-r--r-- net/ipv6/ila/ila_xlat.c | 1
-rw-r--r-- net/ipv6/ip6_fib.c | 17
-rw-r--r-- net/ipv6/ip6_flowlabel.c | 3
-rw-r--r-- net/ipv6/ip6_gre.c | 1
-rw-r--r-- net/ipv6/ip6_tunnel.c | 1
-rw-r--r-- net/ipv6/ip6_vti.c | 1
-rw-r--r-- net/ipv6/ip6mr.c | 126
-rw-r--r-- net/ipv6/mcast.c | 5
-rw-r--r-- net/ipv6/ndisc.c | 1
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 1
-rw-r--r-- net/ipv6/netfilter/ip6table_filter.c | 1
-rw-r--r-- net/ipv6/netfilter/ip6table_mangle.c | 1
-rw-r--r-- net/ipv6/netfilter/ip6table_nat.c | 1
-rw-r--r-- net/ipv6/netfilter/ip6table_raw.c | 1
-rw-r--r-- net/ipv6/netfilter/ip6table_security.c | 1
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 1
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 1
-rw-r--r-- net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | 1
-rw-r--r-- net/ipv6/netfilter/nf_log_ipv6.c | 1
-rw-r--r-- net/ipv6/ping.c | 1
-rw-r--r-- net/ipv6/proc.c | 7
-rw-r--r-- net/ipv6/raw.c | 3
-rw-r--r-- net/ipv6/reassembly.c | 5
-rw-r--r-- net/ipv6/route.c | 5
-rw-r--r-- net/ipv6/seg6.c | 1
-rw-r--r-- net/ipv6/sit.c | 1
-rw-r--r-- net/ipv6/sysctl_net_ipv6.c | 1
-rw-r--r-- net/ipv6/tcp_ipv6.c | 1
-rw-r--r-- net/ipv6/udplite.c | 1
-rw-r--r-- net/ipv6/xfrm6_policy.c | 1
-rw-r--r-- net/ipv6/xfrm6_tunnel.c | 1
-rw-r--r-- net/kcm/kcmproc.c | 5
-rw-r--r-- net/kcm/kcmsock.c | 1
-rw-r--r-- net/key/af_key.c | 1
-rw-r--r-- net/l2tp/l2tp_core.c | 1
-rw-r--r-- net/l2tp/l2tp_ppp.c | 3
-rw-r--r-- net/llc/llc_proc.c | 4
-rw-r--r-- net/mac80211/cfg.c | 12
-rw-r--r-- net/mac80211/ht.c | 15
-rw-r--r-- net/mac80211/ibss.c | 3
-rw-r--r-- net/mac80211/ieee80211_i.h | 12
-rw-r--r-- net/mac80211/iface.c | 2
-rw-r--r-- net/mac80211/key.c | 8
-rw-r--r-- net/mac80211/main.c | 10
-rw-r--r-- net/mac80211/mesh.c | 3
-rw-r--r-- net/mac80211/mlme.c | 168
-rw-r--r-- net/mac80211/rc80211_minstrel.c | 2
-rw-r--r-- net/mac80211/rc80211_minstrel_debugfs.c | 8
-rw-r--r-- net/mac80211/rc80211_minstrel_ht_debugfs.c | 8
-rw-r--r-- net/mac80211/rx.c | 45
-rw-r--r-- net/mac80211/scan.c | 4
-rw-r--r-- net/mac80211/tx.c | 46
-rw-r--r-- net/mac80211/util.c | 47
-rw-r--r-- net/mac80211/vht.c | 32
-rw-r--r-- net/mpls/af_mpls.c | 1
-rw-r--r-- net/ncsi/ncsi-netlink.c | 4
-rw-r--r-- net/netfilter/core.c | 1
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 1
-rw-r--r-- net/netfilter/ipvs/ip_vs_core.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_ftp.c | 1
-rw-r--r-- net/netfilter/ipvs/ip_vs_lblc.c | 1
-rw-r--r-- net/netfilter/ipvs/ip_vs_lblcr.c | 1
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_netbios_ns.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 1
-rw-r--r-- net/netfilter/nf_conntrack_proto_gre.c | 1
-rw-r--r-- net/netfilter/nf_conntrack_snmp.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_standalone.c | 3
-rw-r--r-- net/netfilter/nf_log.c | 3
-rw-r--r-- net/netfilter/nf_log_netdev.c | 1
-rw-r--r-- net/netfilter/nf_synproxy_core.c | 3
-rw-r--r-- net/netfilter/nf_tables_api.c | 1
-rw-r--r-- net/netfilter/nfnetlink.c | 1
-rw-r--r-- net/netfilter/nfnetlink_acct.c | 1
-rw-r--r-- net/netfilter/nfnetlink_cttimeout.c | 1
-rw-r--r-- net/netfilter/nfnetlink_log.c | 1
-rw-r--r-- net/netfilter/nfnetlink_queue.c | 1
-rw-r--r-- net/netfilter/x_tables.c | 1
-rw-r--r-- net/netfilter/xt_IDLETIMER.c | 2
-rw-r--r-- net/netfilter/xt_hashlimit.c | 1
-rw-r--r-- net/netfilter/xt_recent.c | 5
-rw-r--r-- net/netlink/af_netlink.c | 2
-rw-r--r-- net/netlink/genetlink.c | 1
-rw-r--r-- net/netrom/af_netrom.c | 6
-rw-r--r-- net/openvswitch/datapath.c | 5
-rw-r--r-- net/packet/af_packet.c | 1
-rw-r--r-- net/phonet/pn_dev.c | 1
-rw-r--r-- net/rds/tcp.c | 1
-rw-r--r-- net/rose/af_rose.c | 8
-rw-r--r-- net/rxrpc/af_rxrpc.c | 9
-rw-r--r-- net/rxrpc/ar-internal.h | 9
-rw-r--r-- net/rxrpc/call_accept.c | 18
-rw-r--r-- net/rxrpc/call_event.c | 1
-rw-r--r-- net/rxrpc/call_object.c | 15
-rw-r--r-- net/rxrpc/conn_event.c | 3
-rw-r--r-- net/rxrpc/input.c | 6
-rw-r--r-- net/rxrpc/net_ns.c | 1
-rw-r--r-- net/rxrpc/sendmsg.c | 3
-rw-r--r-- net/sched/act_api.c | 1
-rw-r--r-- net/sched/act_bpf.c | 1
-rw-r--r-- net/sched/act_connmark.c | 1
-rw-r--r-- net/sched/act_csum.c | 1
-rw-r--r-- net/sched/act_gact.c | 1
-rw-r--r-- net/sched/act_ife.c | 1
-rw-r--r-- net/sched/act_ipt.c | 2
-rw-r--r-- net/sched/act_mirred.c | 1
-rw-r--r-- net/sched/act_nat.c | 1
-rw-r--r-- net/sched/act_pedit.c | 1
-rw-r--r-- net/sched/act_police.c | 1
-rw-r--r-- net/sched/act_sample.c | 1
-rw-r--r-- net/sched/act_simple.c | 1
-rw-r--r-- net/sched/act_skbedit.c | 1
-rw-r--r-- net/sched/act_skbmod.c | 1
-rw-r--r-- net/sched/act_tunnel_key.c | 1
-rw-r--r-- net/sched/act_vlan.c | 1
-rw-r--r-- net/sched/cls_api.c | 1
-rw-r--r-- net/sched/sch_api.c | 1
-rw-r--r-- net/sctp/endpointola.c | 8
-rw-r--r-- net/sctp/input.c | 13
-rw-r--r-- net/sctp/proc.c | 16
-rw-r--r-- net/sctp/protocol.c | 4
-rw-r--r-- net/sunrpc/auth_gss/svcauth_gss.c | 2
-rw-r--r-- net/sunrpc/cache.c | 10
-rw-r--r-- net/sunrpc/debugfs.c | 6
-rw-r--r-- net/sunrpc/rpc_pipe.c | 42
-rw-r--r-- net/sysctl_net.c | 1
-rw-r--r-- net/tipc/core.c | 1
-rw-r--r-- net/tipc/node.c | 4
-rw-r--r-- net/tipc/udp_media.c | 3
-rw-r--r-- net/unix/af_unix.c | 1
-rw-r--r-- net/wireless/ap.c | 1
-rw-r--r-- net/wireless/chan.c | 9
-rw-r--r-- net/wireless/core.c | 1
-rw-r--r-- net/wireless/core.h | 12
-rw-r--r-- net/wireless/ibss.c | 27
-rw-r--r-- net/wireless/mesh.c | 16
-rw-r--r-- net/wireless/mlme.c | 9
-rw-r--r-- net/wireless/nl80211.c | 205
-rw-r--r-- net/wireless/rdev-ops.h | 15
-rw-r--r-- net/wireless/reg.c | 206
-rw-r--r-- net/wireless/sme.c | 43
-rw-r--r-- net/wireless/trace.h | 47
-rw-r--r-- net/wireless/wext-core.c | 7
-rw-r--r-- net/wireless/wext-proc.c | 2
-rw-r--r-- net/x25/x25_proc.c | 12
-rw-r--r-- net/xfrm/xfrm_input.c | 3
-rw-r--r-- net/xfrm/xfrm_policy.c | 5
-rw-r--r-- net/xfrm/xfrm_proc.c | 2
-rw-r--r-- net/xfrm/xfrm_user.c | 1
-rw-r--r-- security/selinux/hooks.c | 1
-rw-r--r-- security/selinux/include/xfrm.h | 4
-rw-r--r-- security/smack/smack_netfilter.c | 1
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/gact.json | 10
506 files changed, 27350 insertions, 3921 deletions
diff --git a/Documentation/devicetree/bindings/net/nixge.txt b/Documentation/devicetree/bindings/net/nixge.txt
new file mode 100644
index 000000000000..e55af7f0881a
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nixge.txt
@@ -0,0 +1,32 @@
+* NI XGE Ethernet controller
+
+Required properties:
+- compatible: Should be "ni,xge-enet-2.00"
+- reg: Address and length of the register set for the device
+- interrupts: Should contain tx and rx interrupt
+- interrupt-names: Should be "rx" and "tx"
+- phy-mode: See ethernet.txt file in the same directory.
+- phy-handle: See ethernet.txt file in the same directory.
+- nvmem-cells: Phandle of nvmem cell containing the MAC address
+- nvmem-cell-names: Should be "address"
+
+Examples (10G generic PHY):
+ nixge0: ethernet@40000000 {
+ compatible = "ni,xge-enet-2.00";
+ reg = <0x40000000 0x6000>;
+
+ nvmem-cells = <&eth1_addr>;
+ nvmem-cell-names = "address";
+
+ interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>, <0 30 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx", "tx";
+ interrupt-parent = <&intc>;
+
+ phy-mode = "xgmii";
+ phy-handle = <&ethernet_phy1>;
+
+ ethernet_phy1: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
index 270ea4efff13..96398cc2982f 100644
--- a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
+++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
@@ -9,6 +9,7 @@ Required properties:
- "socionext,uniphier-pxs2-ave4" : for PXs2 SoC
- "socionext,uniphier-ld11-ave4" : for LD11 SoC
- "socionext,uniphier-ld20-ave4" : for LD20 SoC
+ - "socionext,uniphier-pxs3-ave4" : for PXs3 SoC
- reg: Address where registers are mapped and size of region.
- interrupts: Should contain the MAC interrupt.
- phy-mode: See ethernet.txt in the same directory. Allow to choose
diff --git a/Documentation/networking/ice.txt b/Documentation/networking/ice.txt
new file mode 100644
index 000000000000..6261c46378e1
--- /dev/null
+++ b/Documentation/networking/ice.txt
@@ -0,0 +1,39 @@
+Intel(R) Ethernet Connection E800 Series Linux Driver
+===================================================================
+
+Intel ice Linux driver.
+Copyright(c) 2018 Intel Corporation.
+
+Contents
+========
+- Enabling the driver
+- Support
+
+The driver in this release supports Intel's E800 Series of products. For
+more information, visit Intel's support page at http://support.intel.com.
+
+Enabling the driver
+===================
+
+The driver is enabled via the standard kernel configuration system,
+using the make command:
+
+ Make oldconfig/silentoldconfig/menuconfig/etc.
+
+The driver is located in the menu structure at:
+
+ -> Device Drivers
+ -> Network device support (NETDEVICES [=y])
+ -> Ethernet driver support
+ -> Intel devices
+ -> Intel(R) Ethernet Connection E800 Series Support
+
+Support
+=======
+
+For general information, go to the Intel support website at:
+
+ http://support.intel.com
+
+If an issue is identified with the released source code, please email
+the maintainer listed in the MAINTAINERS file.
diff --git a/Documentation/ptp/ptp.txt b/Documentation/ptp/ptp.txt
index ae8fef86b832..11e904ee073f 100644
--- a/Documentation/ptp/ptp.txt
+++ b/Documentation/ptp/ptp.txt
@@ -18,7 +18,6 @@
- Adjust clock frequency
+ Ancillary clock features
- - One shot or periodic alarms, with signal delivery to user program
- Time stamp external events
- Period output signals configurable from user space
- Synchronization of the Linux system time via the PPS subsystem
@@ -48,9 +47,7 @@
User space programs may control the clock using standardized
ioctls. A program may query, enable, configure, and disable the
ancillary clock features. User space can receive time stamped
- events via blocking read() and poll(). One shot and periodic
- signals may be configured via the POSIX timer_settime() system
- call.
+ events via blocking read() and poll().
** Writing clock drivers
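
The event interface that survives this change is small enough to show end
to end. A minimal user-space sketch, assuming a clock exposed at the
hypothetical path /dev/ptp0 (the ioctl and structures are the
linux/ptp_clock.h UAPI; error handling is trimmed):

    /* Request external timestamps on channel 0 and block in read(). */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    int main(void)
    {
            struct ptp_extts_request req;
            struct ptp_extts_event ev;
            int fd = open("/dev/ptp0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(&req, 0, sizeof(req));
            req.index = 0;                  /* first external timestamp channel */
            req.flags = PTP_ENABLE_FEATURE;
            if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
                    return 1;

            /* Blocking read(), as described above; poll() works the same way. */
            while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
                    printf("event on channel %u: %lld.%09u\n",
                           ev.index, (long long)ev.t.sec, ev.t.nsec);

            return 0;
    }
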
diff --git a/MAINTAINERS b/MAINTAINERS
index b3ea844cf228..9107d9241564 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7063,6 +7063,7 @@ F: Documentation/networking/ixgbe.txt
F: Documentation/networking/ixgbevf.txt
F: Documentation/networking/i40e.txt
F: Documentation/networking/i40evf.txt
+F: Documentation/networking/ice.txt
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
F: include/linux/avf/virtchnl.h
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 66f203730e80..6ab1059fed66 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4554,7 +4554,6 @@ static struct pernet_operations cma_pernet_operations = {
.exit = cma_exit_net,
.id = &cma_pernet_id,
.size = sizeof(struct cma_pernet),
- .async = true,
};
static int __init cma_init(void)
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 5a52ec77940a..cc2966380c0c 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -403,10 +403,12 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
* our feet
*/
rtnl_lock();
+ down_read(&net_rwsem);
for_each_net(net)
for_each_netdev(net, ndev)
if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
+ up_read(&net_rwsem);
rtnl_unlock();
}
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 94a27d89a303..77d257ec899b 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -267,14 +267,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
- __be32 *p = (__be32 *)cqe;
- int i;
-
mlx5_ib_warn(dev, "dump error cqe\n");
- for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
- pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
- be32_to_cpu(p[1]), be32_to_cpu(p[2]),
- be32_to_cpu(p[3]));
+ mlx5_dump_err_cqe(dev->mdev, cqe);
}
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 85c612ac547a..0d0b0b8dad98 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4739,26 +4739,14 @@ static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq,
u8 *sq_state)
{
- void *out;
- void *sqc;
- int inlen;
int err;
- inlen = MLX5_ST_SZ_BYTES(query_sq_out);
- out = kvzalloc(inlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- err = mlx5_core_query_sq(dev->mdev, sq->base.mqp.qpn, out);
+ err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
if (err)
goto out;
-
- sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context);
- *sq_state = MLX5_GET(sqc, sqc, state);
sq->state = *sq_state;
out:
- kvfree(out);
return err;
}
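
The open-coded SQ query above moves behind mlx5_core_query_sq_state(), a
new helper under drivers/net/ethernet/mellanox/mlx5/core/transobj.c (note
its +25 lines in the diffstat). Judging purely from the code it replaces,
the helper plausibly reduces to this sketch; the in-tree version may
differ in detail:

    int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state)
    {
            void *out, *sqc;
            int inlen, err;

            inlen = MLX5_ST_SZ_BYTES(query_sq_out);
            out = kvzalloc(inlen, GFP_KERNEL);
            if (!out)
                    return -ENOMEM;

            err = mlx5_core_query_sq(dev, sqn, out);
            if (err)
                    goto out;

            /* Same extraction the caller used to do by hand. */
            sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context);
            *state = MLX5_GET(sqc, sqc, state);

    out:
            kvfree(out);
            return err;
    }
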
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index db4bf97c0e15..eb32abb0099a 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -90,8 +90,8 @@ static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
dev_hold(qdev->ndev);
/* The HW vendor's device driver must guarantee
- * that this function returns NULL before the net device reaches
- * NETDEV_UNREGISTER_FINAL state.
+ * that this function returns NULL before the net device has finished
+ * NETDEV_UNREGISTER state.
*/
return qdev->ndev;
}
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index 78b49002fbd2..b816c80df50b 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -45,7 +45,7 @@ struct rdma_cqe_responder {
__le32 imm_data_or_inv_r_Key;
__le32 length;
__le32 imm_data_hi;
- __le16 rq_cons;
+ __le16 rq_cons_or_srq_id;
u8 flags;
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
@@ -115,6 +115,7 @@ enum rdma_cqe_requester_status_enum {
RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+ RDMA_CQE_REQ_STS_XRC_VOILATION_ERR,
MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};
@@ -136,6 +137,7 @@ enum rdma_cqe_type {
RDMA_CQE_TYPE_REQUESTER,
RDMA_CQE_TYPE_RESPONDER_RQ,
RDMA_CQE_TYPE_RESPONDER_SRQ,
+ RDMA_CQE_TYPE_RESPONDER_XRC_SRQ,
RDMA_CQE_TYPE_INVALID,
MAX_RDMA_CQE_TYPE
};
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 875b17272d65..7d51ef47667f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3695,7 +3695,7 @@ static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
struct rdma_cqe_responder *resp, int *update)
{
- if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
+ if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
consume_cqe(cq);
*update |= 1;
}
@@ -3710,7 +3710,7 @@ static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
cnt = process_resp_flush(qp, cq, num_entries, wc,
- resp->rq_cons);
+ resp->rq_cons_or_srq_id);
try_consume_resp_cqe(cq, qp, resp, update);
} else {
cnt = process_resp_one(dev, qp, cq, wc, resp);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index f45e99a938e0..ca5638091b55 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -95,24 +95,6 @@ void usnic_ib_log_vf(struct usnic_ib_vf *vf)
}
/* Start of netdev section */
-static inline const char *usnic_ib_netdev_event_to_string(unsigned long event)
-{
- const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN",
- "NETDEV_REBOOT", "NETDEV_CHANGE",
- "NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU",
- "NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE",
- "NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP",
- "NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE",
- "NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE",
- "NETDEV_NOTIFY_PEERS", "NETDEV_JOIN"
- };
-
- if (event >= ARRAY_SIZE(event2str))
- return "UNKNOWN_NETDEV_EVENT";
- else
- return event2str[event];
-}
-
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
struct usnic_ib_ucontext *ctx;
@@ -185,7 +167,7 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
ib_dispatch_event(&ib_event);
} else {
usnic_dbg("Ignoring %s on %s\n",
- usnic_ib_netdev_event_to_string(event),
+ netdev_cmd_to_name(event),
us_ibdev->ib_dev.name);
}
break;
@@ -222,7 +204,7 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
break;
default:
usnic_dbg("Ignoring event %s on %s",
- usnic_ib_netdev_event_to_string(event),
+ netdev_cmd_to_name(event),
us_ibdev->ib_dev.name);
}
mutex_unlock(&us_ibdev->usdev_lock);
@@ -264,7 +246,7 @@ static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
switch (event) {
case NETDEV_DOWN:
usnic_info("%s via ip notifiers",
- usnic_ib_netdev_event_to_string(event));
+ netdev_cmd_to_name(event));
usnic_fwd_del_ipaddr(us_ibdev->ufdev);
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_GID_CHANGE;
@@ -275,7 +257,7 @@ static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
case NETDEV_UP:
usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
usnic_info("%s via ip notifiers: ip %pI4",
- usnic_ib_netdev_event_to_string(event),
+ netdev_cmd_to_name(event),
&us_ibdev->ufdev->inaddr);
ib_event.event = IB_EVENT_GID_CHANGE;
ib_event.device = &us_ibdev->ib_dev;
@@ -284,7 +266,7 @@ static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
break;
default:
usnic_info("Ignoring event %s on %s",
- usnic_ib_netdev_event_to_string(event),
+ netdev_cmd_to_name(event),
us_ibdev->ib_dev.name);
}
mutex_unlock(&us_ibdev->usdev_lock);
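
netdev_cmd_to_name() is the core helper (see the include/linux/netdevice.h
changes in the diffstat) that makes per-driver event string tables like the
one deleted above unnecessary. A minimal sketch of the same pattern in a
generic notifier callback, where my_netdev_event() is a hypothetical
function rather than anything in-tree:

    #include <linux/netdevice.h>
    #include <linux/printk.h>

    static int my_netdev_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
    {
            struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

            /* The core maps the event code to its name for us. */
            pr_debug("ignoring %s on %s\n",
                     netdev_cmd_to_name(event), ndev->name);
            return NOTIFY_DONE;
    }
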
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 08b85215c2be..891846655000 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -500,6 +500,7 @@ source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
tristate "Simulated networking device"
depends on DEBUG_FS
+ depends on MAY_USE_DEVLINK
help
This driver is a developer testing tool and software model that can
be used to test various control path networking APIs, especially
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4c19d23dd282..c669554d70bb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4791,7 +4791,6 @@ static struct pernet_operations bond_net_ops = {
.exit = bond_net_exit,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
- .async = true,
};
static int __init bonding_init(void)
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index f7799321dffb..01059f1a7bca 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -287,7 +287,7 @@ void bond_create_proc_entry(struct bonding *bond)
if (bn->proc_dir) {
bond->proc_entry = proc_create_data(bond_dev->name,
- S_IRUGO, bn->proc_dir,
+ 0444, bn->proc_dir,
&bond_info_fops, bond);
if (bond->proc_entry == NULL)
netdev_warn(bond_dev, "Cannot create /proc/net/%s/%s\n",
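
Most of the sysfs, procfs, and module_param churn in this merge is the
mechanical conversion from symbolic permission macros to octal literals,
which checkpatch now prefers. Each octal digit covers owner/group/other
with r=4, w=2, x=1, so the substitutions used below can be checked at
compile time:

    /* Compile-time sanity check of the symbolic-to-octal permission
     * mapping used throughout this diff.
     */
    #include <linux/build_bug.h>
    #include <linux/stat.h>

    static inline void perm_mapping_check(void)
    {
            BUILD_BUG_ON(S_IRUGO != 0444);                  /* read for user/group/other */
            BUILD_BUG_ON((S_IRUGO | S_IWUSR) != 0644);      /* plus owner write */
            BUILD_BUG_ON(S_IRUSR != 0400);                  /* owner read only */
    }
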
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 040b493f60ae..6096440e96ea 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -147,7 +147,7 @@ err_no_cmd:
static const struct class_attribute class_attr_bonding_masters = {
.attr = {
.name = "bonding_masters",
- .mode = S_IWUSR | S_IRUGO,
+ .mode = 0644,
},
.show = bonding_show_bonds,
.store = bonding_store_bonds,
@@ -202,7 +202,7 @@ static ssize_t bonding_show_slaves(struct device *d,
return res;
}
-static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
+static DEVICE_ATTR(slaves, 0644, bonding_show_slaves,
bonding_sysfs_store_option);
/* Show the bonding mode. */
@@ -216,8 +216,7 @@ static ssize_t bonding_show_mode(struct device *d,
return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
}
-static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- bonding_show_mode, bonding_sysfs_store_option);
+static DEVICE_ATTR(mode, 0644, bonding_show_mode, bonding_sysfs_store_option);
/* Show the bonding transmit hash method. */
static ssize_t bonding_show_xmit_hash(struct device *d,
@@ -231,7 +230,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
-static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(xmit_hash_policy, 0644,
bonding_show_xmit_hash, bonding_sysfs_store_option);
/* Show arp_validate. */
@@ -247,7 +246,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
-static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
+static DEVICE_ATTR(arp_validate, 0644, bonding_show_arp_validate,
bonding_sysfs_store_option);
/* Show arp_all_targets. */
@@ -263,7 +262,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
return sprintf(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
-static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(arp_all_targets, 0644,
bonding_show_arp_all_targets, bonding_sysfs_store_option);
/* Show fail_over_mac. */
@@ -279,7 +278,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
-static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(fail_over_mac, 0644,
bonding_show_fail_over_mac, bonding_sysfs_store_option);
/* Show the arp timer interval. */
@@ -291,7 +290,7 @@ static ssize_t bonding_show_arp_interval(struct device *d,
return sprintf(buf, "%d\n", bond->params.arp_interval);
}
-static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(arp_interval, 0644,
bonding_show_arp_interval, bonding_sysfs_store_option);
/* Show the arp targets. */
@@ -312,7 +311,7 @@ static ssize_t bonding_show_arp_targets(struct device *d,
return res;
}
-static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(arp_ip_target, 0644,
bonding_show_arp_targets, bonding_sysfs_store_option);
/* Show the up and down delays. */
@@ -324,7 +323,7 @@ static ssize_t bonding_show_downdelay(struct device *d,
return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
-static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(downdelay, 0644,
bonding_show_downdelay, bonding_sysfs_store_option);
static ssize_t bonding_show_updelay(struct device *d,
@@ -336,7 +335,7 @@ static ssize_t bonding_show_updelay(struct device *d,
return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
-static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(updelay, 0644,
bonding_show_updelay, bonding_sysfs_store_option);
/* Show the LACP interval. */
@@ -351,7 +350,7 @@ static ssize_t bonding_show_lacp(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
-static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(lacp_rate, 0644,
bonding_show_lacp, bonding_sysfs_store_option);
static ssize_t bonding_show_min_links(struct device *d,
@@ -362,7 +361,7 @@ static ssize_t bonding_show_min_links(struct device *d,
return sprintf(buf, "%u\n", bond->params.min_links);
}
-static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(min_links, 0644,
bonding_show_min_links, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_select(struct device *d,
@@ -376,7 +375,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
}
-static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(ad_select, 0644,
bonding_show_ad_select, bonding_sysfs_store_option);
/* Show the number of peer notifications to send after a failover event. */
@@ -387,9 +386,9 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
struct bonding *bond = to_bond(d);
return sprintf(buf, "%d\n", bond->params.num_peer_notif);
}
-static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(num_grat_arp, 0644,
bonding_show_num_peer_notif, bonding_sysfs_store_option);
-static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(num_unsol_na, 0644,
bonding_show_num_peer_notif, bonding_sysfs_store_option);
/* Show the MII monitor interval. */
@@ -401,7 +400,7 @@ static ssize_t bonding_show_miimon(struct device *d,
return sprintf(buf, "%d\n", bond->params.miimon);
}
-static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(miimon, 0644,
bonding_show_miimon, bonding_sysfs_store_option);
/* Show the primary slave. */
@@ -421,7 +420,7 @@ static ssize_t bonding_show_primary(struct device *d,
return count;
}
-static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(primary, 0644,
bonding_show_primary, bonding_sysfs_store_option);
/* Show the primary_reselect flag. */
@@ -438,7 +437,7 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
return sprintf(buf, "%s %d\n",
val->string, bond->params.primary_reselect);
}
-static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
/* Show the use_carrier flag. */
@@ -450,7 +449,7 @@ static ssize_t bonding_show_carrier(struct device *d,
return sprintf(buf, "%d\n", bond->params.use_carrier);
}
-static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
@@ -471,7 +470,7 @@ static ssize_t bonding_show_active_slave(struct device *d,
return count;
}
-static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(active_slave, 0644,
bonding_show_active_slave, bonding_sysfs_store_option);
/* Show link status of the bond interface. */
@@ -484,7 +483,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
return sprintf(buf, "%s\n", active ? "up" : "down");
}
-static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
+static DEVICE_ATTR(mii_status, 0444, bonding_show_mii_status, NULL);
/* Show current 802.3ad aggregator ID. */
static ssize_t bonding_show_ad_aggregator(struct device *d,
@@ -503,7 +502,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
return count;
}
-static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
+static DEVICE_ATTR(ad_aggregator, 0444, bonding_show_ad_aggregator, NULL);
/* Show number of active 802.3ad ports. */
@@ -523,7 +522,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
return count;
}
-static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
+static DEVICE_ATTR(ad_num_ports, 0444, bonding_show_ad_num_ports, NULL);
/* Show current 802.3ad actor key. */
@@ -543,7 +542,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
return count;
}
-static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
+static DEVICE_ATTR(ad_actor_key, 0444, bonding_show_ad_actor_key, NULL);
/* Show current 802.3ad partner key. */
@@ -563,7 +562,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
return count;
}
-static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
+static DEVICE_ATTR(ad_partner_key, 0444, bonding_show_ad_partner_key, NULL);
/* Show current 802.3ad partner mac. */
@@ -582,7 +581,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
return count;
}
-static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
+static DEVICE_ATTR(ad_partner_mac, 0444, bonding_show_ad_partner_mac, NULL);
/* Show the queue_ids of the slaves in the current bond. */
static ssize_t bonding_show_queue_id(struct device *d,
@@ -615,7 +614,7 @@ static ssize_t bonding_show_queue_id(struct device *d,
return res;
}
-static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
+static DEVICE_ATTR(queue_id, 0644, bonding_show_queue_id,
bonding_sysfs_store_option);
@@ -628,7 +627,7 @@ static ssize_t bonding_show_slaves_active(struct device *d,
return sprintf(buf, "%d\n", bond->params.all_slaves_active);
}
-static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(all_slaves_active, 0644,
bonding_show_slaves_active, bonding_sysfs_store_option);
/* Show the number of IGMP membership reports to send on link failure */
@@ -640,7 +639,7 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
return sprintf(buf, "%d\n", bond->params.resend_igmp);
}
-static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(resend_igmp, 0644,
bonding_show_resend_igmp, bonding_sysfs_store_option);
@@ -652,7 +651,7 @@ static ssize_t bonding_show_lp_interval(struct device *d,
return sprintf(buf, "%d\n", bond->params.lp_interval);
}
-static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(lp_interval, 0644,
bonding_show_lp_interval, bonding_sysfs_store_option);
static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
@@ -662,7 +661,7 @@ static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
struct bonding *bond = to_bond(d);
return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
-static DEVICE_ATTR(tlb_dynamic_lb, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(tlb_dynamic_lb, 0644,
bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
static ssize_t bonding_show_packets_per_slave(struct device *d,
@@ -674,7 +673,7 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
return sprintf(buf, "%u\n", packets_per_slave);
}
-static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(packets_per_slave, 0644,
bonding_show_packets_per_slave, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
@@ -688,7 +687,7 @@ static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
return 0;
}
-static DEVICE_ATTR(ad_actor_sys_prio, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(ad_actor_sys_prio, 0644,
bonding_show_ad_actor_sys_prio, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_actor_system(struct device *d,
@@ -703,7 +702,7 @@ static ssize_t bonding_show_ad_actor_system(struct device *d,
return 0;
}
-static DEVICE_ATTR(ad_actor_system, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(ad_actor_system, 0644,
bonding_show_ad_actor_system, bonding_sysfs_store_option);
static ssize_t bonding_show_ad_user_port_key(struct device *d,
@@ -717,7 +716,7 @@ static ssize_t bonding_show_ad_user_port_key(struct device *d,
return 0;
}
-static DEVICE_ATTR(ad_user_port_key, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(ad_user_port_key, 0644,
bonding_show_ad_user_port_key, bonding_sysfs_store_option);
static struct attribute *per_bond_attrs[] = {
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 7d16c51e6913..2f120b2ffef0 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -25,8 +25,8 @@ const struct slave_attribute slave_attr_##_name = { \
.mode = _mode }, \
.show = _show, \
};
-#define SLAVE_ATTR_RO(_name) \
- SLAVE_ATTR(_name, S_IRUGO, _name##_show)
+#define SLAVE_ATTR_RO(_name) \
+ SLAVE_ATTR(_name, 0444, _name##_show)
static ssize_t state_show(struct slave *slave, char *buf)
{
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 709838e4c062..a0f954f36c09 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -40,20 +40,20 @@ static LIST_HEAD(ser_list);
static LIST_HEAD(ser_release_list);
static bool ser_loop;
-module_param(ser_loop, bool, S_IRUGO);
+module_param(ser_loop, bool, 0444);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
static bool ser_use_stx = true;
-module_param(ser_use_stx, bool, S_IRUGO);
+module_param(ser_use_stx, bool, 0444);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
static bool ser_use_fcs = true;
-module_param(ser_use_fcs, bool, S_IRUGO);
+module_param(ser_use_fcs, bool, 0444);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
static int ser_write_chunk = MAX_WRITE_CHUNK;
-module_param(ser_write_chunk, int, S_IRUGO);
+module_param(ser_write_chunk, int, 0444);
MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
@@ -97,21 +97,21 @@ static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
ser->debugfs_tty_dir =
debugfs_create_dir(tty->name, debugfsdir);
if (!IS_ERR(ser->debugfs_tty_dir)) {
- debugfs_create_blob("last_tx_msg", S_IRUSR,
- ser->debugfs_tty_dir,
- &ser->tx_blob);
+ debugfs_create_blob("last_tx_msg", 0400,
+ ser->debugfs_tty_dir,
+ &ser->tx_blob);
- debugfs_create_blob("last_rx_msg", S_IRUSR,
- ser->debugfs_tty_dir,
- &ser->rx_blob);
+ debugfs_create_blob("last_rx_msg", 0400,
+ ser->debugfs_tty_dir,
+ &ser->rx_blob);
- debugfs_create_x32("ser_state", S_IRUSR,
- ser->debugfs_tty_dir,
- (u32 *)&ser->state);
+ debugfs_create_x32("ser_state", 0400,
+ ser->debugfs_tty_dir,
+ (u32 *)&ser->state);
- debugfs_create_x8("tty_status", S_IRUSR,
- ser->debugfs_tty_dir,
- &ser->tty_status);
+ debugfs_create_x8("tty_status", 0400,
+ ser->debugfs_tty_dir,
+ &ser->tty_status);
}
ser->tx_blob.data = ser->tx_data;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 980eace53d44..d28a1398c091 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -35,27 +35,27 @@ MODULE_DESCRIPTION("CAIF SPI driver");
#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
static bool spi_loop;
-module_param(spi_loop, bool, S_IRUGO);
+module_param(spi_loop, bool, 0444);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
/* SPI frame alignment. */
-module_param(spi_frm_align, int, S_IRUGO);
+module_param(spi_frm_align, int, 0444);
MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
/*
* SPI padding options.
* Warning: must be a base of 2 (& operation used) and can not be zero !
*/
-module_param(spi_up_head_align, int, S_IRUGO);
+module_param(spi_up_head_align, int, 0444);
MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
-module_param(spi_up_tail_align, int, S_IRUGO);
+module_param(spi_up_tail_align, int, 0444);
MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment.");
-module_param(spi_down_head_align, int, S_IRUGO);
+module_param(spi_down_head_align, int, 0444);
MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment.");
-module_param(spi_down_tail_align, int, S_IRUGO);
+module_param(spi_down_tail_align, int, 0444);
MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
#ifdef CONFIG_ARM
@@ -250,10 +250,10 @@ static const struct file_operations dbgfs_frame_fops = {
static inline void dev_debugfs_add(struct cfspi *cfspi)
{
cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root);
- cfspi->dbgfs_state = debugfs_create_file("state", S_IRUGO,
+ cfspi->dbgfs_state = debugfs_create_file("state", 0444,
cfspi->dbgfs_dir, cfspi,
&dbgfs_state_fops);
- cfspi->dbgfs_frame = debugfs_create_file("frame", S_IRUGO,
+ cfspi->dbgfs_frame = debugfs_create_file("frame", 0444,
cfspi->dbgfs_dir, cfspi,
&dbgfs_frame_fops);
}
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index c3d104feee13..2814e0dee4bb 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -629,21 +629,21 @@ static inline void debugfs_init(struct cfv_info *cfv)
if (IS_ERR(cfv->debugfs))
return;
- debugfs_create_u32("rx-napi-complete", S_IRUSR, cfv->debugfs,
+ debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs,
&cfv->stats.rx_napi_complete);
- debugfs_create_u32("rx-napi-resched", S_IRUSR, cfv->debugfs,
+ debugfs_create_u32("rx-napi-resched", 0400, cfv->debugfs,
&cfv->stats.rx_napi_resched);
- debugfs_create_u32("rx-nomem", S_IRUSR, cfv->debugfs,
+ debugfs_create_u32("rx-nomem", 0400, cfv->debugfs,
&cfv->stats.rx_nomem);
- debugfs_create_u32("rx-kicks", S_IRUSR, cfv->debugfs,
+ debugfs_create_u32("rx-kicks", 0400, cfv->debugfs,
&cfv->stats.rx_kicks);
- debugfs_create_u32("tx-full-ring", S_IRUSR, cfv->debugfs,
+ debugfs_create_u32("tx-full-ring", 0400, cfv->debugfs,
&cfv->stats.tx_full_ring);
- debugfs_create_u32("tx-no-mem", S_IRUSR, cfv->debugfs,
+ debugfs_create_u32("tx-no-mem", 0400, cfv->debugfs,
&cfv->stats.tx_no_mem);
- debugfs_create_u32("tx-kicks", S_IRUSR, cfv->debugfs,
+ debugfs_create_u32("tx-kicks", 0400, cfv->debugfs,
&cfv->stats.tx_kicks);
- debugfs_create_u32("tx-flow-on", S_IRUSR, cfv->debugfs,
+ debugfs_create_u32("tx-flow-on", 0400, cfv->debugfs,
&cfv->stats.tx_flow_on);
}
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f37ce0e1b603..d98c69045b17 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1224,8 +1224,7 @@ static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
return ret;
}
-static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
- at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+static DEVICE_ATTR(mb0_id, 0644, at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
static struct attribute *at91_sysfs_attrs[] = {
&dev_attr_mb0_id.attr,
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 6da69af103e6..d4dd4da23997 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -67,12 +67,12 @@ MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver");
* otherwise 11 bit SFF messages.
*/
static int msgobj15_eff;
-module_param(msgobj15_eff, int, S_IRUGO);
+module_param(msgobj15_eff, int, 0444);
MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
"(default: 11-bit standard frames)");
static int i82527_compat;
-module_param(i82527_compat, int, S_IRUGO);
+module_param(i82527_compat, int, 0444);
MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 comptibility mode "
"without using additional functions");
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 3a30fd3b4498..fcd34698074f 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -82,29 +82,29 @@ static u8 cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
static u8 bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-module_param_hw_array(port, ulong, ioport, NULL, S_IRUGO);
+module_param_hw_array(port, ulong, ioport, NULL, 0444);
MODULE_PARM_DESC(port, "I/O port number");
-module_param_hw_array(mem, ulong, iomem, NULL, S_IRUGO);
+module_param_hw_array(mem, ulong, iomem, NULL, 0444);
MODULE_PARM_DESC(mem, "I/O memory address");
-module_param_hw_array(indirect, int, ioport, NULL, S_IRUGO);
+module_param_hw_array(indirect, int, ioport, NULL, 0444);
MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
-module_param_hw_array(irq, int, irq, NULL, S_IRUGO);
+module_param_hw_array(irq, int, irq, NULL, 0444);
MODULE_PARM_DESC(irq, "IRQ number");
-module_param_array(clk, int, NULL, S_IRUGO);
+module_param_array(clk, int, NULL, 0444);
MODULE_PARM_DESC(clk, "External oscillator clock frequency "
"(default=16000000 [16 MHz])");
-module_param_array(cir, byte, NULL, S_IRUGO);
+module_param_array(cir, byte, NULL, 0444);
MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])");
-module_param_array(cor, byte, NULL, S_IRUGO);
+module_param_array(cor, byte, NULL, 0444);
MODULE_PARM_DESC(cor, "Clockout register (default=0x00)");
-module_param_array(bcr, byte, NULL, S_IRUGO);
+module_param_array(bcr, byte, NULL, 0444);
MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])");
#define CC770_IOSIZE 0x20
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 897c6b113d3f..2d3046afa80d 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1484,7 +1484,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
} \
} \
module_param_named(name, grcan_module_config.name, \
- mtype, S_IRUGO); \
+ mtype, 0444); \
MODULE_PARM_DESC(name, desc)
#define GRCAN_CONFIG_ATTR(name, desc) \
@@ -1513,7 +1513,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
struct grcan_priv *priv = netdev_priv(dev); \
return sprintf(buf, "%d\n", priv->config.name); \
} \
- static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \
+ static DEVICE_ATTR(name, 0644, \
grcan_show_##name, \
grcan_store_##name); \
GRCAN_MODULE_PARAM(name, ushort, GRCAN_NOT_BOOL, desc)
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 12a53c8e8e1d..adfdb66a486e 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1865,9 +1865,9 @@ static ssize_t ican3_sysfs_show_fwinfo(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
}
-static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
- ican3_sysfs_set_term);
-static DEVICE_ATTR(fwinfo, S_IRUSR | S_IRUGO, ican3_sysfs_show_fwinfo, NULL);
+static DEVICE_ATTR(termination, 0644, ican3_sysfs_show_term,
+ ican3_sysfs_set_term);
+static DEVICE_ATTR(fwinfo, 0444, ican3_sysfs_show_fwinfo, NULL);
static struct attribute *ican3_sysfs_attrs[] = {
&dev_attr_termination.attr,
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index a89c1e92554d..1a2ae6ce8d87 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -48,27 +48,27 @@ static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
static spinlock_t indirect_lock[MAXDEV]; /* lock for indirect access mode */
-module_param_hw_array(port, ulong, ioport, NULL, S_IRUGO);
+module_param_hw_array(port, ulong, ioport, NULL, 0444);
MODULE_PARM_DESC(port, "I/O port number");
-module_param_hw_array(mem, ulong, iomem, NULL, S_IRUGO);
+module_param_hw_array(mem, ulong, iomem, NULL, 0444);
MODULE_PARM_DESC(mem, "I/O memory address");
-module_param_hw_array(indirect, int, ioport, NULL, S_IRUGO);
+module_param_hw_array(indirect, int, ioport, NULL, 0444);
MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
-module_param_hw_array(irq, int, irq, NULL, S_IRUGO);
+module_param_hw_array(irq, int, irq, NULL, 0444);
MODULE_PARM_DESC(irq, "IRQ number");
-module_param_array(clk, int, NULL, S_IRUGO);
+module_param_array(clk, int, NULL, 0444);
MODULE_PARM_DESC(clk, "External oscillator clock frequency "
"(default=16000000 [16 MHz])");
-module_param_array(cdr, byte, NULL, S_IRUGO);
+module_param_array(cdr, byte, NULL, 0444);
MODULE_PARM_DESC(cdr, "Clock divider register "
"(default=0x48 [CDR_CBP | CDR_CLK_OFF])");
-module_param_array(ocr, byte, NULL, S_IRUGO);
+module_param_array(ocr, byte, NULL, 0444);
MODULE_PARM_DESC(ocr, "Output control register "
"(default=0x18 [OCR_TX0_PUSHPULL])");
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 5f64deec9f6c..e22696190583 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -601,8 +601,8 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
return count;
}
-static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
-static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
+static const DEVICE_ATTR(chip, 0444, show_chip, NULL);
+static const DEVICE_ATTR(output, 0644, show_output, store_output);
static const struct attribute *const netdev_sysfs_attrs[] = {
&dev_attr_chip.attr,
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 98d118b3aaf4..e90817608645 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -220,7 +220,7 @@
#define DEVICE_NAME "mcp251x"
static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
-module_param(mcp251x_enable_dma, int, S_IRUGO);
+module_param(mcp251x_enable_dma, int, 0444);
MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
static const struct can_bittiming_const mcp251x_bittiming_const = {
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index c6dcf93675c0..5820fd5b69b5 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -496,7 +496,7 @@ static ssize_t show_firmware(struct device *d,
(dev->version >> 8) & 0xf,
dev->version & 0xff);
}
-static DEVICE_ATTR(firmware, S_IRUGO, show_firmware, NULL);
+static DEVICE_ATTR(firmware, 0444, show_firmware, NULL);
static ssize_t show_hardware(struct device *d,
struct device_attribute *attr, char *buf)
@@ -509,7 +509,7 @@ static ssize_t show_hardware(struct device *d,
(dev->version >> 24) & 0xf,
(dev->version >> 16) & 0xff);
}
-static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL);
+static DEVICE_ATTR(hardware, 0444, show_hardware, NULL);
static ssize_t show_nets(struct device *d,
struct device_attribute *attr, char *buf)
@@ -519,7 +519,7 @@ static ssize_t show_nets(struct device *d,
return sprintf(buf, "%d", dev->net_count);
}
-static DEVICE_ATTR(nets, S_IRUGO, show_nets, NULL);
+static DEVICE_ATTR(nets, 0444, show_nets, NULL);
static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
{
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index c2b04f505e16..d200a5b0651c 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -65,7 +65,7 @@ MODULE_ALIAS_RTNL_LINK(DRV_NAME);
*/
static bool echo; /* echo testing. Default: 0 (Off) */
-module_param(echo, bool, S_IRUGO);
+module_param(echo, bool, 0444);
MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 9a5d786b4885..3d2091099f7f 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -723,6 +723,24 @@ static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
STATS_TYPE_BANK0 | STATS_TYPE_BANK1);
}
+static const uint8_t *mv88e6xxx_atu_vtu_stats_strings[] = {
+ "atu_member_violation",
+ "atu_miss_violation",
+ "atu_full_violation",
+ "vtu_member_violation",
+ "vtu_miss_violation",
+};
+
+static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++)
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ mv88e6xxx_atu_vtu_stats_strings[i],
+ ETH_GSTRING_LEN);
+}
+
static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
uint8_t *data)
{
@@ -736,9 +754,12 @@ static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
if (chip->info->ops->serdes_get_strings) {
data += count * ETH_GSTRING_LEN;
- chip->info->ops->serdes_get_strings(chip, port, data);
+ count = chip->info->ops->serdes_get_strings(chip, port, data);
}
+ data += count * ETH_GSTRING_LEN;
+ mv88e6xxx_atu_vtu_get_strings(data);
+
mutex_unlock(&chip->reg_lock);
}
@@ -783,10 +804,13 @@ static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port)
if (chip->info->ops->serdes_get_sset_count)
serdes_count = chip->info->ops->serdes_get_sset_count(chip,
port);
- if (serdes_count < 0)
+ if (serdes_count < 0) {
count = serdes_count;
- else
- count += serdes_count;
+ goto out;
+ }
+ count += serdes_count;
+ count += ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings);
+
out:
mutex_unlock(&chip->reg_lock);
@@ -841,6 +865,16 @@ static int mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
0);
}
+static void mv88e6xxx_atu_vtu_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ *data++ = chip->ports[port].atu_member_violation;
+ *data++ = chip->ports[port].atu_miss_violation;
+ *data++ = chip->ports[port].atu_full_violation;
+ *data++ = chip->ports[port].vtu_member_violation;
+ *data++ = chip->ports[port].vtu_miss_violation;
+}
+
static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
@@ -849,12 +883,14 @@ static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port,
if (chip->info->ops->stats_get_stats)
count = chip->info->ops->stats_get_stats(chip, port, data);
+ mutex_lock(&chip->reg_lock);
if (chip->info->ops->serdes_get_stats) {
data += count;
- mutex_lock(&chip->reg_lock);
- chip->info->ops->serdes_get_stats(chip, port, data);
- mutex_unlock(&chip->reg_lock);
+ count = chip->info->ops->serdes_get_stats(chip, port, data);
}
+ data += count;
+ mv88e6xxx_atu_vtu_get_stats(chip, port, data);
+ mutex_unlock(&chip->reg_lock);
}
static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
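Two things change together in chip.c: the SerDes callbacks now return how many entries they produced, and a fixed block of ATU/VTU strings and counters is appended after them. The caller advances the data pointer by each returned count before the next writer runs, so the blocks land back to back. A condensed sketch of the composition (helper names other than the ATU/VTU one are hypothetical):

    count = hw_stats_get_strings(chip, port, data);   /* hypothetical base MIB fill */
    data += count * ETH_GSTRING_LEN;
    count = serdes_get_strings(chip, port, data);     /* returns 0 if no SerDes */
    data += count * ETH_GSTRING_LEN;
    mv88e6xxx_atu_vtu_get_strings(data);              /* fixed-size tail */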
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index bad211014e91..80490f66bc06 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -194,6 +194,11 @@ struct mv88e6xxx_port_hwtstamp {
struct mv88e6xxx_port {
u64 serdes_stats[2];
+ u64 atu_member_violation;
+ u64 atu_miss_violation;
+ u64 atu_full_violation;
+ u64 vtu_member_violation;
+ u64 vtu_miss_violation;
};
struct mv88e6xxx_chip {
@@ -409,10 +414,10 @@ struct mv88e6xxx_ops {
/* Statistics from the SERDES interface */
int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
- void (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
- uint8_t *data);
- void (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data);
+ int (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
+ uint8_t *data);
+ int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
/* VLAN Translation Unit operations */
int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 20d941f4273b..307410898fc9 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -336,8 +336,6 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;
- mutex_unlock(&chip->reg_lock);
-
if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU age out violation for %pM\n",
@@ -348,17 +346,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
dev_err_ratelimited(chip->dev,
"ATU member violation for %pM portvec %x\n",
entry.mac, entry.portvec);
+ chip->ports[entry.portvec].atu_member_violation++;
}
- if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION)
+ if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU miss violation for %pM portvec %x\n",
entry.mac, entry.portvec);
+ chip->ports[entry.portvec].atu_miss_violation++;
+ }
- if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION)
+ if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU full violation for %pM portvec %x\n",
entry.mac, entry.portvec);
+ chip->ports[entry.portvec].atu_full_violation++;
+ }
+ mutex_unlock(&chip->reg_lock);
return IRQ_HANDLED;
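In both interrupt threads the unlock deliberately moves below the new increments: the per-port counters are read out by the ethtool path under reg_lock, so the writers must hold the same lock. The corrected shape, sketched with a hypothetical read helper and bit mask:

    mutex_lock(&chip->reg_lock);
    err = read_violation(chip, &val, &entry);     /* hypothetical helper */
    if (!err && (val & MISS_VIOLATION))           /* hypothetical mask */
            chip->ports[entry.portvec].atu_miss_violation++;
    mutex_unlock(&chip->reg_lock);                /* counters stay under the lock */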
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 7997961647de..058326924f3e 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -539,18 +539,21 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;
- mutex_unlock(&chip->reg_lock);
-
spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK;
if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
entry.vid, spid);
+ chip->ports[spid].vtu_member_violation++;
}
- if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION)
- dev_err_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
+ if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
+ dev_dbg_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
entry.vid, spid);
+ chip->ports[spid].vtu_miss_violation++;
+ }
+
+ mutex_unlock(&chip->reg_lock);
return IRQ_HANDLED;
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index b6166424216a..fb058fd35c0d 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -106,20 +106,21 @@ int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
return 0;
}
-void mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
- int port, uint8_t *data)
+int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data)
{
struct mv88e6352_serdes_hw_stat *stat;
int i;
if (!mv88e6352_port_has_serdes(chip, port))
- return;
+ return 0;
for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
stat = &mv88e6352_serdes_hw_stats[i];
memcpy(data + i * ETH_GSTRING_LEN, stat->string,
ETH_GSTRING_LEN);
}
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
static uint64_t mv88e6352_serdes_get_stat(struct mv88e6xxx_chip *chip,
@@ -149,8 +150,8 @@ static uint64_t mv88e6352_serdes_get_stat(struct mv88e6xxx_chip *chip,
return val;
}
-void mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
{
struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
struct mv88e6352_serdes_hw_stat *stat;
@@ -158,7 +159,7 @@ void mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
int i;
if (!mv88e6352_port_has_serdes(chip, port))
- return;
+ return 0;
BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
ARRAY_SIZE(mv88e6xxx_port->serdes_stats));
@@ -169,6 +170,8 @@ void mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
mv88e6xxx_port->serdes_stats[i] += value;
data[i] = mv88e6xxx_port->serdes_stats[i];
}
+
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 641baa75f910..1897c01c6e19 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -45,8 +45,8 @@
int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
-void mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
- int port, uint8_t *data);
-void mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data);
+int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data);
+int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
#endif
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index c56ac9ebc08f..fe6c834c422e 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -117,7 +117,7 @@ static const char version[] =
static int apne_owned; /* signal if card already owned */
static u32 apne_msg_enable;
-module_param_named(msg_enable, apne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+module_param_named(msg_enable, apne_msg_enable, uint, 0444);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
struct net_device * __init apne_probe(int unit)
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index 5d9bbde9fe68..c9c55c9eab9f 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -113,7 +113,7 @@ static void __NS8390_init(struct net_device *dev, int startp);
static unsigned version_printed;
static u32 msg_enable;
-module_param(msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+module_param(msg_enable, uint, 0444);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
/*
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 4cdff6e6af89..99a2453eb34f 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -77,7 +77,7 @@ static u32 ne_msg_enable;
module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param_array(bad, int, NULL, 0);
-module_param_named(msg_enable, ne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+module_param_named(msg_enable, ne_msg_enable, uint, 0444);
MODULE_PARM_DESC(io, "I/O base address(es),required");
MODULE_PARM_DESC(irq, "IRQ number(s)");
MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 1bdea746926c..42985a82321a 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -76,7 +76,7 @@ MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
MODULE_DESCRIPTION("PCI NE2000 clone driver");
MODULE_LICENSE("GPL");
-module_param_named(msg_enable, ne2k_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+module_param_named(msg_enable, ne2k_msg_enable, uint, 0444);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 4e02f6a23575..3fe3b4dfa7c5 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -563,7 +563,7 @@ static int irq[MAX_ULTRA_CARDS];
module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
-module_param_named(msg_enable, ultra_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+module_param_named(msg_enable, ultra_msg_enable, uint, 0444);
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index aca957d4e121..1f0670cd3ea3 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -71,7 +71,7 @@ static void stnic_init (struct net_device *dev);
static u32 stnic_msg_enable;
-module_param_named(msg_enable, stnic_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+module_param_named(msg_enable, stnic_msg_enable, uint, 0444);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
/* SH7750 specific read/write io. */
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index fb17c2c7e1dd..c834123560f1 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -507,7 +507,7 @@ module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param_hw_array(mem, int, iomem, NULL, 0);
module_param_hw_array(mem_end, int, iomem, NULL, 0);
-module_param_named(msg_enable, wd_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH));
+module_param_named(msg_enable, wd_msg_enable, uint, 0444);
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)");
MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)");
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index b6cf4b6962f5..908218561fdd 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -129,6 +129,7 @@ config FEALNX
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
+source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
config NET_NETX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 3cdf01e96e0b..d732e9522b76 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
+obj-$(CONFIG_NET_VENDOR_NI) += ni/
obj-$(CONFIG_NET_NETX) += netx-eth.o
obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/
obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 527908c7e384..baca8f704a45 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -56,7 +56,7 @@
static atomic_t instance_count = ATOMIC_INIT(~0);
/* Module parameters */
static int debug = -1;
-module_param(debug, int, S_IRUGO | S_IWUSR);
+module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
@@ -65,12 +65,12 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
#define RX_DESCRIPTORS 64
static int dma_rx_num = RX_DESCRIPTORS;
-module_param(dma_rx_num, int, S_IRUGO | S_IWUSR);
+module_param(dma_rx_num, int, 0644);
MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
#define TX_DESCRIPTORS 64
static int dma_tx_num = TX_DESCRIPTORS;
-module_param(dma_tx_num, int, S_IRUGO | S_IWUSR);
+module_param(dma_tx_num, int, 0644);
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index bf2de5298005..1b9d3130af4d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -631,8 +631,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
*/
wmb();
- writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+ writel_relaxed(mmio_read_reg,
+ ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+ mmiowb();
for (i = 0; i < timeout; i++) {
if (read_resp->req_id == mmio_read->seq_num)
break;
@@ -1826,7 +1828,9 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head,
+ dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ mmiowb();
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
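This is the first instance of a pattern repeated below in bnx2x and bnxt: when an explicit wmb() already orders the preceding descriptor writes, the doorbell write itself can use writel_relaxed(), skipping the redundant barrier that writel() implies on weakly ordered architectures, while mmiowb() keeps the posted write ordered against a later spin_unlock from another CPU. Condensed, with a hypothetical doorbell pointer:

    wmb();                     /* descriptors visible to the device first */
    writel_relaxed(val, db);   /* "db" is hypothetical; no implicit barrier here */
    mmiowb();                  /* order the MMIO write before a later unlock */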
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f7657227cfe..6fdc753d9483 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -107,7 +107,8 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
return io_sq->q_depth - 1 - cnt;
}
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
+ bool relaxed)
{
u16 tail;
@@ -116,7 +117,10 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
- writel(tail, io_sq->db_addr);
+ if (relaxed)
+ writel_relaxed(tail, io_sq->db_addr);
+ else
+ writel(tail, io_sq->db_addr);
return 0;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 6975150d144e..a822e70c2af3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -556,7 +556,8 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
* issue a doorbell
*/
wmb();
- ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
+ ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
+ mmiowb();
}
rx_ring->next_to_use = next_to_use;
@@ -2151,7 +2152,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (netif_xmit_stopped(txq) || !skb->xmit_more) {
/* trigger the dma engine */
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.doorbells++;
u64_stats_update_end(&tx_ring->syncp);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 100adee778df..7c204f05b418 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -137,21 +137,21 @@ static unsigned int ecc_ded_period = 600;
#ifdef CONFIG_AMD_XGBE_HAVE_ECC
/* Only expose the ECC parameters if supported */
-module_param(ecc_sec_info_threshold, uint, S_IWUSR | S_IRUGO);
+module_param(ecc_sec_info_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_info_threshold,
" ECC corrected error informational threshold setting");
-module_param(ecc_sec_warn_threshold, uint, S_IWUSR | S_IRUGO);
+module_param(ecc_sec_warn_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_sec_warn_threshold,
" ECC corrected error warning threshold setting");
-module_param(ecc_sec_period, uint, S_IWUSR | S_IRUGO);
+module_param(ecc_sec_period, uint, 0644);
MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");
-module_param(ecc_ded_threshold, uint, S_IWUSR | S_IRUGO);
+module_param(ecc_ded_threshold, uint, 0644);
MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");
-module_param(ecc_ded_period, uint, S_IWUSR | S_IRUGO);
+module_param(ecc_ded_period, uint, 0644);
MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index d91fa595be98..795e556d4a3f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -131,7 +131,7 @@ MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
static int debug = -1;
-module_param(debug, int, S_IWUSR | S_IRUGO);
+module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, " Network interface message level setting");
static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index d3b847ec7465..84d7f4dd4ce1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -505,7 +505,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
err_exit:;
}
-int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
+static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 8eef9fb6b1fe..2326cc219c46 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1190,7 +1190,7 @@ static int bgmac_open(struct net_device *net_dev)
bgmac_chip_init(bgmac);
err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
- KBUILD_MODNAME, net_dev);
+ net_dev->name, net_dev);
if (err < 0) {
dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
bgmac_dma_cleanup(bgmac);
@@ -1492,6 +1492,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
struct net_device *net_dev = bgmac->net_dev;
int err;
+ bgmac_chip_intrs_off(bgmac);
+
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
dev_set_drvdata(bgmac->dev, bgmac);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 5e34b34f7740..9ffc4a8c5fc7 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -87,7 +87,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
static int disable_msi = 0;
-module_param(disable_msi, int, S_IRUGO);
+module_param(disable_msi, int, 0444);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 352beff796ae..d847e1b9c37b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -166,6 +166,12 @@ do { \
#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
+#define REG_WR_RELAXED(bp, offset, val) \
+ writel_relaxed((u32)val, REG_ADDR(bp, offset))
+
+#define REG_WR16_RELAXED(bp, offset, val) \
+ writew_relaxed((u16)val, REG_ADDR(bp, offset))
+
#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
@@ -758,10 +764,8 @@ struct bnx2x_fastpath {
#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
#error "Min DB doorbell stride is 8"
#endif
-#define DOORBELL(bp, cid, val) \
- do { \
- writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
- } while (0)
+#define DOORBELL_RELAXED(bp, cid, val) \
+ writel_relaxed((u32)(val), (bp)->doorbells + ((bp)->db_size * (cid)))
/* TX CSUM helpers */
#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d7c98e807ca8..95871576ab92 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4153,9 +4153,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
txdata->tx_db.data.prod += nbd;
- barrier();
+ /* make sure descriptor update is observed by HW */
+ wmb();
- DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+ DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
mmiowb();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a5265e1344f1..a8ce5c55bbb0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -522,8 +522,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
wmb();
for (i = 0; i < sizeof(rx_prods)/4; i++)
- REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
- ((u32 *)&rx_prods)[i]);
+ REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4,
+ ((u32 *)&rx_prods)[i]);
mmiowb(); /* keep prod updates ordered */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 1e33abde4a3e..da18aa239acb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2591,8 +2591,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
wmb();
txdata->tx_db.data.prod += 2;
- barrier();
- DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+ /* make sure descriptor update is observed by the HW */
+ wmb();
+ DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
mmiowb();
barrier();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b8388e93520a..c766ae23bc74 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -97,29 +97,29 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
int bnx2x_num_queues;
-module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
+module_param_named(num_queues, bnx2x_num_queues, int, 0444);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is as a number of CPUs)");
static int disable_tpa;
-module_param(disable_tpa, int, S_IRUGO);
+module_param(disable_tpa, int, 0444);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
-module_param(int_mode, int, S_IRUGO);
+module_param(int_mode, int, 0444);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
-module_param(dropless_fc, int, S_IRUGO);
+module_param(dropless_fc, int, 0444);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int mrrs = -1;
-module_param(mrrs, int, S_IRUGO);
+module_param(mrrs, int, 0444);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
-module_param(debug, int, S_IRUGO);
+module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static struct workqueue_struct *bnx2x_wq;
@@ -3817,8 +3817,8 @@ static void bnx2x_sp_prod_update(struct bnx2x *bp)
*/
mb();
- REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
- bp->spq_prod_idx);
+ REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+ bp->spq_prod_idx);
mmiowb();
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 76a4668c50fe..8e0a317b31f7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -170,7 +170,9 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
wmb();
/* Trigger the PF FW */
- writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+ writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+
+ mmiowb();
/* Wait for PF to complete */
while ((tout >= 0) && (!*done)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c7e5e6f09647..3ff5f65758a3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1922,7 +1922,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
/* Sync BD data before updating doorbell */
wmb();
- bnxt_db_write(bp, db, DB_KEY_TX | prod);
+ bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod);
}
cpr->cp_raw_cons = raw_cons;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5e3d62189cab..960162c9386c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1402,6 +1402,15 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
+/* For TX and RX ring doorbells with no ordering guarantee */
+static inline void bnxt_db_write_relaxed(struct bnxt *bp, void __iomem *db,
+ u32 val)
+{
+ writel_relaxed(val, db);
+ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+ writel_relaxed(val, db);
+}
+
/* For TX and RX ring doorbells */
static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val)
{
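bnxt_db_write_relaxed() mirrors bnxt_db_write() for callers that have already issued their own wmb(), such as the TX completion path patched above; the write is repeated when the controller needs the double-doorbell workaround. A usage sketch from such a caller:

    wmb();                                            /* sync BD data first */
    bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod);  /* then ring, relaxed */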
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 7db8edc643ec..2f4cb5c11e18 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1348,7 +1348,7 @@ static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
dma_unmap_addr_set(cb, dma_addr, 0);
}
- return 0;
+ return NULL;
}
/* Simple helper to free a receive control block's resources */
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index ecdef42f0ae6..ef4a0c326736 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -63,24 +63,24 @@ MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
/* 1 normal messages, 0 quiet .. 7 verbose. */
static int debug = 1;
-module_param(debug, int, S_IRUGO);
+module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug messages");
#ifdef CONFIG_SBMAC_COALESCE
static int int_pktcnt_tx = 255;
-module_param(int_pktcnt_tx, int, S_IRUGO);
+module_param(int_pktcnt_tx, int, 0444);
MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");
static int int_timeout_tx = 255;
-module_param(int_timeout_tx, int, S_IRUGO);
+module_param(int_timeout_tx, int, 0444);
MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");
static int int_pktcnt_rx = 64;
-module_param(int_pktcnt_rx, int, S_IRUGO);
+module_param(int_pktcnt_rx, int, 0444);
MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");
static int int_timeout_rx = 64;
-module_param(int_timeout_rx, int, S_IRUGO);
+module_param(int_timeout_rx, int, 0444);
MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#endif
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f2593978ae75..08bbb639be1a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10799,11 +10799,11 @@ static ssize_t tg3_show_temp(struct device *dev,
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
TG3_TEMP_SENSOR_OFFSET);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
+static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
TG3_TEMP_CAUTION_OFFSET);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
+static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
TG3_TEMP_MAX_OFFSET);
static struct attribute *tg3_attrs[] = {
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index a843076597ec..69cc3e0119d6 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -46,7 +46,7 @@ module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
static uint bna_debugfs_enable = 1;
-module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
+module_param(bna_debugfs_enable, uint, 0644);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
" Range[false:0|true:1]");
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index cebfe3bd086e..933799be0471 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -486,11 +486,11 @@ struct bnad_debugfs_entry {
};
static const struct bnad_debugfs_entry bnad_debugfs_files[] = {
- { "fwtrc", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, },
- { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, },
- { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, },
- { "regwr", S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, },
- { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, },
+ { "fwtrc", S_IFREG | 0444, &bnad_debugfs_op_fwtrc, },
+ { "fwsave", S_IFREG | 0444, &bnad_debugfs_op_fwsave, },
+ { "regrd", S_IFREG | 0644, &bnad_debugfs_op_regrd, },
+ { "regwr", S_IFREG | 0200, &bnad_debugfs_op_regwr, },
+ { "drvinfo", S_IFREG | 0444, &bnad_debugfs_op_drvinfo, },
};
static struct dentry *bna_debugfs_root;
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index d59497a7bdce..6aeb1045c302 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -336,18 +336,7 @@ static struct pci_driver cavium_ptp_driver = {
.remove = cavium_ptp_remove,
};
-static int __init cavium_ptp_init_module(void)
-{
- return pci_register_driver(&cavium_ptp_driver);
-}
-
-static void __exit cavium_ptp_cleanup_module(void)
-{
- pci_unregister_driver(&cavium_ptp_driver);
-}
-
-module_init(cavium_ptp_init_module);
-module_exit(cavium_ptp_cleanup_module);
+module_pci_driver(cavium_ptp_driver);
MODULE_DESCRIPTION(DRV_NAME);
MODULE_AUTHOR("Cavium Networks <[email protected]>");
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 58b5c75fd2ee..43c5ba0af12b 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1635,28 +1635,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
}
/**
- * \brief Check Tx queue state for a given network buffer
- * @param lio per-network private data
- * @param skb network buffer
- */
-static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
-{
- int q, iq;
-
- q = skb->queue_mapping;
- iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
-
- if (octnet_iq_is_full(lio->oct_dev, iq))
- return 0;
-
- if (__netif_subqueue_stopped(lio->netdev, q)) {
- INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
- netif_wake_subqueue(lio->netdev, q);
- }
- return 1;
-}
-
-/**
* \brief Unmap and free network buffer
* @param buf buffer
*/
@@ -1673,8 +1651,6 @@ static void free_netbuf(void *buf)
dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
DMA_TO_DEVICE);
- check_txq_state(lio, skb);
-
tx_buffer_free(skb);
}
@@ -1715,8 +1691,6 @@ static void free_netsgbuf(void *buf)
list_add_tail(&g->list, &lio->glist[iq]);
spin_unlock(&lio->glist_lock[iq]);
- check_txq_state(lio, skb); /* mq support: sub-queue state check */
-
tx_buffer_free(skb);
}
@@ -1762,8 +1736,6 @@ static void free_netsgbuf_with_resp(void *buf)
spin_unlock(&lio->glist_lock[iq]);
/* Don't free the skb yet */
-
- check_txq_state(lio, skb);
}
/**
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index d5f5c9a693ee..dc62698bdaf7 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -954,29 +954,6 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
}
/**
- * \brief Check Tx queue state for a given network buffer
- * @param lio per-network private data
- * @param skb network buffer
- */
-static int check_txq_state(struct lio *lio, struct sk_buff *skb)
-{
- int q, iq;
-
- q = skb->queue_mapping;
- iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
-
- if (octnet_iq_is_full(lio->oct_dev, iq))
- return 0;
-
- if (__netif_subqueue_stopped(lio->netdev, q)) {
- INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
- netif_wake_subqueue(lio->netdev, q);
- }
-
- return 1;
-}
-
-/**
* \brief Unmap and free network buffer
* @param buf buffer
*/
@@ -993,8 +970,6 @@ static void free_netbuf(void *buf)
dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
DMA_TO_DEVICE);
- check_txq_state(lio, skb);
-
tx_buffer_free(skb);
}
@@ -1036,8 +1011,6 @@ static void free_netsgbuf(void *buf)
list_add_tail(&g->list, &lio->glist[iq]);
spin_unlock(&lio->glist_lock[iq]);
- check_txq_state(lio, skb); /* mq support: sub-queue state check */
-
tx_buffer_free(skb);
}
@@ -1083,8 +1056,6 @@ static void free_netsgbuf_with_resp(void *buf)
spin_unlock(&lio->glist_lock[iq]);
/* Don't free the skb yet */
-
- check_txq_state(lio, skb);
}
/**
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 82a783db5baf..75eea83c7cc6 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -712,9 +712,13 @@ union oct_txpciq {
u64 pkind:6;
u64 use_qpg:1;
u64 qpg:11;
- u64 reserved:30;
+ u64 reserved0:10;
+ u64 ctrl_qpg:11;
+ u64 reserved:9;
#else
- u64 reserved:30;
+ u64 reserved:9;
+ u64 ctrl_qpg:11;
+ u64 reserved0:10;
u64 qpg:11;
u64 use_qpg:1;
u64 pkind:6;
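The old 30-bit reserved field is split so a separate QPG value can be carried for control queues; the #else branch mirrors the field order for the opposite endianness, and the new widths still sum to 30 bits, so every field below them keeps its position and the union stays 64 bits wide. A compile-time check of that invariant, as a sketch usable in any function body:

    BUILD_BUG_ON(10 + 11 + 9 != 30);   /* reserved0 + ctrl_qpg + reserved */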
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 2766af05b89e..b1270355b0b1 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -628,7 +628,8 @@ octeon_prepare_soft_command(struct octeon_device *oct,
pki_ih3->tag = LIO_CONTROL;
pki_ih3->tagtype = ATOMIC_TAG;
pki_ih3->qpg =
- oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
+ oct->instr_queue[sc->iq_no]->txpciq.s.ctrl_qpg;
+
pki_ih3->pm = 0x7;
pki_ih3->sl = 8;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 7d9c5ffbd041..73fe3881414b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -63,7 +63,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");
static int cpi_alg = CPI_ALG_NONE;
-module_param(cpi_alg, int, S_IRUGO);
+module_param(cpi_alg, int, 0444);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 185fe8df7628..2edfdbdaae48 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -776,11 +776,11 @@ static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
-static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
+static DEVICE_ATTR(name, 0644, show_##name, store_method)
CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
@@ -859,7 +859,7 @@ static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
{ \
return tm_attr_store(d, buf, len, sched); \
} \
-static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
+static DEVICE_ATTR(name, 0644, show_##name, store_##name)
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index de2ba86eccfd..251d5bdc972f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2752,7 +2752,7 @@ DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
{
- debugfs_create_file_size(name, S_IRUSR, adap->debugfs_root,
+ debugfs_create_file_size(name, 0400, adap->debugfs_root,
(void *)adap + idx, &mem_debugfs_fops,
size_mb << 20);
}
@@ -2947,65 +2947,65 @@ int t4_setup_debugfs(struct adapter *adap)
struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
- { "cim_la", &cim_la_fops, S_IRUSR, 0 },
- { "cim_pif_la", &cim_pif_la_fops, S_IRUSR, 0 },
- { "cim_ma_la", &cim_ma_la_fops, S_IRUSR, 0 },
- { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
- { "clk", &clk_debugfs_fops, S_IRUSR, 0 },
- { "devlog", &devlog_fops, S_IRUSR, 0 },
- { "mboxlog", &mboxlog_fops, S_IRUSR, 0 },
- { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
- { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
- { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
- { "mbox3", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
- { "mbox4", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 4 },
- { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
- { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
- { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
- { "trace0", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
- { "trace1", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
- { "trace2", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
- { "trace3", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
- { "l2t", &t4_l2t_fops, S_IRUSR, 0},
- { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
- { "rss", &rss_debugfs_fops, S_IRUSR, 0 },
- { "rss_config", &rss_config_debugfs_fops, S_IRUSR, 0 },
- { "rss_key", &rss_key_debugfs_fops, S_IRUSR, 0 },
- { "rss_pf_config", &rss_pf_config_debugfs_fops, S_IRUSR, 0 },
- { "rss_vf_config", &rss_vf_config_debugfs_fops, S_IRUSR, 0 },
- { "sge_qinfo", &sge_qinfo_debugfs_fops, S_IRUSR, 0 },
- { "ibq_tp0", &cim_ibq_fops, S_IRUSR, 0 },
- { "ibq_tp1", &cim_ibq_fops, S_IRUSR, 1 },
- { "ibq_ulp", &cim_ibq_fops, S_IRUSR, 2 },
- { "ibq_sge0", &cim_ibq_fops, S_IRUSR, 3 },
- { "ibq_sge1", &cim_ibq_fops, S_IRUSR, 4 },
- { "ibq_ncsi", &cim_ibq_fops, S_IRUSR, 5 },
- { "obq_ulp0", &cim_obq_fops, S_IRUSR, 0 },
- { "obq_ulp1", &cim_obq_fops, S_IRUSR, 1 },
- { "obq_ulp2", &cim_obq_fops, S_IRUSR, 2 },
- { "obq_ulp3", &cim_obq_fops, S_IRUSR, 3 },
- { "obq_sge", &cim_obq_fops, S_IRUSR, 4 },
- { "obq_ncsi", &cim_obq_fops, S_IRUSR, 5 },
- { "tp_la", &tp_la_fops, S_IRUSR, 0 },
- { "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 },
- { "sensors", &sensors_debugfs_fops, S_IRUSR, 0 },
- { "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 },
- { "tx_rate", &tx_rate_debugfs_fops, S_IRUSR, 0 },
- { "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 },
+ { "cim_la", &cim_la_fops, 0400, 0 },
+ { "cim_pif_la", &cim_pif_la_fops, 0400, 0 },
+ { "cim_ma_la", &cim_ma_la_fops, 0400, 0 },
+ { "cim_qcfg", &cim_qcfg_fops, 0400, 0 },
+ { "clk", &clk_debugfs_fops, 0400, 0 },
+ { "devlog", &devlog_fops, 0400, 0 },
+ { "mboxlog", &mboxlog_fops, 0400, 0 },
+ { "mbox0", &mbox_debugfs_fops, 0600, 0 },
+ { "mbox1", &mbox_debugfs_fops, 0600, 1 },
+ { "mbox2", &mbox_debugfs_fops, 0600, 2 },
+ { "mbox3", &mbox_debugfs_fops, 0600, 3 },
+ { "mbox4", &mbox_debugfs_fops, 0600, 4 },
+ { "mbox5", &mbox_debugfs_fops, 0600, 5 },
+ { "mbox6", &mbox_debugfs_fops, 0600, 6 },
+ { "mbox7", &mbox_debugfs_fops, 0600, 7 },
+ { "trace0", &mps_trc_debugfs_fops, 0600, 0 },
+ { "trace1", &mps_trc_debugfs_fops, 0600, 1 },
+ { "trace2", &mps_trc_debugfs_fops, 0600, 2 },
+ { "trace3", &mps_trc_debugfs_fops, 0600, 3 },
+ { "l2t", &t4_l2t_fops, 0400, 0},
+ { "mps_tcam", &mps_tcam_debugfs_fops, 0400, 0 },
+ { "rss", &rss_debugfs_fops, 0400, 0 },
+ { "rss_config", &rss_config_debugfs_fops, 0400, 0 },
+ { "rss_key", &rss_key_debugfs_fops, 0400, 0 },
+ { "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
+ { "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
+ { "sge_qinfo", &sge_qinfo_debugfs_fops, 0400, 0 },
+ { "ibq_tp0", &cim_ibq_fops, 0400, 0 },
+ { "ibq_tp1", &cim_ibq_fops, 0400, 1 },
+ { "ibq_ulp", &cim_ibq_fops, 0400, 2 },
+ { "ibq_sge0", &cim_ibq_fops, 0400, 3 },
+ { "ibq_sge1", &cim_ibq_fops, 0400, 4 },
+ { "ibq_ncsi", &cim_ibq_fops, 0400, 5 },
+ { "obq_ulp0", &cim_obq_fops, 0400, 0 },
+ { "obq_ulp1", &cim_obq_fops, 0400, 1 },
+ { "obq_ulp2", &cim_obq_fops, 0400, 2 },
+ { "obq_ulp3", &cim_obq_fops, 0400, 3 },
+ { "obq_sge", &cim_obq_fops, 0400, 4 },
+ { "obq_ncsi", &cim_obq_fops, 0400, 5 },
+ { "tp_la", &tp_la_fops, 0400, 0 },
+ { "ulprx_la", &ulprx_la_fops, 0400, 0 },
+ { "sensors", &sensors_debugfs_fops, 0400, 0 },
+ { "pm_stats", &pm_stats_debugfs_fops, 0400, 0 },
+ { "tx_rate", &tx_rate_debugfs_fops, 0400, 0 },
+ { "cctrl", &cctrl_tbl_debugfs_fops, 0400, 0 },
#if IS_ENABLED(CONFIG_IPV6)
- { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
+ { "clip_tbl", &clip_tbl_debugfs_fops, 0400, 0 },
#endif
- { "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
- { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
- { "meminfo", &meminfo_fops, S_IRUSR, 0 },
- { "crypto", &chcr_stats_debugfs_fops, S_IRUSR, 0 },
+ { "tids", &tid_info_debugfs_fops, 0400, 0},
+ { "blocked_fl", &blocked_fl_fops, 0600, 0 },
+ { "meminfo", &meminfo_fops, 0400, 0 },
+ { "crypto", &chcr_stats_debugfs_fops, 0400, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
*/
static struct t4_debugfs_entry t5_debugfs_files[] = {
- { "obq_sge_rx_q0", &cim_obq_fops, S_IRUSR, 6 },
- { "obq_sge_rx_q1", &cim_obq_fops, S_IRUSR, 7 },
+ { "obq_sge_rx_q0", &cim_obq_fops, 0400, 6 },
+ { "obq_sge_rx_q1", &cim_obq_fops, 0400, 7 },
};
add_debugfs_files(adap,
@@ -3050,11 +3050,11 @@ int t4_setup_debugfs(struct adapter *adap)
}
}
- de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
+ de = debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap,
&flash_debugfs_fops, adap->params.sf_size);
- debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
+ debugfs_create_bool("use_backdoor", 0600,
adap->debugfs_root, &adap->use_bd);
- debugfs_create_bool("trace_rss", S_IWUSR | S_IRUSR,
+ debugfs_create_bool("trace_rss", 0600,
adap->debugfs_root, &adap->trace_rss);
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 7bd8497fd9be..9a81b52307a9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2402,11 +2402,11 @@ struct cxgb4vf_debugfs_entry {
};
static struct cxgb4vf_debugfs_entry debugfs_files[] = {
- { "mboxlog", S_IRUGO, &mboxlog_fops },
- { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
- { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
- { "resources", S_IRUGO, &resources_proc_fops },
- { "interfaces", S_IRUGO, &interfaces_proc_fops },
+ { "mboxlog", 0444, &mboxlog_fops },
+ { "sge_qinfo", 0444, &sge_qinfo_debugfs_fops },
+ { "sge_qstats", 0444, &sge_qstats_proc_fops },
+ { "resources", 0444, &resources_proc_fops },
+ { "interfaces", 0444, &interfaces_proc_fops },
};
/*
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 1b79a6defd56..d71cba0842c5 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -602,7 +602,7 @@ static struct pci_driver pci_driver = {
};
module_pci_driver(pci_driver);
-module_param(polling_frequency, long, S_IRUGO);
+module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 5774fb6f8aa0..c697e79e491e 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -34,11 +34,11 @@ MODULE_LICENSE("GPL");
* Use sysfs method to enable/disable VFs.
*/
static unsigned int num_vfs;
-module_param(num_vfs, uint, S_IRUGO);
+module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
static ushort rx_frag_size = 2048;
-module_param(rx_frag_size, ushort, S_IRUGO);
+module_param(rx_frag_size, ushort, 0444);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
/* Per-module error detection/recovery workq shared across all functions.
@@ -5788,7 +5788,7 @@ static ssize_t be_hwmon_show_temp(struct device *dev,
adapter->hwmon_info.be_on_die_temp * 1000);
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+static SENSOR_DEVICE_ATTR(temp1_input, 0444,
be_hwmon_show_temp, NULL, 1);
static struct attribute *be_hwmon_attrs[] = {
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 8870a9a798ca..dc0850b3b517 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -2,7 +2,6 @@ config FSL_FMAN
tristate "FMan support"
depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
- depends on HAS_DMA
select PHYLIB
default n
help
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 4829dcd9e077..7b5b95f52c09 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -567,7 +567,6 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
}
pdev->dev.parent = priv->dev;
- set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
ret = platform_device_add_data(pdev, &data, sizeof(data));
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 40a3eb70629e..a31b4adf6e6a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -764,7 +764,7 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
/* Config bd buffer end */
hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
- HNS3_TXD_BDTYPE_M, 0);
+ HNS3_TXD_BDTYPE_S, 0);
hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 9d07116a4426..eb3c34f3cf87 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -638,7 +638,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
if (!h->ae_algo || !h->ae_algo->ops ||
!h->ae_algo->ops->get_rss_key_size)
- return -EOPNOTSUPP;
+ return 0;
return h->ae_algo->ops->get_rss_key_size(h);
}
@@ -649,7 +649,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
if (!h->ae_algo || !h->ae_algo->ops ||
!h->ae_algo->ops->get_rss_indir_size)
- return -EOPNOTSUPP;
+ return 0;
return h->ae_algo->ops->get_rss_indir_size(h);
}
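Returning -EOPNOTSUPP from these helpers was a bug: their return type is u32, so the negative errno would reach the ethtool core as an enormous key or table size. Returning 0 is the conventional way to report "no RSS support" here. To illustrate the wraparound:

    u32 size = (u32)-EOPNOTSUPP;   /* 0xffffffa1 == 4294967201, not an error */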
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index bede4117bad9..2066dd734444 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3466,8 +3466,6 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
struct hclge_vport *vport = hdev->vport;
int i;
- netdev_rss_key_fill(vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
-
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
vport[i].rss_tuple_sets.ipv4_tcp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
@@ -3487,6 +3485,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+
+ netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE);
}
hclge_rss_indir_init_cfg(hdev);
@@ -3584,6 +3584,9 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
int vector_id, ret;
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
vector_id = hclge_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&handle->pdev->dev,
@@ -3781,13 +3784,16 @@ static int hclge_ae_start(struct hnae3_handle *handle)
clear_bit(HCLGE_STATE_DOWN, &hdev->state);
mod_timer(&hdev->service_timer, jiffies + HZ);
+ /* reset tqp stats */
+ hclge_reset_tqp_stats(handle);
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
ret = hclge_mac_start_phy(hdev);
if (ret)
return ret;
- /* reset tqp stats */
- hclge_reset_tqp_stats(handle);
-
return 0;
}
@@ -3797,6 +3803,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
struct hclge_dev *hdev = vport->back;
int i;
+ del_timer_sync(&hdev->service_timer);
+ cancel_work_sync(&hdev->service_task);
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ return;
+
for (i = 0; i < vport->alloc_tqps; i++)
hclge_tqp_enable(hdev, i, 0, false);
@@ -3807,8 +3819,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
hclge_update_link_status(hdev);
}
@@ -4940,6 +4950,9 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
u16 queue_gid;
int ret;
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ return;
+
queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
ret = hclge_tqp_enable(hdev, queue_id, 0, false);
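The hclge_main.c hunks share one pattern: while HCLGE_STATE_RST_HANDLING is set, any path that would touch the hardware bails out early, and hclge_ae_stop() now cancels the service timer and work item before that check so nothing can re-arm mid-teardown. A compilable sketch of the ordering, with plain C stand-ins for del_timer_sync()/cancel_work_sync() and test_bit():

#include <stdio.h>
#include <stdbool.h>

static bool rst_handling;

static void stop_service_timer(void) { puts("timer and work cancelled"); }
static void disable_queues(void)     { puts("queues disabled"); }

/* mirrors the reordered hclge_ae_stop(): cancel deferred work first,
 * then skip hardware teardown entirely if a reset owns the device
 */
static void ae_stop(void)
{
        stop_service_timer();
        if (rst_handling)
                return;
        disable_queues();
}

int main(void)
{
        ae_stop();              /* normal stop: both steps run */
        rst_handling = true;
        ae_stop();              /* during reset: only cancellation runs */
        return 0;
}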
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index c1dea3a47bdd..682c2d6618e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -60,6 +60,9 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
struct hclge_desc desc;
int ret;
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
@@ -95,6 +98,9 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
struct hclge_desc desc;
int ret;
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
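The MDIO accessors get the same guard: while the reset is in flight the bus is unusable, so reads and writes are skipped and report success (a read yields 0) rather than queuing commands the firmware cannot service. Modeled minimally:

#include <stdio.h>
#include <stdbool.h>

static bool rst_handling;

static int mdio_write(int reg, int val)
{
        if (rst_handling)
                return 0;       /* firmware resetting: skip, report ok */
        printf("write reg %d = %d\n", reg, val);
        return 0;
}

int main(void)
{
        mdio_write(1, 42);      /* goes through */
        rst_handling = true;
        mdio_write(1, 43);      /* silently skipped */
        return 0;
}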
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 4878b7169e0f..ba580bfae512 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2903,8 +2903,7 @@ static ssize_t ehea_show_port_id(struct device *dev,
return sprintf(buf, "%d", port->logical_port_id);
}
-static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
- NULL);
+static DEVICE_ATTR(log_port_id, 0444, ehea_show_port_id, NULL);
static void logical_port_release(struct device *dev)
{
@@ -3235,8 +3234,8 @@ static ssize_t ehea_remove_port(struct device *dev,
return (ssize_t) count;
}
-static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
-static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
+static DEVICE_ATTR(probe_port, 0200, NULL, ehea_probe_port);
+static DEVICE_ATTR(remove_port, 0200, NULL, ehea_remove_port);
static int ehea_create_device_sysfs(struct platform_device *dev)
{
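This hunk and the ibmveth one below are checkpatch-style conversions from symbolic permission macros to octal literals; the bits are identical, the octal form is just easier to audit at a glance. The equivalence is checkable in userspace, where the same macros exist:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        printf("S_IRUSR|S_IRGRP|S_IROTH = %04o\n",
               S_IRUSR | S_IRGRP | S_IROTH);                 /* 0444 */
        printf("S_IWUSR                 = %04o\n", S_IWUSR); /* 0200 */
        return 0;
}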
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index f210398200ec..c1b51edaaf62 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -82,7 +82,7 @@ module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
static bool old_large_send __read_mostly;
-module_param(old_large_send, bool, S_IRUGO);
+module_param(old_large_send, bool, 0444);
MODULE_PARM_DESC(old_large_send,
"Use old large send method on firmware that supports the new method");
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 1feb54b6d92e..14d287bed33c 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -251,6 +251,20 @@ config I40EVF
will be called i40evf. MSI-X interrupt support is required
for this driver to work correctly.
+config ICE
+ tristate "Intel(R) Ethernet Connection E800 Series Support"
+ default n
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) Ethernet Connection E800 Series of
+ devices. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide that can be located at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called ice.
+
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 90af7757a885..807a4f8c7e4e 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_I40EVF) += i40evf/
obj-$(CONFIG_FM10K) += fm10k/
+obj-$(CONFIG_ICE) += ice/
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1d33a8b3ef54..a44139c1de80 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -159,9 +159,17 @@ enum i40e_state_t {
__I40E_BAD_EEPROM,
__I40E_DOWN_REQUESTED,
__I40E_FD_FLUSH_REQUESTED,
+ __I40E_FD_ATR_AUTO_DISABLED,
+ __I40E_FD_SB_AUTO_DISABLED,
__I40E_RESET_FAILED,
__I40E_PORT_SUSPENDED,
__I40E_VF_DISABLE,
+ __I40E_MACVLAN_SYNC_PENDING,
+ __I40E_UDP_FILTER_SYNC_PENDING,
+ __I40E_TEMP_LINK_POLLING,
+ __I40E_CLIENT_SERVICE_REQUESTED,
+ __I40E_CLIENT_L2_CHANGE,
+ __I40E_CLIENT_RESET,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
@@ -510,40 +518,32 @@ struct i40e_pf {
#define I40E_HW_RESTART_AUTONEG BIT(18)
#define I40E_HW_STOPPABLE_FW_LLDP BIT(19)
- u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(0)
-#define I40E_FLAG_MSI_ENABLED BIT_ULL(1)
-#define I40E_FLAG_MSIX_ENABLED BIT_ULL(2)
-#define I40E_FLAG_RSS_ENABLED BIT_ULL(3)
-#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(4)
-#define I40E_FLAG_FILTER_SYNC BIT_ULL(5)
-#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(6)
-#define I40E_FLAG_DCB_CAPABLE BIT_ULL(7)
-#define I40E_FLAG_DCB_ENABLED BIT_ULL(8)
-#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(9)
-#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(10)
-#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(11)
-#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(12)
-#define I40E_FLAG_MFP_ENABLED BIT_ULL(13)
-#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(14)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(15)
-#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(16)
-#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(17)
-#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(18)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(19)
-#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(20)
-#define I40E_FLAG_LEGACY_RX BIT_ULL(21)
-#define I40E_FLAG_PTP BIT_ULL(22)
-#define I40E_FLAG_IWARP_ENABLED BIT_ULL(23)
-#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(24)
-#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(25)
-#define I40E_FLAG_CLIENT_RESET BIT_ULL(26)
-#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT_ULL(27)
-#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(28)
-#define I40E_FLAG_TC_MQPRIO BIT_ULL(29)
-#define I40E_FLAG_FD_SB_INACTIVE BIT_ULL(30)
-#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT_ULL(31)
-#define I40E_FLAG_DISABLE_FW_LLDP BIT_ULL(32)
+ u32 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
+#define I40E_FLAG_MSI_ENABLED BIT(1)
+#define I40E_FLAG_MSIX_ENABLED BIT(2)
+#define I40E_FLAG_RSS_ENABLED BIT(3)
+#define I40E_FLAG_VMDQ_ENABLED BIT(4)
+#define I40E_FLAG_SRIOV_ENABLED BIT(5)
+#define I40E_FLAG_DCB_CAPABLE BIT(6)
+#define I40E_FLAG_DCB_ENABLED BIT(7)
+#define I40E_FLAG_FD_SB_ENABLED BIT(8)
+#define I40E_FLAG_FD_ATR_ENABLED BIT(9)
+#define I40E_FLAG_MFP_ENABLED BIT(10)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(11)
+#define I40E_FLAG_VEB_MODE_ENABLED BIT(12)
+#define I40E_FLAG_VEB_STATS_ENABLED BIT(13)
+#define I40E_FLAG_LINK_POLLING_ENABLED BIT(14)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(15)
+#define I40E_FLAG_LEGACY_RX BIT(16)
+#define I40E_FLAG_PTP BIT(17)
+#define I40E_FLAG_IWARP_ENABLED BIT(18)
+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(19)
+#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(20)
+#define I40E_FLAG_TC_MQPRIO BIT(21)
+#define I40E_FLAG_FD_SB_INACTIVE BIT(22)
+#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
+#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
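The i40e rework moves every bit that toggles at runtime out of pf->flags, whose |= and &= updates are plain read-modify-write and hence racy, into the pf->state bitmap manipulated with atomic bitops. What stays in pf->flags changes only at init or under RTNL, so the field can shrink from u64 to u32 and the defines from BIT_ULL() to BIT(). The payoff is in idioms like test_and_clear_bit(), which consumes an event exactly once even with concurrent callers; a C11-atomics model of the kernel bitops (a sketch, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong state;

enum { CLIENT_SERVICE_REQUESTED = 0 };

static void set_bit_(int nr)
{
        atomic_fetch_or(&state, 1UL << nr);
}

static bool test_and_clear_bit_(int nr)
{
        unsigned long mask = 1UL << nr;

        return atomic_fetch_and(&state, ~mask) & mask;
}

int main(void)
{
        set_bit_(CLIENT_SERVICE_REQUESTED);
        /* first consumer wins; a second sees the bit already cleared */
        printf("%d %d\n",
               test_and_clear_bit_(CLIENT_SERVICE_REQUESTED),
               test_and_clear_bit_(CLIENT_SERVICE_REQUESTED));  /* 1 0 */
        return 0;
}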
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 999dea5a7c9e..d8ce4999864f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -376,9 +376,8 @@ void i40e_client_subtask(struct i40e_pf *pf)
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int ret = 0;
- if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
+ if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state))
return;
- pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
cdev = pf->cinst;
/* If we're down or resetting, just bail */
@@ -459,7 +458,7 @@ int i40e_lan_add_device(struct i40e_pf *pf)
* added, we can schedule a subtask to go initiate the clients if
* they can be launched at probe time.
*/
- pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
out:
@@ -554,7 +553,7 @@ static void i40e_client_prepare(struct i40e_client *client)
pf = ldev->pf;
i40e_client_add_instance(pf);
/* Start the client subtask */
- pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
}
mutex_unlock(&i40e_device_mutex);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 846a9d597e01..b974482ff630 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3951,7 +3951,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return -EOPNOTSUPP;
- if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)
+ if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
return -ENOSPC;
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@@ -4436,21 +4436,12 @@ flags_complete:
}
}
- /* Compare and exchange the new flags into place. If we failed, that
- * is if cmpxchg returns anything but the old value, this means that
- * something else has modified the flags variable since we copied it
- * originally. We'll just punt with an error and log something in the
- * message buffer.
- *
- * This is the point of no return for this function. We need to have
- * checked any discrepancies or misconfigurations and returned
- * EOPNOTSUPP before updating pf->flags here.
+ /* Now that we've checked to ensure that the new flags are valid, load
+ * them into place. Since we only modify flags either (a) during
+ * initialization or (b) while holding the RTNL lock, we don't need
+ * anything fancy here.
*/
- if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
- dev_warn(&pf->pdev->dev,
- "Unable to update pf->flags as it was modified by another thread...\n");
- return -EAGAIN;
- }
+ pf->flags = new_flags;
/* Process any additional changes needed as a result of flag changes.
* The changed_flags value reflects the list of bits that were
@@ -4460,7 +4451,7 @@ flags_complete:
/* Flush current ATR settings if ATR was disabled */
if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) &&
!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
- pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
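With pf->flags now written only at init time or under RTNL, the removed cmpxchg64()-and-bail path is dead weight and a plain store suffices, exactly as the replacement comment says. The shape of that simplification, with a pthread mutex standing in for rtnl_lock():

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static uint32_t flags;

static void set_flags_locked(uint32_t new_flags)
{
        pthread_mutex_lock(&rtnl);
        flags = new_flags;      /* writers serialized: no cmpxchg needed */
        pthread_mutex_unlock(&rtnl);
}

int main(void)
{
        set_flags_locked(0x5);
        printf("flags = %#x\n", flags);
        return 0;
}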
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 536ed8e8a96f..16229998fb1e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1083,13 +1083,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_lpi_count, &nsd->rx_lpi_count);
if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
- !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
+ !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
nsd->fd_sb_status = true;
else
nsd->fd_sb_status = false;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
- !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
+ !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
nsd->fd_atr_status = true;
else
nsd->fd_atr_status = false;
@@ -1382,7 +1382,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
hash_add(vsi->mac_filter_hash, &f->hlist, key);
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}
/* If we're asked to add a filter that has been marked for removal, it
@@ -1432,7 +1432,7 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
}
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->state);
}
/**
@@ -1955,7 +1955,7 @@ static void i40e_set_rx_mode(struct net_device *netdev)
/* check for other flag changes */
if (vsi->current_netdev_flags != vsi->netdev->flags) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}
}
@@ -2577,9 +2577,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
int v;
- if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
+ if (!pf)
+ return;
+ if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
return;
- pf->flags &= ~I40E_FLAG_FILTER_SYNC;
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
@@ -2588,7 +2589,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
if (ret) {
/* come back and try again later */
- pf->flags |= I40E_FLAG_FILTER_SYNC;
+ set_bit(__I40E_MACVLAN_SYNC_PENDING,
+ pf->state);
break;
}
}
@@ -2632,8 +2634,8 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
- pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
- I40E_FLAG_CLIENT_L2_CHANGE);
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
return 0;
}
@@ -4720,9 +4722,9 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi);
vsi->current_netdev_flags = 0;
- pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
- pf->flags |= I40E_FLAG_CLIENT_RESET;
+ set_bit(__I40E_CLIENT_RESET, pf->state);
}
/**
@@ -6493,7 +6495,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* On the next run of the service_task, notify any clients of the new
* opened netdev
*/
- pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
return 0;
@@ -8035,8 +8037,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
- pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
- I40E_FLAG_CLIENT_L2_CHANGE);
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
}
exit:
@@ -8142,12 +8144,10 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
**/
static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
{
- if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
- pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
+ if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
- }
}
/**
@@ -8156,7 +8156,7 @@ static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
**/
static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
{
- if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
+ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
/* ATR uses the same filtering logic as SB rules. It only
* functions properly if the input set mask is at the default
* settings. It is safe to restore the default input set
@@ -8166,7 +8166,6 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
- pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
@@ -8289,7 +8288,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
pf->fd_flush_timestamp = jiffies;
- pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -8309,7 +8308,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr && !pf->fd_tcp4_filter_cnt)
- pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
@@ -8433,13 +8432,12 @@ static void i40e_link_event(struct i40e_pf *pf)
/* On success, disable temp link polling */
if (status == I40E_SUCCESS) {
- if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
- pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
+ clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
} else {
/* Enable link polling temporarily until i40e_get_link_status
* returns I40E_SUCCESS
*/
- pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
+ set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
status);
return;
@@ -8491,7 +8489,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
pf->service_timer_previous = jiffies;
if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
- (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
+ test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
i40e_link_event(pf);
/* Update the stats for active netdevs so the network stack
@@ -9719,7 +9717,7 @@ static void i40e_sync_udp_filters(struct i40e_pf *pf)
pf->pending_udp_bitmap |= BIT_ULL(i);
}
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+ set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}
/**
@@ -9733,11 +9731,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
u16 port;
int i;
- if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
+ if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
return;
- pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
-
for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
if (pf->pending_udp_bitmap & BIT_ULL(i)) {
pf->pending_udp_bitmap &= ~BIT_ULL(i);
@@ -9789,17 +9785,15 @@ static void i40e_service_task(struct work_struct *work)
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
- if (pf->flags & I40E_FLAG_CLIENT_RESET) {
+ if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
/* Client subtask will reopen next time through. */
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
- pf->flags &= ~I40E_FLAG_CLIENT_RESET;
} else {
i40e_client_subtask(pf);
- if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
+ if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
+ pf->state))
i40e_notify_client_of_l2_param_changes(
pf->vsi[pf->lan_vsi]);
- pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
- }
}
i40e_sync_filters_subtask(pf);
i40e_sync_udp_filters_subtask(pf);
@@ -11291,20 +11285,18 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
need_reset = true;
i40e_fdir_filter_exit(pf);
}
- pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_FD_SB_AUTO_DISABLED);
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
/* reset fd counters */
pf->fd_add_err = 0;
pf->fd_atr_cnt = 0;
/* if ATR was auto disabled it can be re-enabled. */
- if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
- pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
- }
}
return need_reset;
}
@@ -11437,7 +11429,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
/* New port: add it and mark its index in the bitmap */
pf->udp_ports[next_idx].port = port;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+ set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}
/**
@@ -11478,7 +11470,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
*/
pf->udp_ports[idx].port = 0;
pf->pending_udp_bitmap |= BIT_ULL(idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+ set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
return;
not_found:
@@ -11823,6 +11815,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
.ndo_bpf = i40e_xdp,
+ .ndo_xdp_xmit = i40e_xdp_xmit,
+ .ndo_xdp_flush = i40e_xdp_flush,
};
/**
@@ -12240,7 +12234,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- pf->flags |= I40E_FLAG_FILTER_SYNC;
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
}
/* Update VSI BW information */
@@ -14356,7 +14350,13 @@ static int __maybe_unused i40e_suspend(struct device *dev)
if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
- i40e_prep_for_reset(pf, false);
+ /* Since we're going to destroy queues during the
+ * i40e_clear_interrupt_scheme(), we should hold the RTNL lock for this
+ * whole section
+ */
+ rtnl_lock();
+
+ i40e_prep_for_reset(pf, true);
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
@@ -14368,6 +14368,8 @@ static int __maybe_unused i40e_suspend(struct device *dev)
*/
i40e_clear_interrupt_scheme(pf);
+ rtnl_unlock();
+
return 0;
}
@@ -14385,6 +14387,11 @@ static int __maybe_unused i40e_resume(struct device *dev)
if (!test_bit(__I40E_SUSPENDED, pf->state))
return 0;
+ /* We need to hold the RTNL lock prior to restoring interrupt schemes,
+ * since we're going to be restoring queues
+ */
+ rtnl_lock();
+
/* We cleared the interrupt scheme when we suspended, so we need to
* restore it now to resume device functionality.
*/
@@ -14395,7 +14402,9 @@ static int __maybe_unused i40e_resume(struct device *dev)
}
clear_bit(__I40E_DOWN, pf->state);
- i40e_reset_and_rebuild(pf, false, false);
+ i40e_reset_and_rebuild(pf, false, true);
+
+ rtnl_unlock();
/* Clear suspended state last after everything is recovered */
clear_bit(__I40E_SUSPENDED, pf->state);
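The suspend/resume hunks take rtnl_lock() around queue teardown and rebuild, and the boolean handed to i40e_prep_for_reset()/i40e_reset_and_rebuild() flips to true, which, going by the comments above, tells the callee the lock is already held so it must not acquire it again. A minimal model of that lock_acquired convention:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void prep_for_reset(bool lock_acquired)
{
        if (!lock_acquired)
                pthread_mutex_lock(&rtnl);
        puts("tearing down queues under RTNL");
        if (!lock_acquired)
                pthread_mutex_unlock(&rtnl);
}

int main(void)
{
        /* suspend path: hold the lock across the whole section */
        pthread_mutex_lock(&rtnl);
        prep_for_reset(true);
        puts("clearing interrupt scheme");
        pthread_mutex_unlock(&rtnl);
        return 0;
}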
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 7ccd05bf4b06..f174c72480ab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -336,7 +336,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
} else {
pf->fd_tcp4_filter_cnt--;
}
@@ -594,8 +594,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
- pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
- pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
+ test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
+ /* These set_bit() calls aren't atomic with the
+ * test_bit() here, but worst case we potentially
+ * disable ATR and queue a flush right after SB
+ * support is re-enabled. That shouldn't cause an
+ * issue in practice
+ */
+ set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
}
@@ -608,11 +614,10 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
- !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
- pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
+ !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
+ pf->state))
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
- }
}
} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
@@ -1583,9 +1588,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = i40e_rx_offset(rx_ring);
-
- /* initialize pagecnt_bias to 1 representing we fully own page */
- bi->pagecnt_bias = 1;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
return true;
}
@@ -1951,8 +1955,8 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
- if (unlikely(!pagecnt_bias)) {
- page_ref_add(page, USHRT_MAX);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
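The recycling tweak front-loads USHRT_MAX - 1 page references at allocation and restocks when pagecnt_bias falls to 1 rather than 0, so the driver always keeps one reference of its own; presumably that is what keeps reuse safe once frames can leave the driver via XDP_REDIRECT (added below), though the diff itself only shows the arithmetic. The invariant, worked through in plain C:

#include <stdio.h>
#include <limits.h>

int main(void)
{
        unsigned int page_ref = 1;              /* from page allocation */
        unsigned int bias;

        page_ref += USHRT_MAX - 1;              /* page_ref_add() */
        bias = USHRT_MAX;

        /* each reuse hands one reference away: bias--; restock both
         * counters together once the bias is down to our last reference
         */
        while (bias > 1)
                bias--;
        printf("handed out: %u refs\n", page_ref - bias);   /* 65534 */

        page_ref += USHRT_MAX - 1;
        bias = USHRT_MAX;
        printf("restocked: ref=%u bias=%u\n", page_ref, bias);
        return 0;
}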
@@ -2210,7 +2214,7 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
struct xdp_buff *xdp)
{
- int result = I40E_XDP_PASS;
+ int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
struct bpf_prog *xdp_prog;
u32 act;
@@ -2229,6 +2233,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_ring(xdp, xdp_ring);
break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+ result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
@@ -2264,6 +2272,15 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
#endif
}
+static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
+{
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch.
+ */
+ wmb();
+ writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
+}
+
/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
@@ -2398,16 +2415,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
}
if (xdp_xmit) {
- struct i40e_ring *xdp_ring;
+ struct i40e_ring *xdp_ring =
+ rx_ring->vsi->xdp_rings[rx_ring->queue_index];
- xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch.
- */
- wmb();
-
- writel(xdp_ring->next_to_use, xdp_ring->tail);
+ i40e_xdp_ring_update_tail(xdp_ring);
+ xdp_do_flush_map();
}
rx_ring->skb = skb;
@@ -2647,7 +2659,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
- if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
+ if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
return;
/* if sampling is disabled do nothing */
@@ -2687,7 +2699,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
- if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
+ if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
return;
if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
/* HW ATR eviction will take care of removing filters on FIN
@@ -3655,3 +3667,49 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return i40e_xmit_frame_ring(skb, tx_ring);
}
+
+/**
+ * i40e_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @xdp: XDP buffer
+ *
+ * Returns Zero if sent, else an error code
+ **/
+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ unsigned int queue_index = smp_processor_id();
+ struct i40e_vsi *vsi = np->vsi;
+ int err;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return -ENETDOWN;
+
+ if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+ return -ENXIO;
+
+ err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
+ if (err != I40E_XDP_TX)
+ return -ENOSPC;
+
+ return 0;
+}
+
+/**
+ * i40e_xdp_flush - Implements ndo_xdp_flush
+ * @dev: netdev
+ **/
+void i40e_xdp_flush(struct net_device *dev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ unsigned int queue_index = smp_processor_id();
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (test_bit(__I40E_VSI_DOWN, vsi->state))
+ return;
+
+ if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+ return;
+
+ i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
+}
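Taken together, the i40e_txrx.c additions wire up XDP_REDIRECT: the RX verdict switch gains a case that hands the frame to xdp_do_redirect(), the tail bump is factored into i40e_xdp_ring_update_tail() so the NAPI loop and the new ndo_xdp_xmit/ndo_xdp_flush hooks share the same wmb()-then-write sequence, and redirect flushes are batched once per poll through xdp_do_flush_map(). The verdict mapping, reduced to a standalone sketch with a stub in place of the core helper:

#include <stdio.h>

enum { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT };
enum { RES_PASS, RES_TX, RES_CONSUMED };

static int xdp_do_redirect_stub(void) { return 0; }    /* 0 = success */

static int run_xdp(int act)
{
        switch (act) {
        case XDP_PASS:
                return RES_PASS;        /* build an skb as usual */
        case XDP_TX:
                return RES_TX;          /* bounce out our own XDP ring */
        case XDP_REDIRECT:
                /* hand off to another netdev/map; flushed at poll end */
                return xdp_do_redirect_stub() ? RES_CONSUMED : RES_TX;
        default:
                return RES_CONSUMED;    /* aborted/dropped */
        }
}

int main(void)
{
        printf("redirect -> %d\n", run_xdp(XDP_REDIRECT));  /* 1 (TX) */
        return 0;
}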
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 7f8220e65374..3043483ec426 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -510,6 +510,8 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
+int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp);
+void i40e_xdp_flush(struct net_device *dev);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
new file mode 100644
index 000000000000..4058673fd853
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018, Intel Corporation.
+
+#
+# Makefile for the Intel(R) Ethernet Connection E800 Series Linux Driver
+#
+
+obj-$(CONFIG_ICE) += ice.o
+
+ice-y := ice_main.o \
+ ice_controlq.o \
+ ice_common.o \
+ ice_nvm.o \
+ ice_switch.o \
+ ice_sched.o \
+ ice_txrx.o \
+ ice_ethtool.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
new file mode 100644
index 000000000000..d8b5fff581e7
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_H_
+#define _ICE_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/compiler.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/cpumask.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/workqueue.h>
+#include <linux/aer.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/bitmap.h>
+#include <linux/log2.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_bridge.h>
+#include <net/ipv6.h>
+#include "ice_devids.h"
+#include "ice_type.h"
+#include "ice_txrx.h"
+#include "ice_switch.h"
+#include "ice_common.h"
+#include "ice_sched.h"
+
+extern const char ice_drv_ver[];
+#define ICE_BAR0 0
+#define ICE_DFLT_NUM_DESC 128
+#define ICE_MIN_NUM_DESC 8
+#define ICE_MAX_NUM_DESC 8160
+#define ICE_REQ_DESC_MULTIPLE 32
+#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
+#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
+#define ICE_ETHTOOL_FWVER_LEN 32
+#define ICE_AQ_LEN 64
+#define ICE_MIN_MSIX 2
+#define ICE_NO_VSI 0xffff
+#define ICE_MAX_VSI_ALLOC 130
+#define ICE_MAX_TXQS 2048
+#define ICE_MAX_RXQS 2048
+#define ICE_VSI_MAP_CONTIG 0
+#define ICE_VSI_MAP_SCATTER 1
+#define ICE_MAX_SCATTER_TXQS 16
+#define ICE_MAX_SCATTER_RXQS 16
+#define ICE_Q_WAIT_RETRY_LIMIT 10
+#define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT)
+#define ICE_MAX_LG_RSS_QS 256
+#define ICE_MAX_SMALL_RSS_QS 8
+#define ICE_RES_VALID_BIT 0x8000
+#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
+#define ICE_INVAL_Q_INDEX 0xffff
+
+#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
+
+#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \
+ ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+#define ICE_UP_TABLE_TRANSLATE(val, i) \
+ (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
+ ICE_AQ_VSI_UP_TABLE_UP##i##_M)
+
+#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
+#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
+#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
+
+/* Macro for each VSI in a PF */
+#define ice_for_each_vsi(pf, i) \
+ for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
+
+/* Macros for each tx/rx ring in a VSI */
+#define ice_for_each_txq(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
+
+#define ice_for_each_rxq(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
+
+struct ice_tc_info {
+ u16 qoffset;
+ u16 qcount;
+};
+
+struct ice_tc_cfg {
+ u8 numtc; /* Total number of enabled TCs */
+ u8 ena_tc; /* TX map */
+ struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
+};
+
+struct ice_res_tracker {
+ u16 num_entries;
+ u16 search_hint;
+ u16 list[1];
+};
+
+struct ice_sw {
+ struct ice_pf *pf;
+ u16 sw_id; /* switch ID for this switch */
+ u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
+};
+
+enum ice_state {
+ __ICE_DOWN,
+ __ICE_NEEDS_RESTART,
+ __ICE_RESET_RECOVERY_PENDING, /* set by driver when reset starts */
+ __ICE_PFR_REQ, /* set by driver and peers */
+ __ICE_CORER_REQ, /* set by driver and peers */
+ __ICE_GLOBR_REQ, /* set by driver and peers */
+ __ICE_CORER_RECV, /* set by OICR handler */
+ __ICE_GLOBR_RECV, /* set by OICR handler */
+ __ICE_EMPR_RECV, /* set by OICR handler */
+ __ICE_SUSPENDED, /* set on module remove path */
+ __ICE_RESET_FAILED, /* set by reset/rebuild */
+ __ICE_ADMINQ_EVENT_PENDING,
+ __ICE_FLTR_OVERFLOW_PROMISC,
+ __ICE_CFG_BUSY,
+ __ICE_SERVICE_SCHED,
+ __ICE_STATE_NBITS /* must be last */
+};
+
+enum ice_vsi_flags {
+ ICE_VSI_FLAG_UMAC_FLTR_CHANGED,
+ ICE_VSI_FLAG_MMAC_FLTR_CHANGED,
+ ICE_VSI_FLAG_VLAN_FLTR_CHANGED,
+ ICE_VSI_FLAG_PROMISC_CHANGED,
+ ICE_VSI_FLAG_NBITS /* must be last */
+};
+
+/* struct that defines a VSI, associated with a dev */
+struct ice_vsi {
+ struct net_device *netdev;
+ struct ice_sw *vsw; /* switch this VSI is on */
+ struct ice_pf *back; /* back pointer to PF */
+ struct ice_port_info *port_info; /* back pointer to port_info */
+ struct ice_ring **rx_rings; /* rx ring array */
+ struct ice_ring **tx_rings; /* tx ring array */
+ struct ice_q_vector **q_vectors; /* q_vector array */
+
+ irqreturn_t (*irq_handler)(int irq, void *data);
+
+ u64 tx_linearize;
+ DECLARE_BITMAP(state, __ICE_STATE_NBITS);
+ DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ unsigned int current_netdev_flags;
+ u32 tx_restart;
+ u32 tx_busy;
+ u32 rx_buf_failed;
+ u32 rx_page_failed;
+ int num_q_vectors;
+ int base_vector;
+ enum ice_vsi_type type;
+ u16 vsi_num; /* HW (absolute) index of this VSI */
+ u16 idx; /* software index in pf->vsi[] */
+
+ /* Interrupt thresholds */
+ u16 work_lmt;
+
+ /* RSS config */
+ u16 rss_table_size; /* HW RSS table size */
+ u16 rss_size; /* Allocated RSS queues */
+ u8 *rss_hkey_user; /* User configured hash keys */
+ u8 *rss_lut_user; /* User configured lookup table entries */
+ u8 rss_lut_type; /* used to configure Get/Set RSS LUT AQ call */
+
+ u16 max_frame;
+ u16 rx_buf_len;
+
+ struct ice_aqc_vsi_props info; /* VSI properties */
+
+ /* VSI stats */
+ struct rtnl_link_stats64 net_stats;
+ struct ice_eth_stats eth_stats;
+ struct ice_eth_stats eth_stats_prev;
+
+ struct list_head tmp_sync_list; /* MAC filters to be synced */
+ struct list_head tmp_unsync_list; /* MAC filters to be unsynced */
+
+ bool irqs_ready;
+ bool current_isup; /* Sync 'link up' logging */
+ bool stat_offsets_loaded;
+
+ /* queue information */
+ u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ u16 txq_map[ICE_MAX_TXQS]; /* index in pf->avail_txqs */
+ u16 rxq_map[ICE_MAX_RXQS]; /* index in pf->avail_rxqs */
+ u16 alloc_txq; /* Allocated Tx queues */
+ u16 num_txq; /* Used Tx queues */
+ u16 alloc_rxq; /* Allocated Rx queues */
+ u16 num_rxq; /* Used Rx queues */
+ u16 num_desc;
+ struct ice_tc_cfg tc_cfg;
+} ____cacheline_internodealigned_in_smp;
+
+/* struct that defines an interrupt vector */
+struct ice_q_vector {
+ struct ice_vsi *vsi;
+ cpumask_t affinity_mask;
+ struct napi_struct napi;
+ struct ice_ring_container rx;
+ struct ice_ring_container tx;
+ struct irq_affinity_notify affinity_notify;
+ u16 v_idx; /* index in the vsi->q_vector array. */
+ u8 num_ring_tx; /* total number of tx rings in vector */
+ u8 num_ring_rx; /* total number of rx rings in vector */
+ char name[ICE_INT_NAME_STR_LEN];
+} ____cacheline_internodealigned_in_smp;
+
+enum ice_pf_flags {
+ ICE_FLAG_MSIX_ENA,
+ ICE_FLAG_FLTR_SYNC,
+ ICE_FLAG_RSS_ENA,
+ ICE_PF_FLAGS_NBITS /* must be last */
+};
+
+struct ice_pf {
+ struct pci_dev *pdev;
+ struct msix_entry *msix_entries;
+ struct ice_res_tracker *irq_tracker;
+ struct ice_vsi **vsi; /* VSIs created by the driver */
+ struct ice_sw *first_sw; /* first switch created by firmware */
+ DECLARE_BITMAP(state, __ICE_STATE_NBITS);
+ DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
+ DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
+ DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
+ unsigned long serv_tmr_period;
+ unsigned long serv_tmr_prev;
+ struct timer_list serv_tmr;
+ struct work_struct serv_task;
+ struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
+ struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
+ u32 msg_enable;
+ u32 hw_csum_rx_error;
+ u32 oicr_idx; /* Other interrupt cause vector index */
+ u32 num_lan_msix; /* Total MSIX vectors for base driver */
+ u32 num_avail_msix; /* remaining MSIX vectors left unclaimed */
+ u16 num_lan_tx; /* num lan tx queues setup */
+ u16 num_lan_rx; /* num lan rx queues setup */
+ u16 q_left_tx; /* remaining num tx queues left unclaimed */
+ u16 q_left_rx; /* remaining num rx queues left unclaimed */
+ u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
+ u16 num_alloc_vsi;
+ u16 corer_count; /* Core reset count */
+ u16 globr_count; /* Global reset count */
+ u16 empr_count; /* EMP reset count */
+ u16 pfr_count; /* PF reset count */
+
+ struct ice_hw_port_stats stats;
+ struct ice_hw_port_stats stats_prev;
+ struct ice_hw hw;
+ bool stat_prev_loaded; /* has previous stats been loaded */
+ char int_name[ICE_INT_NAME_STR_LEN];
+};
+
+struct ice_netdev_priv {
+ struct ice_vsi *vsi;
+};
+
+/**
+ * ice_irq_dynamic_ena - Enable default interrupt generation settings
+ * @hw: pointer to hw struct
+ * @vsi: pointer to vsi struct, can be NULL
+ * @q_vector: pointer to q_vector, can be NULL
+ */
+static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
+ struct ice_q_vector *q_vector)
+{
+ u32 vector = (vsi && q_vector) ? vsi->base_vector + q_vector->v_idx :
+ ((struct ice_pf *)hw->back)->oicr_idx;
+ int itr = ICE_ITR_NONE;
+ u32 val;
+
+ /* clear the PBA here, as this function is meant to clean out all
+ * previous interrupts and enable the interrupt
+ */
+ val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+ (itr << GLINT_DYN_CTL_ITR_INDX_S);
+ if (vsi)
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return;
+ wr32(hw, GLINT_DYN_CTL(vector), val);
+}
+
+static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
+{
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+}
+
+void ice_set_ethtool_ops(struct net_device *netdev);
+int ice_up(struct ice_vsi *vsi);
+int ice_down(struct ice_vsi *vsi);
+int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
+void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+
+#endif /* _ICE_H_ */
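ice.h leans on a sentinel-enum idiom throughout: __ICE_STATE_NBITS, ICE_VSI_FLAG_NBITS and ICE_PF_FLAGS_NBITS each sit last in their enum so the DECLARE_BITMAP() users are resized automatically whenever a state or flag is added. A self-contained model of the pattern:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

enum state {
        STATE_DOWN,
        STATE_SUSPENDED,
        STATE_NBITS             /* must be last: sizes the bitmap */
};

struct pf {
        DECLARE_BITMAP(state, STATE_NBITS);
};

int main(void)
{
        struct pf pf = { { 0 } };

        pf.state[STATE_DOWN / BITS_PER_LONG] |=
                1UL << (STATE_DOWN % BITS_PER_LONG);
        printf("bitmap words: %zu\n",
               sizeof(pf.state) / sizeof(unsigned long));   /* 1 */
        return 0;
}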
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
new file mode 100644
index 000000000000..5b13ca1bd85f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -0,0 +1,1352 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_ADMINQ_CMD_H_
+#define _ICE_ADMINQ_CMD_H_
+
+/* This header file defines the Admin Queue commands, error codes and
+ * descriptor format. It is shared between Firmware and Software.
+ */
+
+#define ICE_MAX_VSI 768
+#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
+#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
+
+struct ice_aqc_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get version (direct 0x0001) */
+struct ice_aqc_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+/* Queue Shutdown (direct 0x0003) */
+struct ice_aqc_q_shutdown {
+#define ICE_AQC_DRIVER_UNLOADING BIT(0)
+ __le32 driver_unloading;
+ u8 reserved[12];
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ice_aqc_req_res {
+ __le16 res_id;
+#define ICE_AQC_RES_ID_NVM 1
+#define ICE_AQC_RES_ID_SDP 2
+#define ICE_AQC_RES_ID_CHNG_LOCK 3
+#define ICE_AQC_RES_ID_GLBL_LOCK 4
+ __le16 access_type;
+#define ICE_AQC_RES_ACCESS_READ 1
+#define ICE_AQC_RES_ACCESS_WRITE 2
+
+ /* Upon successful completion, FW writes this value and the driver is
+ * expected to release the resource before the timeout. This value is
+ * provided in milliseconds.
+ */
+ __le32 timeout;
+#define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin id of the SDP */
+ __le32 res_number;
+ /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
+ __le16 status;
+#define ICE_AQ_RES_GLBL_SUCCESS 0
+#define ICE_AQ_RES_GLBL_IN_PROG 1
+#define ICE_AQ_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ice_aqc_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ice_aqc_list_caps_elem {
+ __le16 cap;
+#define ICE_AQC_CAPS_VSI 0x0017
+#define ICE_AQC_CAPS_RSS 0x0040
+#define ICE_AQC_CAPS_RXQS 0x0041
+#define ICE_AQC_CAPS_TXQS 0x0042
+#define ICE_AQC_CAPS_MSIX 0x0043
+#define ICE_AQC_CAPS_MAX_MTU 0x0047
+
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+/* Manage MAC address, read command - indirect (0x0107)
+ * This struct is also used for the response
+ */
+struct ice_aqc_manage_mac_read {
+ __le16 flags; /* Zeroed by device driver */
+#define ICE_AQC_MAN_MAC_LAN_ADDR_VALID BIT(4)
+#define ICE_AQC_MAN_MAC_SAN_ADDR_VALID BIT(5)
+#define ICE_AQC_MAN_MAC_PORT_ADDR_VALID BIT(6)
+#define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7)
+#define ICE_AQC_MAN_MAC_READ_S 4
+#define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S)
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_MAN_MAC_PORT_NUM_IS_VALID BIT(0)
+ u8 num_addr; /* Used in response */
+ u8 reserved[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response buffer format for manage MAC read command */
+struct ice_aqc_manage_mac_read_resp {
+ u8 lport_num;
+ u8 addr_type;
+#define ICE_AQC_MAN_MAC_ADDR_TYPE_LAN 0
+#define ICE_AQC_MAN_MAC_ADDR_TYPE_WOL 1
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Manage MAC address, write command - direct (0x0108) */
+struct ice_aqc_manage_mac_write {
+ u8 port_num;
+ u8 flags;
+#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0)
+#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1)
+#define ICE_AQC_MAN_MAC_WR_S 6
+#define ICE_AQC_MAN_MAC_WR_M (3 << ICE_AQC_MAN_MAC_WR_S)
+#define ICE_AQC_MAN_MAC_UPDATE_LAA 0
+#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL (BIT(0) << ICE_AQC_MAN_MAC_WR_S)
+ /* High 16 bits of MAC address in big endian order */
+ __be16 sah;
+ /* Low 32 bits of MAC address in big endian order */
+ __be32 sal;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct ice_aqc_clear_pxe {
+ u8 rx_cnt;
+#define ICE_AQC_CLEAR_PXE_RX_CNT 0x2
+ u8 reserved[15];
+};
+
+/* Get switch configuration (0x0200) */
+struct ice_aqc_get_sw_cfg {
+ /* Reserved for command and copy of request flags for response */
+ __le16 flags;
+ /* First desc in case of command and next_elem in case of response.
+ * In the response, a non-zero value means that not all of the
+ * configuration was returned, and a new command shall be sent with
+ * this value in the 'first desc' field
+ */
+ __le16 element;
+ /* Reserved for command, only used for response */
+ __le16 num_elems;
+ __le16 rsvd;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Each entry in the response buffer is of the following type: */
+struct ice_aqc_get_sw_cfg_resp_elem {
+ /* VSI/Port Number */
+ __le16 vsi_port_num;
+#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S 0
+#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M \
+ (0x3FF << ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S)
+#define ICE_AQC_GET_SW_CONF_RESP_TYPE_S 14
+#define ICE_AQC_GET_SW_CONF_RESP_TYPE_M (0x3 << ICE_AQC_GET_SW_CONF_RESP_TYPE_S)
+#define ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT 0
+#define ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT 1
+#define ICE_AQC_GET_SW_CONF_RESP_VSI 2
+
+ /* SWID VSI/Port belongs to */
+ __le16 swid;
+
+ /* Bit 14..0 : PF/VF number VSI belongs to
+ * Bit 15 : VF indication bit
+ */
+ __le16 pf_vf_num;
+#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S 0
+#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M \
+ (0x7FFF << ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S)
+#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
+};
+
+/* The response buffer is as follows. Note that the length of the
+ * elements array varies with the length of the command response.
+ */
+struct ice_aqc_get_sw_cfg_resp {
+ struct ice_aqc_get_sw_cfg_resp_elem elements[1];
+};
+
+/* These resource type defines are used for all switch resource
+ * commands where a resource type is required, such as:
+ * Get Resource Allocation command (indirect 0x0204)
+ * Allocate Resources command (indirect 0x0208)
+ * Free Resources command (indirect 0x0209)
+ * Get Allocated Resource Descriptors Command (indirect 0x020A)
+ */
+#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
+#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+
+/* Allocate Resources command (indirect 0x0208)
+ * Free Resources command (indirect 0x0209)
+ */
+struct ice_aqc_alloc_free_res_cmd {
+ __le16 num_entries; /* Number of Resource entries */
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Resource descriptor */
+struct ice_aqc_res_elem {
+ union {
+ __le16 sw_resp;
+ __le16 flu_resp;
+ } e;
+};
+
+/* Buffer for Allocate/Free Resources commands */
+struct ice_aqc_alloc_free_res_elem {
+ __le16 res_type; /* Types defined above cmd 0x0204 */
+#define ICE_AQC_RES_TYPE_SHARED_S 7
+#define ICE_AQC_RES_TYPE_SHARED_M (0x1 << ICE_AQC_RES_TYPE_SHARED_S)
+#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S 8
+#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \
+ (0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S)
+ __le16 num_elems;
+ struct ice_aqc_res_elem elem[1];
+};
+
+/* Add VSI (indirect 0x0210)
+ * Update VSI (indirect 0x0211)
+ * Get VSI (indirect 0x0212)
+ * Free VSI (indirect 0x0213)
+ */
+struct ice_aqc_add_get_update_free_vsi {
+ __le16 vsi_num;
+#define ICE_AQ_VSI_NUM_S 0
+#define ICE_AQ_VSI_NUM_M (0x03FF << ICE_AQ_VSI_NUM_S)
+#define ICE_AQ_VSI_IS_VALID BIT(15)
+ __le16 cmd_flags;
+#define ICE_AQ_VSI_KEEP_ALLOC 0x1
+ u8 vf_id;
+ u8 reserved;
+ __le16 vsi_flags;
+#define ICE_AQ_VSI_TYPE_S 0
+#define ICE_AQ_VSI_TYPE_M (0x3 << ICE_AQ_VSI_TYPE_S)
+#define ICE_AQ_VSI_TYPE_VF 0x0
+#define ICE_AQ_VSI_TYPE_VMDQ2 0x1
+#define ICE_AQ_VSI_TYPE_PF 0x2
+#define ICE_AQ_VSI_TYPE_EMP_MNG 0x3
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response descriptor for:
+ * Add VSI (indirect 0x0210)
+ * Update VSI (indirect 0x0211)
+ * Free VSI (indirect 0x0213)
+ */
+struct ice_aqc_add_update_free_vsi_resp {
+ __le16 vsi_num;
+ __le16 ext_status;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_vsi_props {
+ __le16 valid_sections;
+#define ICE_AQ_VSI_PROP_SW_VALID BIT(0)
+#define ICE_AQ_VSI_PROP_SECURITY_VALID BIT(1)
+#define ICE_AQ_VSI_PROP_VLAN_VALID BIT(2)
+#define ICE_AQ_VSI_PROP_OUTER_TAG_VALID BIT(3)
+#define ICE_AQ_VSI_PROP_INGRESS_UP_VALID BIT(4)
+#define ICE_AQ_VSI_PROP_EGRESS_UP_VALID BIT(5)
+#define ICE_AQ_VSI_PROP_RXQ_MAP_VALID BIT(6)
+#define ICE_AQ_VSI_PROP_Q_OPT_VALID BIT(7)
+#define ICE_AQ_VSI_PROP_OUTER_UP_VALID BIT(8)
+#define ICE_AQ_VSI_PROP_FLOW_DIR_VALID BIT(11)
+#define ICE_AQ_VSI_PROP_PASID_VALID BIT(12)
+ /* switch section */
+ u8 sw_id;
+ u8 sw_flags;
+#define ICE_AQ_VSI_SW_FLAG_ALLOW_LB BIT(5)
+#define ICE_AQ_VSI_SW_FLAG_LOCAL_LB BIT(6)
+#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7)
+ u8 sw_flags2;
+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0
+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \
+ (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
+#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0)
+#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4)
+ u8 veb_stat_id;
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5)
+ /* security section */
+ u8 sec_flags;
+#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0)
+#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2)
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
+#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0)
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANs include priority bits */
+ u8 pvlan_reserved[2];
+ u8 port_vlan_flags;
+#define ICE_AQ_VSI_PVLAN_MODE_S 0
+#define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S)
+#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1
+#define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2
+#define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3
+#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
+#define ICE_AQ_VSI_PVLAN_EMOD_S 3
+#define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+ u8 pvlan_reserved2[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
+#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
+#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
+#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
+#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
+#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
+#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
+#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
+#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
+#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
+#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
+#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
+#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
+#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
+#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
+#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* outer tags section */
+ __le16 outer_tag;
+ u8 outer_tag_flags;
+#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0
+#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S)
+#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0
+#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1
+#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
+#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
+#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
+#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4)
+#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6)
+ u8 outer_tag_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
+#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
+ __le16 q_mapping[16];
+#define ICE_AQ_VSI_Q_S 0
+#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
+ __le16 tc_mapping[8];
+#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
+#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
+#define ICE_AQ_VSI_TC_Q_NUM_S 11
+#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
+ /* queueing option section */
+ u8 q_opt_rss;
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+ u8 q_opt_tc;
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
+#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
+ u8 q_opt_flags;
+#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
+ u8 q_opt_reserved[3];
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ /* section 10 */
+ __le16 sect_10_reserved;
+ /* flow director section */
+ __le16 fd_options;
+#define ICE_AQ_VSI_FD_ENABLE BIT(0)
+#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
+#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
+ __le16 max_fd_fltr_dedicated;
+ __le16 max_fd_fltr_shared;
+ __le16 fd_def_q;
+#define ICE_AQ_VSI_FD_DEF_Q_S 0
+#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
+#define ICE_AQ_VSI_FD_DEF_GRP_S 12
+#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
+ __le16 fd_report_opt;
+#define ICE_AQ_VSI_FD_REPORT_Q_S 0
+#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
+#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
+ /* PASID section */
+ __le32 pasid_id;
+#define ICE_AQ_VSI_PASID_ID_S 0
+#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
+#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
+ u8 reserved[24];
+};
+
+/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
+ */
+struct ice_aqc_sw_rules {
+ /* ops: add switch rules, referring to the number of rules.
+ * ops: update switch rules, referring to the number of filters.
+ * ops: remove switch rules, referring to the entry index.
+ * ops: get switch rules, referring to the number of filters.
+ */
+ __le16 num_rules_fltr_entry_index;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
+ * This structure describes the lookup rules and associated actions. "index"
+ * is returned as part of a response to a successful Add command, and can be
+ * used to identify the rule for Update/Get/Remove commands.
+ */
+struct ice_sw_rule_lkup_rx_tx {
+ __le16 recipe_id;
+#define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10
+ /* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */
+ __le16 src;
+ __le32 act;
+
+ /* Bit 0:1 - Action type */
+#define ICE_SINGLE_ACT_TYPE_S 0x00
+#define ICE_SINGLE_ACT_TYPE_M (0x3 << ICE_SINGLE_ACT_TYPE_S)
+
+ /* Bit 2 - Loop back enable
+ * Bit 3 - LAN enable
+ */
+#define ICE_SINGLE_ACT_LB_ENABLE BIT(2)
+#define ICE_SINGLE_ACT_LAN_ENABLE BIT(3)
+
+ /* Action type = 0 - Forward to VSI or VSI list */
+#define ICE_SINGLE_ACT_VSI_FORWARDING 0x0
+
+#define ICE_SINGLE_ACT_VSI_ID_S 4
+#define ICE_SINGLE_ACT_VSI_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_ID_S)
+#define ICE_SINGLE_ACT_VSI_LIST_ID_S 4
+#define ICE_SINGLE_ACT_VSI_LIST_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_LIST_ID_S)
+ /* This bit needs to be set if action is forward to VSI list */
+#define ICE_SINGLE_ACT_VSI_LIST BIT(14)
+#define ICE_SINGLE_ACT_VALID_BIT BIT(17)
+#define ICE_SINGLE_ACT_DROP BIT(18)
+
+ /* Action type = 1 - Forward to queue or queue group */
+#define ICE_SINGLE_ACT_TO_Q 0x1
+#define ICE_SINGLE_ACT_Q_INDEX_S 4
+#define ICE_SINGLE_ACT_Q_INDEX_M (0x7FF << ICE_SINGLE_ACT_Q_INDEX_S)
+#define ICE_SINGLE_ACT_Q_REGION_S 15
+#define ICE_SINGLE_ACT_Q_REGION_M (0x7 << ICE_SINGLE_ACT_Q_REGION_S)
+#define ICE_SINGLE_ACT_Q_PRIORITY BIT(18)
+
+ /* Action type = 2 - Prune */
+#define ICE_SINGLE_ACT_PRUNE 0x2
+#define ICE_SINGLE_ACT_EGRESS BIT(15)
+#define ICE_SINGLE_ACT_INGRESS BIT(16)
+#define ICE_SINGLE_ACT_PRUNET BIT(17)
+ /* Bit 18 should be set to 0 for this action */
+
+ /* Action type = 2 - Pointer */
+#define ICE_SINGLE_ACT_PTR 0x2
+#define ICE_SINGLE_ACT_PTR_VAL_S 4
+#define ICE_SINGLE_ACT_PTR_VAL_M (0x1FFF << ICE_SINGLE_ACT_PTR_VAL_S)
+ /* Bit 18 should be set to 1 */
+#define ICE_SINGLE_ACT_PTR_BIT BIT(18)
+
+ /* Action type = 3 - Other actions. Last two bits
+ * are other action identifier
+ */
+#define ICE_SINGLE_ACT_OTHER_ACTS 0x3
+#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17
+#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \
+ (0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
+
+ /* Bit 17:18 - Defines other actions */
+ /* Other action = 0 - Mirror VSI */
+#define ICE_SINGLE_OTHER_ACT_MIRROR 0
+#define ICE_SINGLE_ACT_MIRROR_VSI_ID_S 4
+#define ICE_SINGLE_ACT_MIRROR_VSI_ID_M \
+ (0x3FF << ICE_SINGLE_ACT_MIRROR_VSI_ID_S)
+
+ /* Other action = 3 - Set Stat count */
+#define ICE_SINGLE_OTHER_ACT_STAT_COUNT 3
+#define ICE_SINGLE_ACT_STAT_COUNT_INDEX_S 4
+#define ICE_SINGLE_ACT_STAT_COUNT_INDEX_M \
+ (0x7F << ICE_SINGLE_ACT_STAT_COUNT_INDEX_S)
+
+ __le16 index; /* The index of the rule in the lookup table */
+ /* Length and values of the header to be matched per recipe or
+ * lookup-type
+ */
+ __le16 hdr_len;
+ u8 hdr[1];
+} __packed;
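The act word packs a 2-bit action type in bits 0:1 plus type-specific fields,
as the defines above lay out. A minimal sketch of composing a single
"forward to VSI" action (the ice_ex_ helper name and the vsi_id input are
assumptions for illustration, not driver API):

        static __le32 ice_ex_fwd_vsi_act(u16 vsi_id)
        {
                u32 act = ICE_SINGLE_ACT_VSI_FORWARDING; /* type 0, bits 0:1 */

                act |= ((u32)vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
                       ICE_SINGLE_ACT_VSI_ID_M;
                act |= ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;
                return cpu_to_le32(act);
        }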
+
+/* Add/Update/Remove large action command/response entry
+ * "index" is returned as part of a response to a successful Add command, and
+ * can be used to identify the action for Update/Get/Remove commands.
+ */
+struct ice_sw_rule_lg_act {
+ __le16 index; /* Index in large action table */
+ __le16 size;
+ __le32 act[1]; /* array of size for actions */
+ /* Max number of large actions */
+#define ICE_MAX_LG_ACT 4
+ /* Bit 0:2 - Action type */
+#define ICE_LG_ACT_TYPE_S 0
+#define ICE_LG_ACT_TYPE_M (0x7 << ICE_LG_ACT_TYPE_S)
+
+ /* Action type = 0 - Forward to VSI or VSI list */
+#define ICE_LG_ACT_VSI_FORWARDING 0
+#define ICE_LG_ACT_VSI_ID_S 3
+#define ICE_LG_ACT_VSI_ID_M (0x3FF << ICE_LG_ACT_VSI_ID_S)
+#define ICE_LG_ACT_VSI_LIST_ID_S 3
+#define ICE_LG_ACT_VSI_LIST_ID_M (0x3FF << ICE_LG_ACT_VSI_LIST_ID_S)
+ /* This bit needs to be set if action is forward to VSI list */
+#define ICE_LG_ACT_VSI_LIST BIT(13)
+
+#define ICE_LG_ACT_VALID_BIT BIT(16)
+
+ /* Action type = 1 - Forward to queue or queue group */
+#define ICE_LG_ACT_TO_Q 0x1
+#define ICE_LG_ACT_Q_INDEX_S 3
+#define ICE_LG_ACT_Q_INDEX_M (0x7FF << ICE_LG_ACT_Q_INDEX_S)
+#define ICE_LG_ACT_Q_REGION_S 14
+#define ICE_LG_ACT_Q_REGION_M (0x7 << ICE_LG_ACT_Q_REGION_S)
+#define ICE_LG_ACT_Q_PRIORITY_SET BIT(17)
+
+ /* Action type = 2 - Prune */
+#define ICE_LG_ACT_PRUNE 0x2
+#define ICE_LG_ACT_EGRESS BIT(14)
+#define ICE_LG_ACT_INGRESS BIT(15)
+#define ICE_LG_ACT_PRUNET BIT(16)
+
+ /* Action type = 3 - Mirror VSI */
+#define ICE_LG_OTHER_ACT_MIRROR 0x3
+#define ICE_LG_ACT_MIRROR_VSI_ID_S 3
+#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)
+
+ /* Action type = 5 - Large Action */
+#define ICE_LG_ACT_GENERIC 0x5
+#define ICE_LG_ACT_GENERIC_VALUE_S 3
+#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
+#define ICE_LG_ACT_GENERIC_OFFSET_S 19
+#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
+#define ICE_LG_ACT_GENERIC_PRIORITY_S 22
+#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+
+ /* Action type = 7 - Set Stat count */
+#define ICE_LG_ACT_STAT_COUNT 0x7
+#define ICE_LG_ACT_STAT_COUNT_S 3
+#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
+};
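The generic large action carries a 16-bit value, a 3-bit offset, and a 3-bit
priority in one act word. A hedged sketch of building such a word (the
helper name and its inputs are hypothetical):

        static __le32 ice_ex_lg_generic_act(u16 value, u8 offset)
        {
                u32 act = ICE_LG_ACT_GENERIC;

                act |= ((u32)value << ICE_LG_ACT_GENERIC_VALUE_S) &
                       ICE_LG_ACT_GENERIC_VALUE_M;
                act |= ((u32)offset << ICE_LG_ACT_GENERIC_OFFSET_S) &
                       ICE_LG_ACT_GENERIC_OFFSET_M;
                return cpu_to_le32(act);
        }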
+
+/* Add/Update/Remove VSI list command/response entry
+ * "index" is returned as part of a response to a successful Add command, and
+ * can be used to identify the VSI list for Update/Get/Remove commands.
+ */
+struct ice_sw_rule_vsi_list {
+ __le16 index; /* Index of VSI/Prune list */
+ __le16 number_vsi;
+ __le16 vsi[1]; /* Array of number_vsi VSI numbers */
+};
+
+/* Query VSI list command/response entry */
+struct ice_sw_rule_vsi_list_query {
+ __le16 index;
+ DECLARE_BITMAP(vsi_list, ICE_MAX_VSI);
+} __packed;
+
+/* Add switch rule response:
+ * The content of the return buffer is the same as the input buffer. The
+ * status field and LUT index are updated as part of the response.
+ */
+struct ice_aqc_sw_rules_elem {
+ __le16 type; /* Switch rule type, one of T_... */
+#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
+#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
+#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
+#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
+#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
+ __le16 status;
+ union {
+ struct ice_sw_rule_lkup_rx_tx lkup_tx_rx;
+ struct ice_sw_rule_lg_act lg_act;
+ struct ice_sw_rule_vsi_list vsi_list;
+ struct ice_sw_rule_vsi_list_query vsi_list_query;
+ } __packed pdata;
+};
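Because pdata.lkup_tx_rx ends in a one-byte flexible hdr[] array, each
element in the rules buffer must be sized by hand. A sketch, assuming
hdr_len is the length of the header to be matched (helper name is ours):

        static u16 ice_ex_lkup_rule_len(u16 hdr_len)
        {
                /* offsetof() through the union member reaches the start
                 * of the flexible hdr[] array; add the matched header.
                 */
                return offsetof(struct ice_aqc_sw_rules_elem,
                                pdata.lkup_tx_rx.hdr) + hdr_len;
        }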
+
+/* Get Default Topology (indirect 0x0400) */
+struct ice_aqc_get_topo {
+ u8 port_num;
+ u8 num_branches;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Update TSE (indirect 0x0403)
+ * Get TSE (indirect 0x0404)
+ */
+struct ice_aqc_get_cfg_elem {
+ __le16 num_elem_req; /* Used by commands */
+ __le16 num_elem_resp; /* Used by responses */
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the buffer for:
+ * Suspend Nodes (indirect 0x0409)
+ * Resume Nodes (indirect 0x040A)
+ */
+struct ice_aqc_suspend_resume_elem {
+ __le32 teid[1];
+};
+
+/* Add TSE (indirect 0x0401)
+ * Delete TSE (indirect 0x040F)
+ * Move TSE (indirect 0x0408)
+ */
+struct ice_aqc_add_move_delete_elem {
+ __le16 num_grps_req;
+ __le16 num_grps_updated;
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_elem_info_bw {
+ __le16 bw_profile_idx;
+ __le16 bw_alloc;
+};
+
+struct ice_aqc_txsched_elem {
+ u8 elem_type; /* Special field, reserved for some aq calls */
+#define ICE_AQC_ELEM_TYPE_UNDEFINED 0x0
+#define ICE_AQC_ELEM_TYPE_ROOT_PORT 0x1
+#define ICE_AQC_ELEM_TYPE_TC 0x2
+#define ICE_AQC_ELEM_TYPE_SE_GENERIC 0x3
+#define ICE_AQC_ELEM_TYPE_ENTRY_POINT 0x4
+#define ICE_AQC_ELEM_TYPE_LEAF 0x5
+#define ICE_AQC_ELEM_TYPE_SE_PADDED 0x6
+ u8 valid_sections;
+#define ICE_AQC_ELEM_VALID_GENERIC BIT(0)
+#define ICE_AQC_ELEM_VALID_CIR BIT(1)
+#define ICE_AQC_ELEM_VALID_EIR BIT(2)
+#define ICE_AQC_ELEM_VALID_SHARED BIT(3)
+ u8 generic;
+#define ICE_AQC_ELEM_GENERIC_MODE_M 0x1
+#define ICE_AQC_ELEM_GENERIC_PRIO_S 0x1
+#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S)
+#define ICE_AQC_ELEM_GENERIC_SP_S 0x4
+#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S)
+#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5
+#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \
+ (0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S)
+ u8 flags; /* Special field, reserved for some aq calls */
+#define ICE_AQC_ELEM_FLAG_SUSPEND_M 0x1
+ struct ice_aqc_elem_info_bw cir_bw;
+ struct ice_aqc_elem_info_bw eir_bw;
+ __le16 srl_id;
+ __le16 reserved2;
+};
+
+struct ice_aqc_txsched_elem_data {
+ __le32 parent_teid;
+ __le32 node_teid;
+ struct ice_aqc_txsched_elem data;
+};
+
+struct ice_aqc_txsched_topo_grp_info_hdr {
+ __le32 parent_teid;
+ __le16 num_elems;
+ __le16 reserved2;
+};
+
+struct ice_aqc_add_elem {
+ struct ice_aqc_txsched_topo_grp_info_hdr hdr;
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
+struct ice_aqc_get_topo_elem {
+ struct ice_aqc_txsched_topo_grp_info_hdr hdr;
+ struct ice_aqc_txsched_elem_data
+ generic[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+};
+
+struct ice_aqc_delete_elem {
+ struct ice_aqc_txsched_topo_grp_info_hdr hdr;
+ __le32 teid[1];
+};
+
+/* Query Scheduler Resource Allocation (indirect 0x0412)
+ * This indirect command retrieves the scheduler resources allocated by
+ * EMP Firmware to the given PF.
+ */
+struct ice_aqc_query_txsched_res {
+ u8 reserved[8];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_generic_sched_props {
+ __le16 phys_levels;
+ __le16 logical_levels;
+ u8 flattening_bitmap;
+ u8 max_device_cgds;
+ u8 max_pf_cgds;
+ u8 rsvd0;
+ __le16 rdma_qsets;
+ u8 rsvd1[22];
+};
+
+struct ice_aqc_layer_props {
+ u8 logical_layer;
+ u8 chunk_size;
+ __le16 max_device_nodes;
+ __le16 max_pf_nodes;
+ u8 rsvd0[2];
+ __le16 max_shared_rate_lmtr;
+ __le16 max_children;
+ __le16 max_cir_rl_profiles;
+ __le16 max_eir_rl_profiles;
+ __le16 max_srl_profiles;
+ u8 rsvd1[14];
+};
+
+struct ice_aqc_query_txsched_res_resp {
+ struct ice_aqc_generic_sched_props sched_props;
+ struct ice_aqc_layer_props layer_props[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+};
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ice_aqc_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define ICE_AQC_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.2 : Report mode
+ * 00b - Report NVM capabilities
+ * 01b - Report topology capabilities
+ * 10b - Report SW configured
+ */
+#define ICE_AQC_REPORT_MODE_S 1
+#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S)
+#define ICE_AQC_REPORT_NVM_CAP 0
+#define ICE_AQC_REPORT_TOPO_CAP BIT(1)
+#define ICE_AQC_REPORT_SW_CFG BIT(2)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* PHY type defines (extended).
+ * This first set of defines is for phy_type_low.
+ */
+#define ICE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define ICE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define ICE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define ICE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define ICE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define ICE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define ICE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define ICE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define ICE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define ICE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define ICE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define ICE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define ICE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define ICE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define ICE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define ICE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define ICE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define ICE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19)
+#define ICE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20)
+#define ICE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21)
+#define ICE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22)
+#define ICE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23)
+#define ICE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24)
+#define ICE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25)
+#define ICE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26)
+#define ICE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27)
+#define ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28)
+#define ICE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29)
+#define ICE_PHY_TYPE_LOW_40GBASE_CR4 BIT_ULL(30)
+#define ICE_PHY_TYPE_LOW_40GBASE_SR4 BIT_ULL(31)
+#define ICE_PHY_TYPE_LOW_40GBASE_LR4 BIT_ULL(32)
+#define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33)
+#define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34)
+#define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35)
+#define ICE_PHY_TYPE_LOW_MAX_INDEX 63
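Since phy_type_low is a plain 64-bit bitmap, capability checks reduce to
masking. A minimal sketch, with a helper name of our own choosing (not
driver API):

        static bool ice_ex_phy_low_is_10g(u64 phy_type_low)
        {
                /* True if the bitmap reports any 10G PHY type */
                return !!(phy_type_low & (ICE_PHY_TYPE_LOW_10GBASE_T |
                                          ICE_PHY_TYPE_LOW_10G_SFI_DA |
                                          ICE_PHY_TYPE_LOW_10GBASE_SR |
                                          ICE_PHY_TYPE_LOW_10GBASE_LR |
                                          ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 |
                                          ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC |
                                          ICE_PHY_TYPE_LOW_10G_SFI_C2C));
        }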
+
+struct ice_aqc_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 reserved;
+ u8 caps;
+#define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define ICE_AQC_PHY_LOW_POWER_MODE BIT(2)
+#define ICE_AQC_PHY_EN_LINK BIT(3)
+#define ICE_AQC_PHY_AN_MODE BIT(4)
+#define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5)
+ u8 low_power_ctrl;
+#define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+ __le16 eee_cap;
+#define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0)
+#define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1)
+#define ICE_AQC_PHY_EEE_EN_10GBASE_T BIT(2)
+#define ICE_AQC_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define ICE_AQC_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define ICE_AQC_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 link_fec_options;
+#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3)
+#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+ u8 extended_compliance_code;
+#define ICE_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
+#define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+#define ICE_AQC_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd1;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd8;
+ } qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX];
+};
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ice_aqc_set_phy_cfg {
+ u8 lport_num;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set PHY config command data structure */
+struct ice_aqc_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 rsvd0;
+ u8 caps;
+#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
+#define ICE_AQ_PHY_ENA_LINK BIT(3)
+#define ICE_AQ_PHY_ENA_ATOMIC_LINK BIT(5)
+ u8 low_power_ctrl;
+ __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */
+ __le16 eeer_value;
+ u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */
+ u8 rsvd1;
+};
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for the response, in which only the lport_num field is valid.
+ */
+struct ice_aqc_restart_an {
+ u8 lport_num;
+ u8 reserved;
+ u8 cmd_flags;
+#define ICE_AQC_RESTART_AN_LINK_RESTART BIT(1)
+#define ICE_AQC_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ice_aqc_get_link_status {
+ u8 lport_num;
+ u8 reserved;
+ __le16 cmd_flags;
+#define ICE_AQ_LSE_M 0x3
+#define ICE_AQ_LSE_NOP 0x0
+#define ICE_AQ_LSE_DIS 0x2
+#define ICE_AQ_LSE_ENA 0x3
+ /* only response uses this flag */
+#define ICE_AQ_LSE_IS_ENABLED 0x1
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ice_aqc_get_link_status_data {
+ u8 topo_media_conflict;
+#define ICE_AQ_LINK_TOPO_CONFLICT BIT(0)
+#define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1)
+#define ICE_AQ_LINK_TOPO_CORRUPT BIT(2)
+ u8 reserved1;
+ u8 link_info;
+#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
+#define ICE_AQ_LINK_FAULT BIT(1)
+#define ICE_AQ_LINK_FAULT_TX BIT(2)
+#define ICE_AQ_LINK_FAULT_RX BIT(3)
+#define ICE_AQ_LINK_FAULT_REMOTE BIT(4)
+#define ICE_AQ_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define ICE_AQ_MEDIA_AVAILABLE BIT(6)
+#define ICE_AQ_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define ICE_AQ_AN_COMPLETED BIT(0)
+#define ICE_AQ_LP_AN_ABILITY BIT(1)
+#define ICE_AQ_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define ICE_AQ_FEC_EN BIT(3)
+#define ICE_AQ_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define ICE_AQ_LINK_PAUSE_TX BIT(5)
+#define ICE_AQ_LINK_PAUSE_RX BIT(6)
+#define ICE_AQ_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0)
+#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port TX Suspended */
+#define ICE_AQ_LINK_TX_S 2
+#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S)
+#define ICE_AQ_LINK_TX_ACTIVE 0
+#define ICE_AQ_LINK_TX_DRAINED 1
+#define ICE_AQ_LINK_TX_FLUSHED 3
+ u8 reserved2;
+ __le16 max_frame_size;
+ u8 cfg;
+#define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0)
+#define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1)
+#define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2)
+ /* Pacing Config */
+#define ICE_AQ_CFG_PACING_S 3
+#define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S)
+#define ICE_AQ_CFG_PACING_TYPE_M BIT(7)
+#define ICE_AQ_CFG_PACING_TYPE_AVG 0
+#define ICE_AQ_CFG_PACING_TYPE_FIXED ICE_AQ_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define ICE_AQ_PWR_CLASS_M 0x3
+#define ICE_AQ_LINK_PWR_BASET_LOW_HIGH 0
+#define ICE_AQ_LINK_PWR_BASET_HIGH 1
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_1 0
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_2 1
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define ICE_AQ_LINK_SPEED_10MB BIT(0)
+#define ICE_AQ_LINK_SPEED_100MB BIT(1)
+#define ICE_AQ_LINK_SPEED_1000MB BIT(2)
+#define ICE_AQ_LINK_SPEED_2500MB BIT(3)
+#define ICE_AQ_LINK_SPEED_5GB BIT(4)
+#define ICE_AQ_LINK_SPEED_10GB BIT(5)
+#define ICE_AQ_LINK_SPEED_20GB BIT(6)
+#define ICE_AQ_LINK_SPEED_25GB BIT(7)
+#define ICE_AQ_LINK_SPEED_40GB BIT(8)
+#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
+ __le32 reserved3; /* Aligns next field to 8-byte boundary */
+ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 reserved4;
+};
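Consumers decode these fields with the masks above; only the multi-byte
members need endianness conversion. A hedged decode sketch (the helper
name is an assumption):

        static u16 ice_ex_link_speed(const struct ice_aqc_get_link_status_data *d)
        {
                /* Speed is only meaningful when the link-up bit is set */
                if (!(d->link_info & ICE_AQ_LINK_UP))
                        return 0;
                return le16_to_cpu(d->link_speed); /* one ICE_AQ_LINK_SPEED_* bit */
        }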
+
+/* Set event mask command (direct 0x0613) */
+struct ice_aqc_set_event_mask {
+ u8 lport_num;
+ u8 reserved[7];
+ __le16 event_mask;
+#define ICE_AQ_LINK_EVENT_UPDOWN BIT(1)
+#define ICE_AQ_LINK_EVENT_MEDIA_NA BIT(2)
+#define ICE_AQ_LINK_EVENT_LINK_FAULT BIT(3)
+#define ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define ICE_AQ_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define ICE_AQ_LINK_EVENT_AN_COMPLETED BIT(7)
+#define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+ u8 reserved1[6];
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct ice_aqc_nvm {
+ u8 cmd_flags;
+#define ICE_AQC_NVM_LAST_CMD BIT(0)
+#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */
+#define ICE_AQC_NVM_PRESERVATION_S 1
+#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
+#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
+ u8 module_typeid;
+ __le16 length;
+#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
+struct ice_aqc_get_set_rss_key {
+#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
+#define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0
+#define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+#define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28
+#define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC
+
+struct ice_aqc_get_set_rss_keys {
+ u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
+ u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE];
+};
+
+/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
+struct ice_aqc_get_set_rss_lut {
+#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
+ __le16 vsi_id;
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
+ (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S)
+
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2
+
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \
+ (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S)
+
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2
+
+#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4
+#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \
+ (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S)
+
+ __le16 flags;
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
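The flags word multiplexes the table type, the size flag, and (for global
tables) the LUT index. Requesting a 512-entry PF table could look like this
sketch (helper name is hypothetical):

        static __le16 ice_ex_rss_lut_flags_pf_512(void)
        {
                u16 flags;

                flags = (ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF <<
                         ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
                        ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M;
                flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
                          ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
                         ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
                return cpu_to_le16(flags);
        }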
+
+/* Add TX LAN Queues (indirect 0x0C30) */
+struct ice_aqc_add_txqs {
+ u8 num_qgrps;
+ u8 reserved[3];
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each queue entry for the Add TX LAN Queues
+ * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.
+ */
+struct ice_aqc_add_txqs_perq {
+ __le16 txq_id;
+ u8 rsvd[2];
+ __le32 q_teid;
+ u8 txq_ctx[22];
+ u8 rsvd2[2];
+ struct ice_aqc_txsched_elem info;
+};
+
+/* The format of the command buffer for Add TX LAN Queues (0x0C30)
+ * is an array of the following structs. Note that the length of each
+ * struct ice_aqc_add_tx_qgrp is variable due to the variable number of
+ * queues in each group.
+ */
+struct ice_aqc_add_tx_qgrp {
+ __le32 parent_teid;
+ u8 num_txqs;
+ u8 rsvd[3];
+ struct ice_aqc_add_txqs_perq txqs[1];
+};
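Sizing one such variable-length group reduces to simple arithmetic; a
sketch, assuming num_txqs >= 1 (helper name is ours):

        static u16 ice_ex_tx_qgrp_size(u8 num_txqs)
        {
                /* sizeof() already counts the first txqs[] entry */
                return sizeof(struct ice_aqc_add_tx_qgrp) +
                       (num_txqs - 1) * sizeof(struct ice_aqc_add_txqs_perq);
        }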
+
+/* Disable TX LAN Queues (indirect 0x0C31) */
+struct ice_aqc_dis_txqs {
+ u8 cmd_type;
+#define ICE_AQC_Q_DIS_CMD_S 0
+#define ICE_AQC_Q_DIS_CMD_M (0x3 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_NO_FUNC_RESET (0 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_VM_RESET BIT(ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_VF_RESET (2 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_PF_RESET (3 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_SUBSEQ_CALL BIT(2)
+#define ICE_AQC_Q_DIS_CMD_FLUSH_PIPE BIT(3)
+ u8 num_entries;
+ __le16 vmvf_and_timeout;
+#define ICE_AQC_Q_DIS_VMVF_NUM_S 0
+#define ICE_AQC_Q_DIS_VMVF_NUM_M (0x3FF << ICE_AQC_Q_DIS_VMVF_NUM_S)
+#define ICE_AQC_Q_DIS_TIMEOUT_S 10
+#define ICE_AQC_Q_DIS_TIMEOUT_M (0x3F << ICE_AQC_Q_DIS_TIMEOUT_S)
+ __le32 blocked_cgds;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* The buffer for Disable TX LAN Queues (indirect 0x0C31)
+ * contains the following structures, arrayed one after the
+ * other.
+ * Note: Since the q_id is 16 bits wide, if the
+ * number of queues is even, then 2 bytes of alignment MUST be
+ * added before the start of the next group, to allow correct
+ * alignment of the parent_teid field.
+ */
+struct ice_aqc_dis_txq_item {
+ __le32 parent_teid;
+ u8 num_qs;
+ u8 rsvd;
+ /* The length of the q_id array varies according to num_qs */
+ __le16 q_id[1];
+ /* This only applies from F8 onward */
+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15
+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \
+ (0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \
+ (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
+};
+
+struct ice_aqc_dis_txq {
+ struct ice_aqc_dis_txq_item qgrps[1];
+};
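The alignment rule in the comment above translates into sizing code along
these lines (a sketch, not the driver's helper):

        static u16 ice_ex_dis_txq_item_size(u8 num_qs)
        {
                u16 sz = sizeof(struct ice_aqc_dis_txq_item) +
                         (num_qs - 1) * sizeof(__le16);

                /* pad when num_qs is even so the next group's
                 * parent_teid stays 4-byte aligned
                 */
                if (!(num_qs % 2))
                        sz += 2;
                return sz;
        }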
+
+/**
+ * struct ice_aq_desc - Admin Queue (AQ) descriptor
+ * @flags: ICE_AQ_FLAG_* flags
+ * @opcode: AQ command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_h: opaque data high-half
+ * @cookie_l: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts on the Admin Transmit Queue
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Receive Queue (ARQ) using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct ice_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ice_aqc_generic generic;
+ struct ice_aqc_get_ver get_ver;
+ struct ice_aqc_q_shutdown q_shutdown;
+ struct ice_aqc_req_res res_owner;
+ struct ice_aqc_manage_mac_read mac_read;
+ struct ice_aqc_manage_mac_write mac_write;
+ struct ice_aqc_clear_pxe clear_pxe;
+ struct ice_aqc_list_caps get_cap;
+ struct ice_aqc_get_phy_caps get_phy;
+ struct ice_aqc_set_phy_cfg set_phy;
+ struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_get_sw_cfg get_sw_conf;
+ struct ice_aqc_sw_rules sw_rules;
+ struct ice_aqc_get_topo get_topo;
+ struct ice_aqc_get_cfg_elem get_update_elem;
+ struct ice_aqc_query_txsched_res query_sched_res;
+ struct ice_aqc_add_move_delete_elem add_move_delete_elem;
+ struct ice_aqc_nvm nvm;
+ struct ice_aqc_get_set_rss_lut get_set_rss_lut;
+ struct ice_aqc_get_set_rss_key get_set_rss_key;
+ struct ice_aqc_add_txqs add_txqs;
+ struct ice_aqc_dis_txqs dis_txqs;
+ struct ice_aqc_add_get_update_free_vsi vsi_cmd;
+ struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
+ struct ice_aqc_set_event_mask set_event_mask;
+ struct ice_aqc_get_link_status get_link_status;
+ } params;
+};
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define ICE_AQ_LG_BUF 512
+
+#define ICE_AQ_FLAG_ERR_S 2
+#define ICE_AQ_FLAG_LB_S 9
+#define ICE_AQ_FLAG_RD_S 10
+#define ICE_AQ_FLAG_BUF_S 12
+#define ICE_AQ_FLAG_SI_S 13
+
+#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
+#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
+#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
+#define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */
+#define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */
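An indirect command sets ICE_AQ_FLAG_BUF so firmware knows an external
buffer is attached, adds ICE_AQ_FLAG_LB when that buffer exceeds
ICE_AQ_LG_BUF, and ICE_AQ_FLAG_RD when firmware must read the buffer rather
than fill it. A fragment-style sketch of how a send path might set these
(mirrors typical usage under those assumptions, not a quote of the driver):

        /* desc is a struct ice_aq_desc being prepared for send */
        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
        if (buf_size > ICE_AQ_LG_BUF)
                desc.flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
        if (cmd_writes_buffer)  /* hypothetical predicate */
                desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);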
+
+/* error codes */
+enum ice_aq_err {
+ ICE_AQ_RC_OK = 0, /* success */
+ ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
+ ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ ICE_AQ_RC_EEXIST = 13, /* object already exists */
+ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
+};
+
+/* Admin Queue command opcodes */
+enum ice_adminq_opc {
+ /* AQ commands */
+ ice_aqc_opc_get_ver = 0x0001,
+ ice_aqc_opc_q_shutdown = 0x0003,
+
+ /* resource ownership */
+ ice_aqc_opc_req_res = 0x0008,
+ ice_aqc_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ice_aqc_opc_list_func_caps = 0x000A,
+ ice_aqc_opc_list_dev_caps = 0x000B,
+
+ /* manage MAC address */
+ ice_aqc_opc_manage_mac_read = 0x0107,
+ ice_aqc_opc_manage_mac_write = 0x0108,
+
+ /* PXE */
+ ice_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* internal switch commands */
+ ice_aqc_opc_get_sw_cfg = 0x0200,
+
+ /* Alloc/Free/Get Resources */
+ ice_aqc_opc_alloc_res = 0x0208,
+ ice_aqc_opc_free_res = 0x0209,
+
+ /* VSI commands */
+ ice_aqc_opc_add_vsi = 0x0210,
+ ice_aqc_opc_update_vsi = 0x0211,
+ ice_aqc_opc_free_vsi = 0x0213,
+
+ /* switch rules population commands */
+ ice_aqc_opc_add_sw_rules = 0x02A0,
+ ice_aqc_opc_update_sw_rules = 0x02A1,
+ ice_aqc_opc_remove_sw_rules = 0x02A2,
+
+ ice_aqc_opc_clear_pf_cfg = 0x02A4,
+
+ /* transmit scheduler commands */
+ ice_aqc_opc_get_dflt_topo = 0x0400,
+ ice_aqc_opc_add_sched_elems = 0x0401,
+ ice_aqc_opc_suspend_sched_elems = 0x0409,
+ ice_aqc_opc_resume_sched_elems = 0x040A,
+ ice_aqc_opc_delete_sched_elems = 0x040F,
+ ice_aqc_opc_query_sched_res = 0x0412,
+
+ /* PHY commands */
+ ice_aqc_opc_get_phy_caps = 0x0600,
+ ice_aqc_opc_set_phy_cfg = 0x0601,
+ ice_aqc_opc_restart_an = 0x0605,
+ ice_aqc_opc_get_link_status = 0x0607,
+ ice_aqc_opc_set_event_mask = 0x0613,
+
+ /* NVM commands */
+ ice_aqc_opc_nvm_read = 0x0701,
+
+ /* RSS commands */
+ ice_aqc_opc_set_rss_key = 0x0B02,
+ ice_aqc_opc_set_rss_lut = 0x0B03,
+ ice_aqc_opc_get_rss_key = 0x0B04,
+ ice_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* TX queue handling commands/events */
+ ice_aqc_opc_add_txqs = 0x0C30,
+ ice_aqc_opc_dis_txqs = 0x0C31,
+};
+
+#endif /* _ICE_ADMINQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
new file mode 100644
index 000000000000..385f5d425d19
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -0,0 +1,2233 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_sched.h"
+#include "ice_adminq_cmd.h"
+
+#define ICE_PF_RESET_WAIT_COUNT 200
+
+#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \
+ wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \
+ ((ICE_RX_OPC_MDID << \
+ GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
+ GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
+ (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
+ GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
+
+#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \
+ wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \
+ (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
+ GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
+ (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
+ GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
+ (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
+ GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
+ (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
+ GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
+
+/**
+ * ice_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the MAC type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ */
+static enum ice_status ice_set_mac_type(struct ice_hw *hw)
+{
+ if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
+ return ICE_ERR_DEVICE_NOT_SUPPORTED;
+
+ hw->mac_type = ICE_MAC_GENERIC;
+ return 0;
+}
+
+/**
+ * ice_clear_pf_cfg - Clear PF configuration
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_manage_mac_read - manage MAC address read command
+ * @hw: pointer to the hw struct
+ * @buf: a virtual buffer to hold the manage MAC read response
+ * @buf_size: Size of the virtual buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function is used to return the per-PF station MAC address (0x0107).
+ * NOTE: Upon successful completion of this command, MAC address information
+ * is returned in the user-specified buffer, which should be interpreted as
+ * a "manage_mac_read" response.
+ * The returned MAC addresses are stored in the HW struct (port.mac).
+ * ice_aq_discover_caps is expected to be called before this function.
+ */
+static enum ice_status
+ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_manage_mac_read_resp *resp;
+ struct ice_aqc_manage_mac_read *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 flags;
+
+ cmd = &desc.params.mac_read;
+
+ if (buf_size < sizeof(*resp))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ return status;
+
+ resp = (struct ice_aqc_manage_mac_read_resp *)buf;
+ flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
+
+ if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
+ ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
+ return ICE_ERR_CFG;
+ }
+
+ ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
+ ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
+ return 0;
+}
+
+/**
+ * ice_aq_get_phy_caps - returns PHY capabilities
+ * @pi: port information structure
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ * @cd: pointer to command details structure or NULL
+ *
+ * Returns the various PHY capabilities supported on the Port (0x0600)
+ */
+static enum ice_status
+ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
+ struct ice_aqc_get_phy_caps_data *pcaps,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
+
+ cmd->param0 |= cpu_to_le16(report_mode);
+ status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
+
+ if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
+ pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
+
+ return status;
+}
+
+/**
+ * ice_get_media_type - Gets media type
+ * @pi: port information structure
+ */
+static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
+{
+ struct ice_link_status *hw_link_info;
+
+ if (!pi)
+ return ICE_MEDIA_UNKNOWN;
+
+ hw_link_info = &pi->phy.link_info;
+
+ if (hw_link_info->phy_type_low) {
+ switch (hw_link_info->phy_type_low) {
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ return ICE_MEDIA_FIBER;
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ return ICE_MEDIA_BASET;
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ return ICE_MEDIA_DA;
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ return ICE_MEDIA_BACKPLANE;
+ }
+ }
+
+ return ICE_MEDIA_UNKNOWN;
+}
+
+/**
+ * ice_aq_get_link_info
+ * @pi: port information structure
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Link Status (0x0607). Returns the link status of the adapter.
+ */
+enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+ struct ice_link_status *link, struct ice_sq_cd *cd)
+{
+ struct ice_link_status *hw_link_info_old, *hw_link_info;
+ struct ice_aqc_get_link_status_data link_data = { 0 };
+ struct ice_aqc_get_link_status *resp;
+ enum ice_media_type *hw_media_type;
+ struct ice_fc_info *hw_fc_info;
+ bool tx_pause, rx_pause;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 cmd_flags;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ hw_link_info_old = &pi->phy.link_info_old;
+ hw_media_type = &pi->phy.media_type;
+ hw_link_info = &pi->phy.link_info;
+ hw_fc_info = &pi->fc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
+ cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = cpu_to_le16(cmd_flags);
+ resp->lport_num = pi->lport;
+
+ status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
+ cd);
+
+ if (status)
+ return status;
+
+ /* save off old link status information */
+ *hw_link_info_old = *hw_link_info;
+
+ /* update current link status information */
+ hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
+ hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ *hw_media_type = ice_get_media_type(pi);
+ hw_link_info->link_info = link_data.link_info;
+ hw_link_info->an_info = link_data.an_info;
+ hw_link_info->ext_info = link_data.ext_info;
+ hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
+
+ /* update fc info */
+ tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ICE_FC_FULL;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
+ else
+ hw_fc_info->current_mode = ICE_FC_NONE;
+
+ hw_link_info->lse_ena =
+ !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
+
+ /* save link status information */
+ if (link)
+ *link = *hw_link_info;
+
+ /* flag cleared so calling functions don't call AQ again */
+ pi->phy.get_link_info = false;
+
+ return status;
+}
+
+/**
+ * ice_init_flex_parser - initialize rx flex parser
+ * @hw: pointer to the hardware structure
+ *
+ * Function to initialize flex descriptors
+ */
+static void ice_init_flex_parser(struct ice_hw *hw)
+{
+ u8 idx = 0;
+
+ ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0);
+ ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1);
+ ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2);
+ ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3);
+ ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++);
+ ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
+ ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI,
+ ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100,
+ idx++);
+ ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN,
+ ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++);
+ ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
+ ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+}
+
+/**
+ * ice_init_fltr_mgmt_struct - initializes filter management list and locks
+ * @hw: pointer to the hw struct
+ */
+static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw;
+
+ hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*hw->switch_info), GFP_KERNEL);
+ sw = hw->switch_info;
+
+ if (!sw)
+ return ICE_ERR_NO_MEMORY;
+
+ INIT_LIST_HEAD(&sw->vsi_list_map_head);
+
+ mutex_init(&sw->mac_list_lock);
+ INIT_LIST_HEAD(&sw->mac_list_head);
+
+ mutex_init(&sw->vlan_list_lock);
+ INIT_LIST_HEAD(&sw->vlan_list_head);
+
+ mutex_init(&sw->eth_m_list_lock);
+ INIT_LIST_HEAD(&sw->eth_m_list_head);
+
+ mutex_init(&sw->promisc_list_lock);
+ INIT_LIST_HEAD(&sw->promisc_list_head);
+
+ mutex_init(&sw->mac_vlan_list_lock);
+ INIT_LIST_HEAD(&sw->mac_vlan_list_head);
+
+ return 0;
+}
+
+/**
+ * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
+ * @hw: pointer to the hw struct
+ */
+static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_vsi_list_map_info *v_pos_map;
+ struct ice_vsi_list_map_info *v_tmp_map;
+
+ list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
+ list_entry) {
+ list_del(&v_pos_map->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), v_pos_map);
+ }
+
+ mutex_destroy(&sw->mac_list_lock);
+ mutex_destroy(&sw->vlan_list_lock);
+ mutex_destroy(&sw->eth_m_list_lock);
+ mutex_destroy(&sw->promisc_list_lock);
+ mutex_destroy(&sw->mac_vlan_list_lock);
+
+ devm_kfree(ice_hw_to_dev(hw), sw);
+}
+
+/**
+ * ice_init_hw - main hardware initialization routine
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_init_hw(struct ice_hw *hw)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+ u16 mac_buf_len;
+ void *mac_buf;
+
+ /* Set MAC type based on DeviceID */
+ status = ice_set_mac_type(hw);
+ if (status)
+ return status;
+
+ hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
+ PF_FUNC_RID_FUNC_NUM_M) >>
+ PF_FUNC_RID_FUNC_NUM_S;
+
+ status = ice_reset(hw, ICE_RESET_PFR);
+ if (status)
+ return status;
+
+ /* set these values to minimum allowed */
+ hw->itr_gran_200 = ICE_ITR_GRAN_MIN_200;
+ hw->itr_gran_100 = ICE_ITR_GRAN_MIN_100;
+ hw->itr_gran_50 = ICE_ITR_GRAN_MIN_50;
+ hw->itr_gran_25 = ICE_ITR_GRAN_MIN_25;
+
+ status = ice_init_all_ctrlq(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ status = ice_clear_pf_cfg(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ ice_clear_pxe_mode(hw);
+
+ status = ice_init_nvm(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ status = ice_get_caps(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*hw->port_info), GFP_KERNEL);
+ if (!hw->port_info) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll_cqinit;
+ }
+
+ /* set the back pointer to hw */
+ hw->port_info->hw = hw;
+
+ /* Initialize port_info struct with switch configuration data */
+ status = ice_get_initial_sw_cfg(hw);
+ if (status)
+ goto err_unroll_alloc;
+
+ hw->evb_veb = true;
+
+ /* Query the allocated resources for tx scheduler */
+ status = ice_sched_query_res_alloc(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Failed to get scheduler allocated resources\n");
+ goto err_unroll_alloc;
+ }
+
+ /* Initialize port_info struct with scheduler data */
+ status = ice_sched_init_port(hw->port_info);
+ if (status)
+ goto err_unroll_sched;
+
+ pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll_sched;
+ }
+
+ /* Initialize port_info struct with PHY capabilities */
+ status = ice_aq_get_phy_caps(hw->port_info, false,
+ ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
+ devm_kfree(ice_hw_to_dev(hw), pcaps);
+ if (status)
+ goto err_unroll_sched;
+
+ /* Initialize port_info struct with link information */
+ status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
+ if (status)
+ goto err_unroll_sched;
+
+ status = ice_init_fltr_mgmt_struct(hw);
+ if (status)
+ goto err_unroll_sched;
+
+ /* Get port MAC information */
+ mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
+ mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
+
+ if (!mac_buf)
+ goto err_unroll_fltr_mgmt_struct;
+
+ status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
+ devm_kfree(ice_hw_to_dev(hw), mac_buf);
+
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
+
+ ice_init_flex_parser(hw);
+
+ return 0;
+
+err_unroll_fltr_mgmt_struct:
+ ice_cleanup_fltr_mgmt_struct(hw);
+err_unroll_sched:
+ ice_sched_cleanup_all(hw);
+err_unroll_alloc:
+ devm_kfree(ice_hw_to_dev(hw), hw->port_info);
+err_unroll_cqinit:
+ ice_shutdown_all_ctrlq(hw);
+ return status;
+}
+
+/**
+ * ice_deinit_hw - unroll initialization operations done by ice_init_hw
+ * @hw: pointer to the hardware structure
+ */
+void ice_deinit_hw(struct ice_hw *hw)
+{
+ ice_sched_cleanup_all(hw);
+ ice_shutdown_all_ctrlq(hw);
+
+ if (hw->port_info) {
+ devm_kfree(ice_hw_to_dev(hw), hw->port_info);
+ hw->port_info = NULL;
+ }
+
+ ice_cleanup_fltr_mgmt_struct(hw);
+}
+
+/**
+ * ice_check_reset - Check to see if a global reset is complete
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_check_reset(struct ice_hw *hw)
+{
+ u32 cnt, reg = 0, grst_delay;
+
+ /* Poll for Device Active state in case a recent CORER, GLOBR,
+ * or EMPR has occurred. The grst delay value is in 100ms units.
+ * Add 1sec for outstanding AQ commands that can take a long time.
+ */
+ grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
+ GLGEN_RSTCTL_GRSTDEL_S) + 10;
+
+ for (cnt = 0; cnt < grst_delay; cnt++) {
+ mdelay(100);
+ reg = rd32(hw, GLGEN_RSTAT);
+ if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
+ break;
+ }
+
+ if (cnt == grst_delay) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Global reset polling failed to complete.\n");
+ return ICE_ERR_RESET_FAILED;
+ }
+
+#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
+ GLNVM_ULD_GLOBR_DONE_M)
+
+ /* Device is Active; check Global Reset processes are done */
+ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
+ if (reg == ICE_RESET_DONE_MASK) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Global reset processes done. %d\n", cnt);
+ break;
+ }
+ mdelay(10);
+ }
+
+ if (cnt == ICE_PF_RESET_WAIT_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
+ reg);
+ return ICE_ERR_RESET_FAILED;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * If a global reset has been triggered, this function checks
+ * for its completion and then issues the PF reset
+ */
+static enum ice_status ice_pf_reset(struct ice_hw *hw)
+{
+ u32 cnt, reg;
+
+ /* If at function entry a global reset was already in progress, i.e.
+ * state is not 'device active' or any of the reset done bits are not
+ * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
+ * global reset is done.
+ */
+ if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
+ (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
+ /* poll on global reset currently in progress until done */
+ if (ice_check_reset(hw))
+ return ICE_ERR_RESET_FAILED;
+
+ return 0;
+ }
+
+ /* Reset the PF */
+ reg = rd32(hw, PFGEN_CTRL);
+
+ wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
+
+ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, PFGEN_CTRL);
+ if (!(reg & PFGEN_CTRL_PFSWR_M))
+ break;
+
+ mdelay(1);
+ }
+
+ if (cnt == ICE_PF_RESET_WAIT_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "PF reset polling failed to complete.\n");
+ return ICE_ERR_RESET_FAILED;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_reset - Perform different types of reset
+ * @hw: pointer to the hardware structure
+ * @req: reset request
+ *
+ * This function triggers a reset as specified by the req parameter.
+ *
+ * Note:
+ * If anything other than a PF reset is triggered, PXE mode is restored.
+ * This has to be cleared using ice_clear_pxe_mode again, once the AQ
+ * interface has been restored in the rebuild flow.
+ */
+enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
+{
+ u32 val = 0;
+
+ switch (req) {
+ case ICE_RESET_PFR:
+ return ice_pf_reset(hw);
+ case ICE_RESET_CORER:
+ ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
+ val = GLGEN_RTRIG_CORER_M;
+ break;
+ case ICE_RESET_GLOBR:
+ ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
+ val = GLGEN_RTRIG_GLOBR_M;
+ break;
+ }
+
+ val |= rd32(hw, GLGEN_RTRIG);
+ wr32(hw, GLGEN_RTRIG, val);
+ ice_flush(hw);
+
+ /* wait for the FW to be ready */
+ return ice_check_reset(hw);
+}
+
+/**
+ * ice_copy_rxq_ctx_to_hw
+ * @hw: pointer to the hardware structure
+ * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_index: the index of the rx queue
+ *
+ * Copies rxq context from dense structure to hw register space
+ */
+static enum ice_status
+ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+{
+ u8 i;
+
+ if (!ice_rxq_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Copy each dword separately to hw */
+ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+ wr32(hw, QRX_CONTEXT(i, rxq_index),
+ *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
+ *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+ }
+
+ return 0;
+}
+
+/* LAN Rx Queue Context */
+static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
+ ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
+ ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
+ ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
+ ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
+ ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
+ ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
+ ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
+ ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
+ ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
+ ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
+ ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
+ ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
+ ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
+ ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
+ ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
+ ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
+ ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
+ ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
+ { 0 }
+};
+
+/**
+ * ice_write_rxq_ctx
+ * @hw: pointer to the hardware structure
+ * @rlan_ctx: pointer to the rxq context
+ * @rxq_index: the index of the rx queue
+ *
+ * Converts rxq context from sparse to dense structure and then writes
+ * it to hw register space
+ */
+enum ice_status
+ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
+{
+ u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+
+ ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
+ return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+}
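A typical caller fills only the sparse fields it needs and lets
ice_write_rxq_ctx() do the packing. A usage sketch; the 128-byte units for
base and dbuf are assumptions drawn from the field widths, not stated in
this patch:

        /* Sketch: program Rx queue rxq_index from a descriptor ring */
        struct ice_rlan_ctx rlan_ctx = { 0 };

        rlan_ctx.base = ring_dma >> 7;          /* assumed 128-byte units */
        rlan_ctx.qlen = ring_count;
        rlan_ctx.dbuf = rx_buf_len >> 7;        /* assumed 128-byte units */
        ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);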
+
+/* LAN Tx Queue Context */
+const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
+ ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
+ ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
+ ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
+ ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
+ ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
+ ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
+ ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
+ ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
+ ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
+ ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
+ ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
+ ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
+ ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
+ ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
+ ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
+ ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
+ ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
+ ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
+ ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
+ ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
+ ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
+ ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
+ ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
+ ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
+ ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 110, 171),
+ { 0 }
+};
+
+/**
+ * ice_debug_cq
+ * @hw: pointer to the hardware structure
+ * @mask: debug mask
+ * @desc: pointer to control queue descriptor
+ * @buf: pointer to command buffer
+ * @buf_len: max length of buf
+ *
+ * Dumps debug log about control command with descriptor contents.
+ */
+void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
+ void *buf, u16 buf_len)
+{
+ struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
+ u16 len;
+
+#ifndef CONFIG_DYNAMIC_DEBUG
+ if (!(mask & hw->debug_mask))
+ return;
+#endif
+
+ if (!desc)
+ return;
+
+ len = le16_to_cpu(cq_desc->datalen);
+
+ ice_debug(hw, mask,
+ "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ le16_to_cpu(cq_desc->opcode),
+ le16_to_cpu(cq_desc->flags),
+ le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
+ ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->cookie_high),
+ le32_to_cpu(cq_desc->cookie_low));
+ ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.param0),
+ le32_to_cpu(cq_desc->params.generic.param1));
+ ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ le32_to_cpu(cq_desc->params.generic.addr_high),
+ le32_to_cpu(cq_desc->params.generic.addr_low));
+ if (buf && cq_desc->datalen != 0) {
+ ice_debug(hw, mask, "Buffer:\n");
+ if (buf_len < len)
+ len = buf_len;
+
+ ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
+ }
+}
+
+/* FW Admin Queue command wrappers */
+
+/**
+ * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ *
+ * Helper function to send FW Admin Queue commands to the FW Admin Queue.
+ */
+enum ice_status
+ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_get_fw_ver
+ * @hw: pointer to the hw struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get the firmware version (0x0001) from the admin queue commands
+ */
+enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_ver *resp;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ resp = &desc.params.get_ver;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ if (!status) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = le32_to_cpu(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_q_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well (0x0003).
+ */
+enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
+{
+ struct ice_aqc_q_shutdown *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.q_shutdown;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_req_res
+ * @hw: pointer to the hw struct
+ * @res: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cd: pointer to command details structure or NULL
+ *
+ * requests common resource using the admin queue commands (0x0008)
+ */
+static enum ice_status
+ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
+ enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_req_res *cmd_resp;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
+
+ cmd_resp->res_id = cpu_to_le16(res);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->res_number = cpu_to_le32(sdp_number);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by someone else, the command completes with
+ * busy return value and the timeout field indicates the maximum time
+ * the current owner of the resource has to free it.
+ */
+ if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * ice_aq_release_res
+ * @hw: pointer to the hw struct
+ * @res: resource id
+ * @sdp_number: resource number
+ * @cd: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands (0x0009)
+ */
+static enum ice_status
+ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_req_res *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
+
+ cmd->res_id = cpu_to_le16(res);
+ cmd->res_number = cpu_to_le32(sdp_number);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_acquire_res
+ * @hw: pointer to the HW structure
+ * @res: resource id
+ * @access: access type (read or write)
+ *
+ * This function will attempt to acquire the ownership of a resource.
+ */
+enum ice_status
+ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
+ enum ice_aq_res_access_type access)
+{
+#define ICE_RES_POLLING_DELAY_MS 10
+ u32 delay = ICE_RES_POLLING_DELAY_MS;
+ enum ice_status status;
+ u32 time_left = 0;
+ u32 timeout;
+
+ status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
+
+ /* An admin queue return code of ICE_AQ_RC_EEXIST means that another
+ * driver has previously acquired the resource and performed any
+ * necessary updates; in this case the caller does not obtain the
+ * resource and has no further work to do.
+ */
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
+ status = ICE_ERR_AQ_NO_WORK;
+ goto ice_acquire_res_exit;
+ }
+
+ if (status)
+ ice_debug(hw, ICE_DBG_RES,
+ "resource %d acquire type %d failed.\n", res, access);
+
+ /* If necessary, poll until the current lock owner times out */
+ timeout = time_left;
+ while (status && timeout && time_left) {
+ mdelay(delay);
+ timeout = (timeout > delay) ? timeout - delay : 0;
+ status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
+
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) {
+ /* lock free, but no work to do */
+ status = ICE_ERR_AQ_NO_WORK;
+ break;
+ }
+
+ if (!status)
+ /* lock acquired */
+ break;
+ }
+ if (status && status != ICE_ERR_AQ_NO_WORK)
+ ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
+
+ice_acquire_res_exit:
+ if (status == ICE_ERR_AQ_NO_WORK) {
+ if (access == ICE_RES_WRITE)
+ ice_debug(hw, ICE_DBG_RES,
+ "resource indicates no work to do.\n");
+ else
+ ice_debug(hw, ICE_DBG_RES,
+ "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
+ }
+ return status;
+}
+
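+/* Usage sketch (illustrative, not part of this patch): callers are
+ * expected to bracket accesses to a shared resource with this pair.
+ * ICE_NVM_RES_ID and ICE_RES_READ are assumed IDs from ice_type.h.
+ *
+ *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ);
+ *	if (status == ICE_ERR_AQ_NO_WORK)
+ *		return 0;	(another owner already did the work)
+ *	if (status)
+ *		return status;
+ *	(... access the shared resource ...)
+ *	ice_release_res(hw, ICE_NVM_RES_ID);
+ */
+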
+/**
+ * ice_release_res
+ * @hw: pointer to the HW structure
+ * @res: resource id
+ *
+ * This function will release a resource using the proper Admin Command.
+ */
+void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
+{
+ enum ice_status status;
+ u32 total_delay = 0;
+
+ status = ice_aq_release_res(hw, res, 0, NULL);
+
+ /* in rare cases, trying to release the resource results in an
+ * admin queue timeout, so retry until the command timeout expires
+ */
+ while ((status == ICE_ERR_AQ_TIMEOUT) &&
+ (total_delay < hw->adminq.sq_cmd_timeout)) {
+ mdelay(1);
+ status = ice_aq_release_res(hw, res, 0, NULL);
+ total_delay++;
+ }
+}
+
+/**
+ * ice_parse_caps - parse function/device capabilities
+ * @hw: pointer to the hw struct
+ * @buf: pointer to a buffer containing function/device capability records
+ * @cap_count: number of capability records in the list
+ * @opc: type of capabilities list to parse
+ *
+ * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
+ */
+static void
+ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
+ enum ice_adminq_opc opc)
+{
+ struct ice_aqc_list_caps_elem *cap_resp;
+ struct ice_hw_func_caps *func_p = NULL;
+ struct ice_hw_dev_caps *dev_p = NULL;
+ struct ice_hw_common_caps *caps;
+ u32 i;
+
+ if (!buf)
+ return;
+
+ cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+
+ if (opc == ice_aqc_opc_list_dev_caps) {
+ dev_p = &hw->dev_caps;
+ caps = &dev_p->common_cap;
+ } else if (opc == ice_aqc_opc_list_func_caps) {
+ func_p = &hw->func_caps;
+ caps = &func_p->common_cap;
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
+ return;
+ }
+
+ for (i = 0; caps && i < cap_count; i++, cap_resp++) {
+ u32 logical_id = le32_to_cpu(cap_resp->logical_id);
+ u32 phys_id = le32_to_cpu(cap_resp->phys_id);
+ u32 number = le32_to_cpu(cap_resp->number);
+ u16 cap = le16_to_cpu(cap_resp->cap);
+
+ switch (cap) {
+ case ICE_AQC_CAPS_VSI:
+ if (dev_p) {
+ dev_p->num_vsi_allocd_to_host = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Dev.VSI cnt = %d\n",
+ dev_p->num_vsi_allocd_to_host);
+ } else if (func_p) {
+ func_p->guaranteed_num_vsi = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Func.VSI cnt = %d\n",
+ func_p->guaranteed_num_vsi);
+ }
+ break;
+ case ICE_AQC_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: RSS table size = %d\n",
+ caps->rss_table_size);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: RSS table width = %d\n",
+ caps->rss_table_entry_width);
+ break;
+ case ICE_AQC_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Rx first queue ID = %d\n",
+ caps->rxq_first_id);
+ break;
+ case ICE_AQC_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Num Tx Qs = %d\n", caps->num_txq);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Tx first queue ID = %d\n",
+ caps->txq_first_id);
+ break;
+ case ICE_AQC_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: MSIX vector count = %d\n",
+ caps->num_msix_vectors);
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: MSIX first vector index = %d\n",
+ caps->msix_vector_first_id);
+ break;
+ case ICE_AQC_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ if (dev_p)
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Dev.MaxMTU = %d\n",
+ caps->max_mtu);
+ else if (func_p)
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: func.MaxMTU = %d\n",
+ caps->max_mtu);
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Unknown capability[%d]: 0x%x\n", i,
+ cap);
+ break;
+ }
+ }
+}
+
+/**
+ * ice_aq_discover_caps - query function/device capabilities
+ * @hw: pointer to the hw struct
+ * @buf: a virtual buffer to hold the capabilities
+ * @buf_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buf size needed if AQ err==ENOMEM
+ * @opc: capabilities type to discover - pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get the function(0x000a)/device(0x000b) capabilities description from
+ * the firmware.
+ */
+static enum ice_status
+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u16 *data_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_list_caps *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ice_aqc_opc_list_func_caps &&
+ opc != ice_aqc_opc_list_dev_caps)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status)
+ ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
+ *data_size = le16_to_cpu(desc.datalen);
+
+ return status;
+}
+
+/**
+ * ice_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_caps(struct ice_hw *hw)
+{
+ enum ice_status status;
+ u16 data_size = 0;
+ u16 cbuf_len;
+ u8 retries;
+
+ /* The driver doesn't know how many capabilities the device will return
+ * so the buffer size required isn't known ahead of time. The driver
+ * starts with cbuf_len and if this turns out to be insufficient, the
+ * device returns ICE_AQ_RC_ENOMEM and also the buffer size it needs.
+ * The driver then allocates the buffer of this size and retries the
+ * operation. So it follows that the retry count is 2.
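+ * For example, assuming a 32 byte struct ice_aqc_list_caps_elem, the
+ * first attempt uses a 40 * 32 = 1280 byte buffer; if firmware answers
+ * ICE_AQ_RC_ENOMEM with a datalen of, say, 2048, the retry allocates
+ * 2048 bytes.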
+ */
+#define ICE_GET_CAP_BUF_COUNT 40
+#define ICE_GET_CAP_RETRY_COUNT 2
+
+ cbuf_len = ICE_GET_CAP_BUF_COUNT *
+ sizeof(struct ice_aqc_list_caps_elem);
+
+ retries = ICE_GET_CAP_RETRY_COUNT;
+
+ do {
+ void *cbuf;
+
+ cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
+ if (!cbuf)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &data_size,
+ ice_aqc_opc_list_func_caps, NULL);
+ devm_kfree(ice_hw_to_dev(hw), cbuf);
+
+ if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
+ break;
+
+ /* If ENOMEM is returned, try again with bigger buffer */
+ cbuf_len = data_size;
+ } while (--retries);
+
+ return status;
+}
+
+/**
+ * ice_aq_manage_mac_write - manage MAC address write command
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
+ * @flags: flags to control write behavior
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function is used to write MAC address to the NVM (0x0108).
+ */
+enum ice_status
+ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_manage_mac_write *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.mac_write;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
+
+ cmd->flags = flags;
+
+ /* Prep values for sah and sal */
+ cmd->sah = htons(*((u16 *)mac_addr));
+ cmd->sal = htonl(*((u32 *)(mac_addr + 2)));
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
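+/* Worked byte-order example (illustrative): for MAC 00:11:22:33:44:55,
+ * the htons()/htonl() conversions above place bytes 00 11 in sah and
+ * bytes 22 33 44 55 in sal, i.e. the address is handed to firmware in
+ * network byte order.
+ */
+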
+/**
+ * ice_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ *
+ * Tell the firmware that the driver is taking over from PXE (0x0110).
+ */
+static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
+ desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_clear_pxe_mode - clear PXE operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ */
+void ice_clear_pxe_mode(struct ice_hw *hw)
+{
+ if (ice_check_sq_alive(hw, &hw->adminq))
+ ice_aq_clear_pxe_mode(hw);
+}
+
+/**
+ * ice_aq_set_phy_cfg
+ * @hw: pointer to the hw struct
+ * @lport: logical port number
+ * @cfg: structure with PHY configuration data to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters supported on the Port.
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters. This status will be indicated by the command response (0x0601).
+ */
+static enum ice_status
+ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
+ struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_phy_cfg *cmd;
+ struct ice_aq_desc desc;
+
+ if (!cfg)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.set_phy;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
+ cmd->lport_num = lport;
+
+ return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+}
+
+/**
+ * ice_update_link_info - update status of the HW network link
+ * @pi: port info structure of the interested logical port
+ */
+static enum ice_status
+ice_update_link_info(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_phy_info *phy_info;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+
+ pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ phy_info = &pi->phy;
+ status = ice_aq_get_link_info(pi, true, NULL, NULL);
+ if (status)
+ goto out;
+
+ if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ pcaps, NULL);
+ if (status)
+ goto out;
+
+ memcpy(phy_info->link_info.module_type, &pcaps->module_type,
+ sizeof(phy_info->link_info.module_type));
+ }
+out:
+ devm_kfree(ice_hw_to_dev(hw), pcaps);
+ return status;
+}
+
+/**
+ * ice_set_fc
+ * @pi: port information structure
+ * @aq_failures: pointer to status code, specific to ice_set_fc routine
+ * @atomic_restart: enable automatic link update
+ *
+ * Set the requested flow control mode.
+ */
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart)
+{
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+ u8 pause_mask = 0x0;
+ struct ice_hw *hw;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ hw = pi->hw;
+ *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
+
+ switch (pi->fc.req_mode) {
+ case ICE_FC_FULL:
+ pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ICE_FC_RX_PAUSE:
+ pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ICE_FC_TX_PAUSE:
+ pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Get the current phy config */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status) {
+ *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
+ goto out;
+ }
+
+ /* clear the old pause settings */
+ cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
+ ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+ /* set the new capabilities */
+ cfg.caps |= pause_mask;
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps->caps) {
+ int retry_count, retry_max = 10;
+
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ cfg.phy_type_low = pcaps->phy_type_low;
+ cfg.low_power_ctrl = pcaps->low_power_ctrl;
+ cfg.eee_cap = pcaps->eee_cap;
+ cfg.eeer_value = pcaps->eeer_value;
+ cfg.link_fec_opt = pcaps->link_fec_options;
+
+ status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
+ if (status) {
+ *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
+ goto out;
+ }
+
+ /* Update the link info
+ * It sometimes takes a long time for the link to
+ * come back from the atomic reset, so wait a
+ * little between retries.
+ */
+ for (retry_count = 0; retry_count < retry_max; retry_count++) {
+ status = ice_update_link_info(pi);
+
+ if (!status)
+ break;
+
+ mdelay(100);
+ }
+
+ if (status)
+ *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
+ }
+
+out:
+ devm_kfree(ice_hw_to_dev(hw), pcaps);
+ return status;
+}
+
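+/* Caller sketch (illustrative; pi comes from the caller): request
+ * symmetric flow control and let the link restart automatically.
+ *
+ *	u8 aq_failures;
+ *
+ *	pi->fc.req_mode = ICE_FC_FULL;
+ *	status = ice_set_fc(pi, &aq_failures, true);
+ */
+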
+/**
+ * ice_get_link_status - get status of the HW network link
+ * @pi: port information structure
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * Sets link_up to true if the link is up and false if it is down. The
+ * value of link_up is invalid if the returned status is non-zero. As a
+ * side effect of this call, link status reporting becomes enabled.
+ */
+enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
+{
+ struct ice_phy_info *phy_info;
+ enum ice_status status = 0;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ phy_info = &pi->phy;
+
+ if (phy_info->get_link_info) {
+ status = ice_update_link_info(pi);
+
+ if (status)
+ ice_debug(pi->hw, ICE_DBG_LINK,
+ "get link status error, status = %d\n",
+ status);
+ }
+
+ *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
+
+ return status;
+}
+
+/**
+ * ice_aq_set_link_restart_an
+ * @pi: pointer to the port information structure
+ * @ena_link: if true: enable link, if false: disable link
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ */
+enum ice_status
+ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_restart_an *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
+
+ cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
+ cmd->lport_num = pi->lport;
+ if (ena_link)
+ cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
+
+ return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_event_mask
+ * @hw: pointer to the hw struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set event mask (0x0613)
+ */
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_event_mask *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * __ice_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: VSI FW index
+ * @lut_type: LUT table type
+ * @lut: pointer to the LUT buffer provided by the caller
+ * @lut_size: size of the LUT buffer
+ * @glob_lut_idx: global LUT index
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
+ */
+static enum ice_status
+__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ u16 lut_size, u8 glob_lut_idx, bool set)
+{
+ struct ice_aqc_get_set_rss_lut *cmd_resp;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 flags = 0;
+
+ cmd_resp = &desc.params.get_set_rss_lut;
+
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
+ }
+
+ cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
+ ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
+ ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
+ ICE_AQC_GSET_RSS_LUT_VSI_VALID);
+
+ switch (lut_type) {
+ case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
+ case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
+ case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
+ flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
+ break;
+ default:
+ status = ICE_ERR_PARAM;
+ goto ice_aq_get_set_rss_lut_exit;
+ }
+
+ if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
+ flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
+ ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
+
+ if (!set)
+ goto ice_aq_get_set_rss_lut_send;
+ } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+ if (!set)
+ goto ice_aq_get_set_rss_lut_send;
+ } else {
+ goto ice_aq_get_set_rss_lut_send;
+ }
+
+ /* LUT size is only valid for Global and PF table types */
+ if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
+ (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ } else {
+ status = ICE_ERR_PARAM;
+ goto ice_aq_get_set_rss_lut_exit;
+ }
+
+ice_aq_get_set_rss_lut_send:
+ cmd_resp->flags = cpu_to_le16(flags);
+ status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
+
+ice_aq_get_set_rss_lut_exit:
+ return status;
+}
+
+/**
+ * ice_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: VSI FW index
+ * @lut_type: LUT table type
+ * @lut: pointer to the LUT buffer provided by the caller
+ * @lut_size: size of the LUT buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ */
+enum ice_status
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ u16 lut_size)
+{
+ return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
+ false);
+}
+
+/**
+ * ice_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: VSI FW index
+ * @lut_type: LUT table type
+ * @lut: pointer to the LUT buffer provided by the caller
+ * @lut_size: size of the LUT buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ */
+enum ice_status
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ u16 lut_size)
+{
+ return __ice_aq_get_set_rss_lut(hw, vsi_id, lut_type, lut, lut_size, 0,
+ true);
+}
+
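+/* Usage sketch (illustrative): program a 512 entry PF lookup table that
+ * spreads flows across the first num_rxq queues. The 512 byte size is
+ * assumed to match ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512.
+ *
+ *	u8 lut[512];
+ *	int i;
+ *
+ *	for (i = 0; i < 512; i++)
+ *		lut[i] = i % num_rxq;
+ *	status = ice_aq_set_rss_lut(hw, vsi_id,
+ *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
+ *				    lut, sizeof(lut));
+ */
+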
+/**
+ * __ice_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: VSI FW index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get (0x0B04) or set (0x0B02) the RSS key per VSI
+ */
+static enum ice_status
+__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ struct ice_aqc_get_set_rss_keys *key,
+ bool set)
+{
+ struct ice_aqc_get_set_rss_key *cmd_resp;
+ u16 key_size = sizeof(*key);
+ struct ice_aq_desc desc;
+
+ cmd_resp = &desc.params.get_set_rss_key;
+
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
+ }
+
+ cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
+ ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
+ ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
+ ICE_AQC_GSET_RSS_KEY_VSI_VALID);
+
+ return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
+}
+
+/**
+ * ice_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: VSI FW index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ */
+enum ice_status
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
+ struct ice_aqc_get_set_rss_keys *key)
+{
+ return __ice_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * ice_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: VSI FW index
+ * @keys: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ */
+enum ice_status
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ struct ice_aqc_get_set_rss_keys *keys)
+{
+ return __ice_aq_get_set_rss_key(hw, vsi_id, keys, true);
+}
+
+/**
+ * ice_aq_add_lan_txq
+ * @hw: pointer to the hardware structure
+ * @num_qgrps: Number of added queue groups
+ * @qg_list: list of queue groups to be added
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add Tx LAN queue (0x0C30)
+ *
+ * NOTE:
+ * Prior to calling add Tx LAN queue:
+ * Initialize the following as part of the Tx queue context:
+ * Completion queue ID if the queue uses a completion queue, Quanta
+ * profile, Cache profile and Packet shaper profile.
+ *
+ * After the add Tx LAN queue AQ command is completed:
+ * Interrupts should be associated with specific queues.
+ * Association of a Tx queue to a doorbell queue is not part of the
+ * Add LAN Tx queue flow.
+ */
+static enum ice_status
+ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
+ struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ u16 i, sum_header_size, sum_q_size = 0;
+ struct ice_aqc_add_tx_qgrp *list;
+ struct ice_aqc_add_txqs *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.add_txqs;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
+
+ if (!qg_list)
+ return ICE_ERR_PARAM;
+
+ if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
+ return ICE_ERR_PARAM;
+
+ sum_header_size = num_qgrps *
+ (sizeof(*qg_list) - sizeof(*qg_list->txqs));
+
+ list = qg_list;
+ for (i = 0; i < num_qgrps; i++) {
+ struct ice_aqc_add_txqs_perq *q = list->txqs;
+
+ sum_q_size += list->num_txqs * sizeof(*q);
+ list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
+ }
+
+ if (buf_size != (sum_header_size + sum_q_size))
+ return ICE_ERR_PARAM;
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->num_qgrps = num_qgrps;
+
+ return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
+}
+
+/**
+ * ice_aq_dis_lan_txq
+ * @hw: pointer to the hardware structure
+ * @num_qgrps: number of groups in the list
+ * @qg_list: the list of groups to disable
+ * @buf_size: the total size of the qg_list buffer in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * Disable LAN Tx queue (0x0C31)
+ */
+static enum ice_status
+ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
+ struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_dis_txqs *cmd;
+ struct ice_aq_desc desc;
+ u16 i, sz = 0;
+
+ cmd = &desc.params.dis_txqs;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
+
+ if (!qg_list)
+ return ICE_ERR_PARAM;
+
+ if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
+ return ICE_ERR_PARAM;
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd->num_entries = num_qgrps;
+
+ for (i = 0; i < num_qgrps; ++i) {
+ /* Calculate the size taken up by the queue IDs in this group */
+ sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
+
+ /* Add the size of the group header */
+ sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
+
+ /* If the number of queues is even, add 2 bytes of padding */
+ if ((qg_list[i].num_qs % 2) == 0)
+ sz += 2;
+ }
+
+ if (buf_size != sz)
+ return ICE_ERR_PARAM;
+
+ return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
+}
+
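+/* Buffer sizing example for the check above (illustrative; assumes the
+ * 8 byte ice_aqc_dis_txq_item layout with a 6 byte header and a
+ * single-element __le16 q_id[] array): one group carrying one queue ID
+ * needs 6 + 1 * 2 = 8 bytes and no padding, which matches the
+ * sizeof(qg_list) that ice_dis_vsi_txq() passes in below; two queue IDs
+ * would need 6 + 2 * 2 + 2 bytes of padding = 12 bytes.
+ */
+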
+/* End of FW Admin Queue command wrappers */
+
+/**
+ * ice_write_byte - write a byte to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
+{
+ u8 src_byte, dest_byte, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = (u8)(BIT(ce_info->width) - 1);
+
+ src_byte = *from;
+ src_byte &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_byte <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ memcpy(&dest_byte, dest, sizeof(dest_byte));
+
+ dest_byte &= ~mask; /* get the bits not changing */
+ dest_byte |= src_byte; /* add in the new bits */
+
+ /* put it all back */
+ memcpy(dest, &dest_byte, sizeof(dest_byte));
+}
+
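+/* Worked example for the byte writer above (illustrative numbers): for a
+ * field with width = 3 and lsb = 13, shift_width = 13 % 8 = 5 and
+ * mask = 0b00000111. A source value of 0b101 becomes 0b10100000 after
+ * shifting and is merged into bits 7:5 of destination byte 13 / 8 = 1,
+ * leaving the other bits of that byte untouched.
+ */
+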
+/**
+ * ice_write_word - write a word to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
+{
+ u16 src_word, mask;
+ __le16 dest_word;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_word = *(u16 *)from;
+ src_word &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_word <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ memcpy(&dest_word, dest, sizeof(dest_word));
+
+ dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
+ dest_word |= cpu_to_le16(src_word); /* add in the new bits */
+
+ /* put it all back */
+ memcpy(dest, &dest_word, sizeof(dest_word));
+}
+
+/**
+ * ice_write_dword - write a dword to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
+{
+ u32 src_dword, mask;
+ __le32 dest_dword;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's shift count is
+ * masked to 5 bits, so the shift would do nothing
+ */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = (u32)~0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_dword = *(u32 *)from;
+ src_dword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_dword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ memcpy(&dest_dword, dest, sizeof(dest_dword));
+
+ dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
+ dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
+
+ /* put it all back */
+ memcpy(dest, &dest_dword, sizeof(dest_dword));
+}
+
+/**
+ * ice_write_qword - write a qword to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
+{
+ u64 src_qword, mask;
+ __le64 dest_qword;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's shift count is
+ * masked to 6 bits, so the shift would do nothing
+ */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = (u64)~0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_qword = *(u64 *)from;
+ src_qword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_qword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ memcpy(&dest_qword, dest, sizeof(dest_qword));
+
+ dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
+ dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
+
+ /* put it all back */
+ memcpy(dest, &dest_qword, sizeof(dest_qword));
+}
+
+/**
+ * ice_set_ctx - set context bits in packed structure
+ * @src_ctx: pointer to a generic non-packed context structure
+ * @dest_ctx: pointer to memory for the packed structure
+ * @ce_info: a description of the structure to be transformed
+ */
+enum ice_status
+ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width; f++) {
+ /* We have to deal with each element of the FW response
+ * using the correct size so that we are correct regardless
+ * of the endianness of the machine.
+ */
+ switch (ce_info[f].size_of) {
+ case sizeof(u8):
+ ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case sizeof(u16):
+ ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case sizeof(u32):
+ ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case sizeof(u64):
+ ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ default:
+ return ICE_ERR_INVAL_SIZE;
+ }
+ }
+
+ return 0;
+}
+
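+/* Usage sketch (illustrative): pack a Tx LAN queue context into the
+ * opaque buffer carried by the add Tx queues command using the
+ * ice_tlan_ctx_info table declared in ice_common.h. The field name
+ * "base" and the txq_ctx destination are assumptions about the
+ * surrounding structures.
+ *
+ *	struct ice_tlan_ctx tlan_ctx = { 0 };
+ *
+ *	tlan_ctx.base = ring_dma >> 7;
+ *	ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ *		    ice_tlan_ctx_info);
+ */
+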
+/**
+ * ice_ena_vsi_txq
+ * @pi: port information structure
+ * @vsi_id: VSI id
+ * @tc: tc number
+ * @num_qgrps: Number of added queue groups
+ * @buf: list of queue groups to be added
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function adds one LAN Tx queue.
+ */
+enum ice_status
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
+ struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_txsched_elem_data node = { 0 };
+ struct ice_sched_node *parent;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return ICE_ERR_CFG;
+
+ if (num_qgrps > 1 || buf->num_txqs > 1)
+ return ICE_ERR_MAX_LIMIT;
+
+ hw = pi->hw;
+
+ mutex_lock(&pi->sched_lock);
+
+ /* find a parent node */
+ parent = ice_sched_get_free_qparent(pi, vsi_id, tc,
+ ICE_SCHED_NODE_OWNER_LAN);
+ if (!parent) {
+ status = ICE_ERR_PARAM;
+ goto ena_txq_exit;
+ }
+ buf->parent_teid = parent->info.node_teid;
+ node.parent_teid = parent->info.node_teid;
+ /* Mark the values in the "generic" section as valid. The default
+ * value in the "generic" section is zero. This means that:
+ * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
+ * - 0 priority among siblings, indicated by Bit 1-3.
+ * - WFQ, indicated by Bit 4.
+ * - 0 Adjustment value is used in PSM credit update flow, indicated by
+ * Bit 5-6.
+ * - Bit 7 is reserved.
+ * Without setting the generic section as valid in valid_sections, the
+ * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
+ */
+ buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+
+ /* add the LAN Tx queue */
+ status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
+ if (status)
+ goto ena_txq_exit;
+
+ node.node_teid = buf->txqs[0].q_teid;
+ node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+
+ /* add a leaf node into the scheduler tree queue layer */
+ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
+
+ena_txq_exit:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_dis_vsi_txq
+ * @pi: port information structure
+ * @num_queues: number of queues
+ * @q_ids: pointer to the q_id array
+ * @q_teids: pointer to queue node teids
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function removes queues and their corresponding nodes in SW DB
+ */
+enum ice_status
+ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
+ u32 *q_teids, struct ice_sq_cd *cd)
+{
+ enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
+ struct ice_aqc_dis_txq_item qg_list;
+ u16 i;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return ICE_ERR_CFG;
+
+ mutex_lock(&pi->sched_lock);
+
+ for (i = 0; i < num_queues; i++) {
+ struct ice_sched_node *node;
+
+ node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
+ if (!node)
+ continue;
+ qg_list.parent_teid = node->info.parent_teid;
+ qg_list.num_qs = 1;
+ qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
+ status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
+ sizeof(qg_list), cd);
+
+ if (status)
+ break;
+ ice_free_sched_node(pi, node);
+ }
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_qs - configure the new/existing VSI queues
+ * @pi: port information structure
+ * @vsi_id: VSI Id
+ * @tc_bitmap: TC bitmap
+ * @maxqs: max queues array per TC
+ * @owner: lan or rdma
+ *
+ * This function adds/updates the VSI queues per TC.
+ */
+static enum ice_status
+ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ u16 *maxqs, u8 owner)
+{
+ enum ice_status status = 0;
+ u8 i;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return ICE_ERR_CFG;
+
+ mutex_lock(&pi->sched_lock);
+
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ /* configuration is possible only if TC node is present */
+ if (!ice_sched_get_tc_node(pi, i))
+ continue;
+
+ status = ice_sched_cfg_vsi(pi, vsi_id, i, maxqs[i], owner,
+ ice_is_tc_ena(tc_bitmap, i));
+ if (status)
+ break;
+ }
+
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_lan - configure VSI LAN queues
+ * @pi: port information structure
+ * @vsi_id: VSI Id
+ * @tc_bitmap: TC bitmap
+ * @max_lanqs: max LAN queues array per TC
+ *
+ * This function adds/updates the VSI LAN queues per TC.
+ */
+enum ice_status
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ u16 *max_lanqs)
+{
+ return ice_cfg_vsi_qs(pi, vsi_id, tc_bitmap, max_lanqs,
+ ICE_SCHED_NODE_OWNER_LAN);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
new file mode 100644
index 000000000000..9a5519130af1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_COMMON_H_
+#define _ICE_COMMON_H_
+
+#include "ice.h"
+#include "ice_type.h"
+#include "ice_switch.h"
+
+void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
+ u16 buf_len);
+enum ice_status ice_init_hw(struct ice_hw *hw);
+void ice_deinit_hw(struct ice_hw *hw);
+enum ice_status ice_check_reset(struct ice_hw *hw);
+enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
+void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+enum ice_status
+ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_rq_event_info *e, u16 *pending);
+enum ice_status
+ice_get_link_status(struct ice_port_info *pi, bool *link_up);
+enum ice_status
+ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
+ enum ice_aq_res_access_type access);
+void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
+enum ice_status ice_init_nvm(struct ice_hw *hw);
+enum ice_status
+ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+void ice_clear_pxe_mode(struct ice_hw *hw);
+enum ice_status ice_get_caps(struct ice_hw *hw);
+enum ice_status
+ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
+
+enum ice_status
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ u16 lut_size);
+enum ice_status
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ u16 lut_size);
+enum ice_status
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_id,
+ struct ice_aqc_get_set_rss_keys *keys);
+enum ice_status
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ struct ice_aqc_get_set_rss_keys *keys);
+bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
+enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
+void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
+extern const struct ice_ctx_ele ice_tlan_ctx_info[];
+enum ice_status
+ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
+enum ice_status
+ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
+ void *buf, u16 buf_size, struct ice_sq_cd *cd);
+enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
+ struct ice_sq_cd *cd);
+enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart);
+enum ice_status
+ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+ struct ice_link_status *link, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
+ u32 *q_teids, struct ice_sq_cd *cmd_details);
+enum ice_status
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_id, u8 tc_bitmap,
+ u16 *max_lanqs);
+enum ice_status
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps,
+ struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
new file mode 100644
index 000000000000..5909a4407e38
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -0,0 +1,1066 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+
+/**
+ * ice_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assigns the AdminQ register addresses; in this file it is called
+ * from ice_init_ctrlq() before the send and receive queues are allocated
+ */
+static void ice_adminq_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->adminq;
+
+ cq->sq.head = PF_FW_ATQH;
+ cq->sq.tail = PF_FW_ATQT;
+ cq->sq.len = PF_FW_ATQLEN;
+ cq->sq.bah = PF_FW_ATQBAH;
+ cq->sq.bal = PF_FW_ATQBAL;
+ cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
+ cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
+ cq->sq.head_mask = PF_FW_ATQH_ATQH_M;
+
+ cq->rq.head = PF_FW_ARQH;
+ cq->rq.tail = PF_FW_ARQT;
+ cq->rq.len = PF_FW_ARQLEN;
+ cq->rq.bah = PF_FW_ARQBAH;
+ cq->rq.bal = PF_FW_ARQBAL;
+ cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
+ cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
+ cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
+}
+
+/**
+ * ice_check_sq_alive
+ * @hw: pointer to the hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Returns true if the send queue is enabled, false otherwise.
+ */
+bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ /* check both queue-length and queue-enable fields */
+ if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
+ return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
+ cq->sq.len_ena_mask)) ==
+ (cq->num_sq_entries | cq->sq.len_ena_mask);
+
+ return false;
+}
+
+/**
+ * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
+
+ cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
+ &cq->sq.desc_buf.pa,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!cq->sq.desc_buf.va)
+ return ICE_ERR_NO_MEMORY;
+ cq->sq.desc_buf.size = size;
+
+ cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
+ sizeof(struct ice_sq_cd), GFP_KERNEL);
+ if (!cq->sq.cmd_buf) {
+ dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
+ cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
+ cq->sq.desc_buf.va = NULL;
+ cq->sq.desc_buf.pa = 0;
+ cq->sq.desc_buf.size = 0;
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
+
+ cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
+ &cq->rq.desc_buf.pa,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!cq->rq.desc_buf.va)
+ return ICE_ERR_NO_MEMORY;
+ cq->rq.desc_buf.size = size;
+ return 0;
+}
+
+/**
+ * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ */
+static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
+ cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
+ cq->sq.desc_buf.va = NULL;
+ cq->sq.desc_buf.pa = 0;
+ cq->sq.desc_buf.size = 0;
+}
+
+/**
+ * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ */
+static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
+ cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
+ cq->rq.desc_buf.va = NULL;
+ cq->rq.desc_buf.pa = 0;
+ cq->rq.desc_buf.size = 0;
+}
+
+/**
+ * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+ cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
+ sizeof(cq->rq.desc_buf), GFP_KERNEL);
+ if (!cq->rq.dma_head)
+ return ICE_ERR_NO_MEMORY;
+ cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < cq->num_rq_entries; i++) {
+ struct ice_aq_desc *desc;
+ struct ice_dma_mem *bi;
+
+ bi = &cq->rq.r.rq_bi[i];
+ bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
+ cq->rq_buf_size, &bi->pa,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!bi->va)
+ goto unwind_alloc_rq_bufs;
+ bi->size = cq->rq_buf_size;
+
+ /* now configure the descriptors for use */
+ desc = ICE_CTL_Q_DESC(cq->rq, i);
+
+ desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > ICE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with the Admin queue design; there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = cpu_to_le16(bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.generic.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
+ }
+ return 0;
+
+unwind_alloc_rq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--) {
+ dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
+ cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
+ cq->rq.r.rq_bi[i].va = NULL;
+ cq->rq.r.rq_bi[i].pa = 0;
+ cq->rq.r.rq_bi[i].size = 0;
+ }
+ devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
+ sizeof(cq->sq.desc_buf), GFP_KERNEL);
+ if (!cq->sq.dma_head)
+ return ICE_ERR_NO_MEMORY;
+ cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < cq->num_sq_entries; i++) {
+ struct ice_dma_mem *bi;
+
+ bi = &cq->sq.r.sq_bi[i];
+ bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
+ cq->sq_buf_size, &bi->pa,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!bi->va)
+ goto unwind_alloc_sq_bufs;
+ bi->size = cq->sq_buf_size;
+ }
+ return 0;
+
+unwind_alloc_sq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--) {
+ dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
+ cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
+ cq->sq.r.sq_bi[i].va = NULL;
+ cq->sq.r.sq_bi[i].pa = 0;
+ cq->sq.r.sq_bi[i].size = 0;
+ }
+ devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_free_rq_bufs - Free ARQ buffer info elements
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < cq->num_rq_entries; i++) {
+ dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
+ cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
+ cq->rq.r.rq_bi[i].va = NULL;
+ cq->rq.r.rq_bi[i].pa = 0;
+ cq->rq.r.rq_bi[i].size = 0;
+ }
+
+ /* free the dma header */
+ devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
+}
+
+/**
+ * ice_free_sq_bufs - Free ATQ buffer info elements
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < cq->num_sq_entries; i++)
+ if (cq->sq.r.sq_bi[i].pa) {
+ dmam_free_coherent(ice_hw_to_dev(hw),
+ cq->sq.r.sq_bi[i].size,
+ cq->sq.r.sq_bi[i].va,
+ cq->sq.r.sq_bi[i].pa);
+ cq->sq.r.sq_bi[i].va = NULL;
+ cq->sq.r.sq_bi[i].pa = 0;
+ cq->sq.r.sq_bi[i].size = 0;
+ }
+
+ /* free the buffer info list */
+ devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);
+
+ /* free the dma header */
+ devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+}
+
+/**
+ * ice_cfg_sq_regs - configure Control ATQ registers
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * Configure base address and length registers for the transmit queue
+ */
+static enum ice_status
+ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, cq->sq.head, 0);
+ wr32(hw, cq->sq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
+ wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
+ wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, cq->sq.bal);
+ if (reg != lower_32_bits(cq->sq.desc_buf.pa))
+ return ICE_ERR_AQ_ERROR;
+
+ return 0;
+}
+
+/**
+ * ice_cfg_rq_regs - configure Control ARQ register
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * Configure base address and length registers for the receive (event) queue
+ */
+static enum ice_status
+ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, cq->rq.head, 0);
+ wr32(hw, cq->rq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
+ wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
+ wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, cq->rq.bal);
+ if (reg != lower_32_bits(cq->rq.desc_buf.pa))
+ return ICE_ERR_AQ_ERROR;
+
+ return 0;
+}
+
+/**
+ * ice_init_sq - main initialization routine for Control ATQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * This is the main initialization routine for the Control Send Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the cq structure:
+ * - cq->num_sq_entries
+ * - cq->sq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ */
+static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code;
+
+ if (cq->sq.count > 0) {
+ /* queue already initialized */
+ ret_code = ICE_ERR_NOT_READY;
+ goto init_ctrlq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if (!cq->num_sq_entries || !cq->sq_buf_size) {
+ ret_code = ICE_ERR_CFG;
+ goto init_ctrlq_exit;
+ }
+
+ cq->sq.next_to_use = 0;
+ cq->sq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = ice_alloc_sq_bufs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* initialize base registers */
+ ret_code = ice_cfg_sq_regs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* success! */
+ cq->sq.count = cq->num_sq_entries;
+ goto init_ctrlq_exit;
+
+init_ctrlq_free_rings:
+ ice_free_ctrlq_sq_ring(hw, cq);
+
+init_ctrlq_exit:
+ return ret_code;
+}
+
+/**
+ * ice_init_rq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the cq structure:
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ */
+static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code;
+
+ if (cq->rq.count > 0) {
+ /* queue already initialized */
+ ret_code = ICE_ERR_NOT_READY;
+ goto init_ctrlq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if (!cq->num_rq_entries || !cq->rq_buf_size) {
+ ret_code = ICE_ERR_CFG;
+ goto init_ctrlq_exit;
+ }
+
+ cq->rq.next_to_use = 0;
+ cq->rq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = ice_alloc_rq_bufs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* initialize base registers */
+ ret_code = ice_cfg_rq_regs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* success! */
+ cq->rq.count = cq->num_rq_entries;
+ goto init_ctrlq_exit;
+
+init_ctrlq_free_rings:
+ ice_free_ctrlq_rq_ring(hw, cq);
+
+init_ctrlq_exit:
+ return ret_code;
+}
+
+/**
+ * ice_shutdown_sq - shutdown the Control ATQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for the Control Transmit Queue
+ */
+static enum ice_status
+ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code = 0;
+
+ mutex_lock(&cq->sq_lock);
+
+ if (!cq->sq.count) {
+ ret_code = ICE_ERR_NOT_READY;
+ goto shutdown_sq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, cq->sq.head, 0);
+ wr32(hw, cq->sq.tail, 0);
+ wr32(hw, cq->sq.len, 0);
+ wr32(hw, cq->sq.bal, 0);
+ wr32(hw, cq->sq.bah, 0);
+
+ cq->sq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers and the ring itself */
+ ice_free_sq_bufs(hw, cq);
+ ice_free_ctrlq_sq_ring(hw, cq);
+
+shutdown_sq_out:
+ mutex_unlock(&cq->sq_lock);
+ return ret_code;
+}
+
+/**
+ * ice_aq_ver_check - Check the reported AQ API version.
+ * @fw_branch: The "branch" of FW, typically describes the device type
+ * @fw_major: The major version of the FW API
+ * @fw_minor: The minor version increment of the FW API
+ *
+ * Checks if the driver should load on a given AQ API version.
+ *
+ * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
+ */
+static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor)
+{
+ if (fw_branch != EXP_FW_API_VER_BRANCH)
+ return false;
+ if (fw_major != EXP_FW_API_VER_MAJOR)
+ return false;
+ if (fw_minor != EXP_FW_API_VER_MINOR)
+ return false;
+ return true;
+}
+
+/**
+ * ice_shutdown_rq - shutdown Control ARQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for the Control Receive Queue
+ */
+static enum ice_status
+ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code = 0;
+
+ mutex_lock(&cq->rq_lock);
+
+ if (!cq->rq.count) {
+ ret_code = ICE_ERR_NOT_READY;
+ goto shutdown_rq_out;
+ }
+
+ /* Stop Control Queue processing */
+ wr32(hw, cq->rq.head, 0);
+ wr32(hw, cq->rq.tail, 0);
+ wr32(hw, cq->rq.len, 0);
+ wr32(hw, cq->rq.bal, 0);
+ wr32(hw, cq->rq.bah, 0);
+
+ /* set rq.count to 0 to indicate uninitialized queue */
+ cq->rq.count = 0;
+
+ /* free ring buffers and the ring itself */
+ ice_free_rq_bufs(hw, cq);
+ ice_free_ctrlq_rq_ring(hw, cq);
+
+shutdown_rq_out:
+ mutex_unlock(&cq->rq_lock);
+ return ret_code;
+}
+
+/**
+ * ice_init_check_adminq - Check version for Admin Queue to know if it is alive
+ * @hw: pointer to the hardware structure
+ */
+static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->adminq;
+ enum ice_status status;
+
+ status = ice_aq_get_fw_ver(hw, NULL);
+ if (status)
+ goto init_ctrlq_free_rq;
+
+ if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver,
+ hw->api_min_ver)) {
+ status = ICE_ERR_FW_API_VER;
+ goto init_ctrlq_free_rq;
+ }
+
+ return 0;
+
+init_ctrlq_free_rq:
+ ice_shutdown_rq(hw, cq);
+ ice_shutdown_sq(hw, cq);
+ mutex_destroy(&cq->sq_lock);
+ mutex_destroy(&cq->rq_lock);
+ return status;
+}
+
+/**
+ * ice_init_ctrlq - main initialization routine for any control Queue
+ * @hw: pointer to the hardware structure
+ * @q_type: specific Control queue type
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the cq structure:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ */
+static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+{
+ struct ice_ctl_q_info *cq;
+ enum ice_status ret_code;
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ ice_adminq_init_regs(hw);
+ cq = &hw->adminq;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ cq->qtype = q_type;
+
+ /* verify input for valid configuration */
+ if (!cq->num_rq_entries || !cq->num_sq_entries ||
+ !cq->rq_buf_size || !cq->sq_buf_size) {
+ return ICE_ERR_CFG;
+ }
+ mutex_init(&cq->sq_lock);
+ mutex_init(&cq->rq_lock);
+
+ /* setup SQ command write back timeout */
+ cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
+
+ /* allocate the ATQ */
+ ret_code = ice_init_sq(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = ice_init_rq(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_sq;
+
+ /* success! */
+ return 0;
+
+init_ctrlq_free_sq:
+ ice_shutdown_sq(hw, cq);
+init_ctrlq_destroy_locks:
+ mutex_destroy(&cq->sq_lock);
+ mutex_destroy(&cq->rq_lock);
+ return ret_code;
+}
+
+/**
+ * ice_init_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ */
+enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+{
+ enum ice_status ret_code;
+
+ /* Init FW admin queue */
+ ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ if (ret_code)
+ return ret_code;
+
+ return ice_init_check_adminq(hw);
+}
+
+/**
+ * ice_shutdown_ctrlq - shutdown routine for any control queue
+ * @hw: pointer to the hardware structure
+ * @q_type: specific Control queue type
+ */
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+{
+ struct ice_ctl_q_info *cq;
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ cq = &hw->adminq;
+ if (ice_check_sq_alive(hw, cq))
+ ice_aq_q_shutdown(hw, true);
+ break;
+ default:
+ return;
+ }
+
+ ice_shutdown_sq(hw, cq);
+ ice_shutdown_rq(hw, cq);
+ mutex_destroy(&cq->sq_lock);
+ mutex_destroy(&cq->rq_lock);
+}
+
+/**
+ * ice_shutdown_all_ctrlq - shutdown routine for all control queues
+ * @hw: pointer to the hardware structure
+ */
+void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+{
+ /* Shutdown FW admin queue */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+}
+
+/**
+ * ice_clean_sq - cleans Admin send queue (ATQ)
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * returns the number of free desc
+ */
+static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ struct ice_ctl_q_ring *sq = &cq->sq;
+ u16 ntc = sq->next_to_clean;
+ struct ice_sq_cd *details;
+ struct ice_aq_desc *desc;
+
+ desc = ICE_CTL_Q_DESC(*sq, ntc);
+ details = ICE_CTL_Q_DETAILS(*sq, ntc);
+
+ while (rd32(hw, cq->sq.head) != ntc) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
+ memset(desc, 0, sizeof(*desc));
+ memset(details, 0, sizeof(*details));
+ ntc++;
+ if (ntc == sq->count)
+ ntc = 0;
+ desc = ICE_CTL_Q_DESC(*sq, ntc);
+ details = ICE_CTL_Q_DETAILS(*sq, ntc);
+ }
+
+ sq->next_to_clean = ntc;
+
+ return ICE_CTL_Q_DESC_UNUSED(sq);
+}
+
+/**
+ * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
+ * @hw: pointer to the hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ */
+static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
+}
+
+/**
+ * ice_sq_send_cmd - send command to Control Queue (ATQ)
+ * @hw: pointer to the hw struct
+ * @cq: pointer to the specific Control queue
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buf: buffer to use for indirect commands (or NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
+ * @cd: pointer to command details structure
+ *
+ * This is the main send command routine for the ATQ. It posts the prepared
+ * descriptor, waits for firmware completion, and reclaims processed
+ * descriptors from the queue.
+ */
+enum ice_status
+ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_dma_mem *dma_buf = NULL;
+ struct ice_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ enum ice_status status = 0;
+ struct ice_sq_cd *details;
+ u32 total_delay = 0;
+ u16 retval = 0;
+ u32 val = 0;
+
+ mutex_lock(&cq->sq_lock);
+
+ cq->sq_last_status = ICE_AQ_RC_OK;
+
+ if (!cq->sq.count) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send queue not initialized.\n");
+ status = ICE_ERR_AQ_EMPTY;
+ goto sq_send_command_error;
+ }
+
+ if ((buf && !buf_size) || (!buf && buf_size)) {
+ status = ICE_ERR_PARAM;
+ goto sq_send_command_error;
+ }
+
+ if (buf) {
+ if (buf_size > cq->sq_buf_size) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Invalid buffer size for Control Send queue: %d.\n",
+ buf_size);
+ status = ICE_ERR_INVAL_SIZE;
+ goto sq_send_command_error;
+ }
+
+ desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
+ if (buf_size > ICE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ }
+
+ val = rd32(hw, cq->sq.head);
+ if (val >= cq->num_sq_entries) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "head overrun at %d in the Control Send Queue ring\n",
+ val);
+ status = ICE_ERR_AQ_EMPTY;
+ goto sq_send_command_error;
+ }
+
+ details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
+ if (cd)
+ memcpy(details, cd, sizeof(*details));
+ else
+ memset(details, 0, sizeof(*details));
+
+ /* Call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW/MBX; the function returns the
+ * number of desc available. The clean function called here could be
+ * called in a separate thread in case of asynchronous completions.
+ */
+ if (ice_clean_sq(hw, cq) == 0) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Error: Control Send Queue is full.\n");
+ status = ICE_ERR_AQ_FULL;
+ goto sq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));
+
+ /* if buf is not NULL assume indirect command */
+ if (buf) {
+ dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
+ /* copy the user buf into the respective DMA buf */
+ memcpy(dma_buf->va, buf, buf_size);
+ desc_on_ring->datalen = cpu_to_le16(buf_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.generic.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buf->pa));
+ desc_on_ring->params.generic.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buf->pa));
+ }
+
+ /* Debug desc and buffer */
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "ATQ: Control Send queue desc and buffer:\n");
+
+ ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);
+
+ (cq->sq.next_to_use)++;
+ if (cq->sq.next_to_use == cq->sq.count)
+ cq->sq.next_to_use = 0;
+ wr32(hw, cq->sq.tail, cq->sq.next_to_use);
+
+ do {
+ if (ice_sq_done(hw, cq))
+ break;
+
+ mdelay(1);
+ total_delay++;
+ } while (total_delay < cq->sq_cmd_timeout);
+
+ /* if ready, copy the desc back to temp */
+ if (ice_sq_done(hw, cq)) {
+ memcpy(desc, desc_on_ring, sizeof(*desc));
+ if (buf) {
+ /* get returned length to copy */
+ u16 copy_size = le16_to_cpu(desc->datalen);
+
+ if (copy_size > buf_size) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Return len %d > than buf len %d\n",
+ copy_size, buf_size);
+ status = ICE_ERR_AQ_ERROR;
+ } else {
+ memcpy(buf, dma_buf->va, copy_size);
+ }
+ }
+ retval = le16_to_cpu(desc->retval);
+ if (retval) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send Queue command completed with error 0x%x\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if (!status && retval != ICE_AQ_RC_OK)
+ status = ICE_ERR_AQ_ERROR;
+ cq->sq_last_status = (enum ice_aq_err)retval;
+ }
+
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "ATQ: desc and buffer writeback:\n");
+
+ ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);
+
+ /* save writeback AQ if requested */
+ if (details->wb_desc)
+ memcpy(details->wb_desc, desc_on_ring,
+ sizeof(*details->wb_desc));
+
+ /* update the error if time out occurred */
+ if (!cmd_completed) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send Queue Writeback timeout.\n");
+ status = ICE_ERR_AQ_TIMEOUT;
+ }
+
+sq_send_command_error:
+ mutex_unlock(&cq->sq_lock);
+ return status;
+}
+
+/**
+ * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ */
+void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
+{
+ /* zero out the desc */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
+}
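+
+/* Illustrative caller flow (a sketch, not an additional API): a direct,
+ * bufferless command is typically built with this helper and then posted
+ * with ice_sq_send_cmd(), where 'opcode' stands for any admin queue opcode:
+ *
+ *	struct ice_aq_desc desc;
+ *	enum ice_status status;
+ *
+ *	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+ *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
+ */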
+
+/**
+ * ice_clean_rq_elem - clean one element from the Control Receive Queue
+ * @hw: pointer to the hw struct
+ * @cq: pointer to the specific Control queue
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'.
+ */
+enum ice_status
+ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_rq_event_info *e, u16 *pending)
+{
+ u16 ntc = cq->rq.next_to_clean;
+ enum ice_status ret_code = 0;
+ struct ice_aq_desc *desc;
+ struct ice_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ memset(&e->desc, 0, sizeof(e->desc));
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&cq->rq_lock);
+
+ if (!cq->rq.count) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Receive queue not initialized.\n");
+ ret_code = ICE_ERR_AQ_EMPTY;
+ goto clean_rq_elem_err;
+ }
+
+ /* set next_to_use to head */
+ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = ICE_ERR_AQ_NO_WORK;
+ goto clean_rq_elem_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = ICE_CTL_Q_DESC(cq->rq, ntc);
+ desc_idx = ntc;
+
+ flags = le16_to_cpu(desc->flags);
+ if (flags & ICE_AQ_FLAG_ERR) {
+ ret_code = ICE_ERR_AQ_ERROR;
+ cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Receive Queue Event received with error 0x%x\n",
+ cq->rq_last_status);
+ }
+ memcpy(&e->desc, desc, sizeof(e->desc));
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf && e->msg_len)
+ memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
+
+ ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");
+
+ ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
+ cq->rq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message size
+ */
+ bi = &cq->rq.r.rq_bi[ntc];
+ memset(desc, 0, sizeof(*desc));
+
+ desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > ICE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->datalen = cpu_to_le16(bi->size);
+ desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, cq->rq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == cq->num_rq_entries)
+ ntc = 0;
+ cq->rq.next_to_clean = ntc;
+ cq->rq.next_to_use = ntu;
+
+clean_rq_elem_out:
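+	/* Worked example (illustrative): with count = 512, ntu = 3 and
+	 * ntc = 510, pending = 512 + (3 - 510) = 5 events remain; when
+	 * ntu >= ntc this reduces to ntu - ntc.
+	 */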
+ /* Set pending if needed, unlock and return */
+ if (pending)
+ *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+clean_rq_elem_err:
+ mutex_unlock(&cq->rq_lock);
+
+ return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
new file mode 100644
index 000000000000..ea02b89243e2
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_CONTROLQ_H_
+#define _ICE_CONTROLQ_H_
+
+#include "ice_adminq_cmd.h"
+
+/* Maximum buffer lengths for all control queue types */
+#define ICE_AQ_MAX_BUF_LEN 4096
+
+#define ICE_CTL_Q_DESC(R, i) \
+ (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
+
+#define ICE_CTL_Q_DESC_UNUSED(R) \
+ (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
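+
+/* Worked example (illustrative): for a ring with count = 16,
+ * next_to_use = 10 and next_to_clean = 4, the macro evaluates to
+ * (16 + 4 - 10 - 1) = 9 free descriptors; the "- 1" keeps one slot
+ * unused so a full ring can be distinguished from an empty one.
+ */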
+
+/* Defines that help manage the driver vs FW API checks.
+ * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
+ */
+#define EXP_FW_API_VER_BRANCH 0x00
+#define EXP_FW_API_VER_MAJOR 0x00
+#define EXP_FW_API_VER_MINOR 0x01
+
+/* Different control queue types: These are mainly for SW consumption. */
+enum ice_ctl_q {
+ ICE_CTL_Q_UNKNOWN = 0,
+ ICE_CTL_Q_ADMIN,
+};
+
+/* Control Queue default settings */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
+
+struct ice_ctl_q_ring {
+ void *dma_head; /* Virtual address to dma head */
+ struct ice_dma_mem desc_buf; /* descriptor ring memory */
+ void *cmd_buf; /* command buffer memory */
+
+ union {
+ struct ice_dma_mem *sq_bi;
+ struct ice_dma_mem *rq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+ u32 len_mask;
+ u32 len_ena_mask;
+ u32 head_mask;
+};
+
+/* sq transaction details */
+struct ice_sq_cd {
+ struct ice_aq_desc *wb_desc;
+};
+
+#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
+
+/* rq event information */
+struct ice_rq_event_info {
+ struct ice_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Control Queue information */
+struct ice_ctl_q_info {
+ enum ice_ctl_q qtype;
+ struct ice_ctl_q_ring rq; /* receive queue */
+ struct ice_ctl_q_ring sq; /* send queue */
+ u32 sq_cmd_timeout; /* send queue cmd write back timeout */
+ u16 num_rq_entries; /* receive queue depth */
+ u16 num_sq_entries; /* send queue depth */
+ u16 rq_buf_size; /* receive queue buffer size */
+ u16 sq_buf_size; /* send queue buffer size */
+ struct mutex sq_lock; /* Send queue lock */
+ struct mutex rq_lock; /* Receive queue lock */
+ enum ice_aq_err sq_last_status; /* last status on send queue */
+ enum ice_aq_err rq_last_status; /* last status on receive queue */
+};
+
+#endif /* _ICE_CONTROLQ_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
new file mode 100644
index 000000000000..0e14d7215a6e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_DEVIDS_H_
+#define _ICE_DEVIDS_H_
+
+/* Device IDs */
+/* Intel(R) Ethernet Controller C810 for backplane */
+#define ICE_DEV_ID_C810_BACKPLANE 0x1591
+/* Intel(R) Ethernet Controller C810 for QSFP */
+#define ICE_DEV_ID_C810_QSFP 0x1592
+/* Intel(R) Ethernet Controller C810 for SFP */
+#define ICE_DEV_ID_C810_SFP 0x1593
+/* Intel(R) Ethernet Controller C810/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_C810_10G_BASE_T 0x1594
+/* Intel(R) Ethernet Controller C810 1GbE */
+#define ICE_DEV_ID_C810_SGMII 0x1595
+
+#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
new file mode 100644
index 000000000000..186764a5c263
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -0,0 +1,940 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+/* ethtool support for ice */
+
+#include "ice.h"
+
+struct ice_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define ICE_STAT(_type, _name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .stat_offset = offsetof(_type, _stat) \
+}
+
+#define ICE_VSI_STAT(_name, _stat) \
+ ICE_STAT(struct ice_vsi, _name, _stat)
+#define ICE_PF_STAT(_name, _stat) \
+ ICE_STAT(struct ice_pf, _name, _stat)
+
+static int ice_q_stats_len(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ return ((np->vsi->num_txq + np->vsi->num_rxq) *
+ (sizeof(struct ice_q_stats) / sizeof(u64)));
+}
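+
+/* Worked example (illustrative): struct ice_q_stats carries two u64
+ * counters (pkts and bytes, as read by the per-queue loops below), so a
+ * VSI with 4 Tx and 4 Rx queues reports (4 + 4) * 2 = 16 queue stats here.
+ */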
+
+#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
+#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)
+
+#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
+ ice_q_stats_len(n))
+
+static const struct ice_stats ice_gstrings_vsi_stats[] = {
+ ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
+ ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
+ ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+ ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
+ ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+ ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
+ ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
+ ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
+ ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
+ ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
+ ICE_VSI_STAT("tx_linearize", tx_linearize),
+ ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+ ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
+ ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+};
+
+/* These PF_STATs might look like duplicates of some NETDEV_STATs,
+ * but they aren't. This device is capable of supporting multiple
+ * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
+ * netdevs whereas the PF_STATs are for the physical function that's
+ * hosting these netdevs.
+ *
+ * The PF_STATs are appended to the netdev stats only when ethtool -S
+ * is queried on the base PF netdev.
+ */
+static struct ice_stats ice_gstrings_pf_stats[] = {
+ ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
+ ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
+ ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
+ ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast),
+ ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast),
+ ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast),
+ ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
+ ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
+ ICE_PF_STAT("tx_errors", stats.eth.tx_errors),
+ ICE_PF_STAT("tx_size_64", stats.tx_size_64),
+ ICE_PF_STAT("rx_size_64", stats.rx_size_64),
+ ICE_PF_STAT("tx_size_127", stats.tx_size_127),
+ ICE_PF_STAT("rx_size_127", stats.rx_size_127),
+ ICE_PF_STAT("tx_size_255", stats.tx_size_255),
+ ICE_PF_STAT("rx_size_255", stats.rx_size_255),
+ ICE_PF_STAT("tx_size_511", stats.tx_size_511),
+ ICE_PF_STAT("rx_size_511", stats.rx_size_511),
+ ICE_PF_STAT("tx_size_1023", stats.tx_size_1023),
+ ICE_PF_STAT("rx_size_1023", stats.rx_size_1023),
+ ICE_PF_STAT("tx_size_1522", stats.tx_size_1522),
+ ICE_PF_STAT("rx_size_1522", stats.rx_size_1522),
+ ICE_PF_STAT("tx_size_big", stats.tx_size_big),
+ ICE_PF_STAT("rx_size_big", stats.rx_size_big),
+ ICE_PF_STAT("link_xon_tx", stats.link_xon_tx),
+ ICE_PF_STAT("link_xon_rx", stats.link_xon_rx),
+ ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+ ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
+ ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
+ ICE_PF_STAT("rx_undersize", stats.rx_undersize),
+ ICE_PF_STAT("rx_fragments", stats.rx_fragments),
+ ICE_PF_STAT("rx_oversize", stats.rx_oversize),
+ ICE_PF_STAT("rx_jabber", stats.rx_jabber),
+ ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error),
+ ICE_PF_STAT("rx_length_errors", stats.rx_len_errors),
+ ICE_PF_STAT("rx_dropped", stats.eth.rx_discards),
+ ICE_PF_STAT("rx_crc_errors", stats.crc_errors),
+ ICE_PF_STAT("illegal_bytes", stats.illegal_bytes),
+ ICE_PF_STAT("mac_local_faults", stats.mac_local_faults),
+ ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+};
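+
+/* As a consequence of the above, an "ethtool -S" dump on the PF netdev is
+ * expected to show these entries prefixed with "port." (see
+ * ice_get_strings() below), e.g. "port.rx_bytes" alongside the per-VSI
+ * "rx_bytes".
+ */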
+
+static u32 ice_regs_dump_list[] = {
+ PFGEN_STATE,
+ PRTGEN_STATUS,
+ QRX_CTRL(0),
+ QINT_TQCTL(0),
+ QINT_RQCTL(0),
+ PFINT_OICR_ENA,
+ QRX_ITR(0),
+};
+
+/**
+ * ice_nvm_version_str - format the NVM version strings
+ * @hw: ptr to the hardware info
+ */
+static char *ice_nvm_version_str(struct ice_hw *hw)
+{
+ static char buf[ICE_ETHTOOL_FWVER_LEN];
+ u8 ver, patch;
+ u32 full_ver;
+ u16 build;
+
+ full_ver = hw->nvm.oem_ver;
+ ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
+ build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
+ ICE_OEM_VER_BUILD_SHIFT);
+ patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);
+
+ snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
+ (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
+ (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
+ hw->nvm.eetrack, ver, build, patch);
+
+ return buf;
+}
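+
+/* Illustrative output (hypothetical values): with nvm.ver decoding to 1.22,
+ * eetrack = 0x80000a61 and oem_ver decoding to 1.2.3, the string would read
+ * "1.22 0x80000a61 1.2.3", per the "%x.%02x 0x%x %d.%d.%d" format above.
+ */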
+
+static void
+ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static int ice_get_regs_len(struct net_device __always_unused *netdev)
+{
+ return sizeof(ice_regs_dump_list);
+}
+
+static void
+ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 *regs_buf = (u32 *)p;
+ int i;
+
+ regs->version = 1;
+
+ for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
+ regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
+}
+
+static u32 ice_get_msglevel(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+#ifndef CONFIG_DYNAMIC_DEBUG
+ if (pf->hw.debug_mask)
+ netdev_info(netdev, "hw debug_mask: 0x%llX\n",
+ pf->hw.debug_mask);
+#endif /* !CONFIG_DYNAMIC_DEBUG */
+
+ return pf->msg_enable;
+}
+
+static void ice_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+#ifndef CONFIG_DYNAMIC_DEBUG
+ if (ICE_DBG_USER & data)
+ pf->hw.debug_mask = data;
+ else
+ pf->msg_enable = data;
+#else
+ pf->msg_enable = data;
+#endif /* !CONFIG_DYNAMIC_DEBUG */
+}
+
+static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ char *p = (char *)data;
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ ice_gstrings_vsi_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+
+ ice_for_each_txq(vsi, i) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "tx-queue-%u.tx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+
+ ice_for_each_rxq(vsi, i) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "rx-queue-%u.rx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+
+ if (vsi->type != ICE_VSI_PF)
+ return;
+
+ for (i = 0; i < ICE_PF_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "port.%s",
+ ice_gstrings_pf_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+
+ break;
+ default:
+ break;
+ }
+}
+
+static int ice_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ICE_ALL_STATS_LEN(netdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+ice_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats, u64 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_ring *ring;
+ unsigned int j = 0;
+ int i = 0;
+ char *p;
+
+ for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
+ p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
+ data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ /* populate per queue stats */
+ rcu_read_lock();
+
+ ice_for_each_txq(vsi, j) {
+ ring = READ_ONCE(vsi->tx_rings[j]);
+ if (!ring)
+ continue;
+ data[i++] = ring->stats.pkts;
+ data[i++] = ring->stats.bytes;
+ }
+
+ ice_for_each_rxq(vsi, j) {
+ ring = READ_ONCE(vsi->rx_rings[j]);
+ if (!ring)
+ continue;
+ data[i++] = ring->stats.pkts;
+ data[i++] = ring->stats.bytes;
+ }
+
+ rcu_read_unlock();
+
+ if (vsi->type != ICE_VSI_PF)
+ return;
+
+ for (j = 0; j < ICE_PF_STATS_LEN; j++) {
+ p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
+ data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+}
+
+static int
+ice_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_link_status *hw_link_info;
+ struct ice_vsi *vsi = np->vsi;
+ bool link_up;
+
+ hw_link_info = &vsi->port_info->phy.link_info;
+ link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+
+ /* set speed and duplex */
+ if (link_up) {
+ switch (hw_link_info->link_speed) {
+ case ICE_AQ_LINK_SPEED_100MB:
+ ks->base.speed = SPEED_100;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ ks->base.speed = SPEED_1000;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ ks->base.speed = SPEED_2500;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ ks->base.speed = SPEED_5000;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ ks->base.speed = SPEED_10000;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ ks->base.speed = SPEED_25000;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ ks->base.speed = SPEED_40000;
+ break;
+ default:
+ ks->base.speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ ks->base.duplex = DUPLEX_FULL;
+ } else {
+ ks->base.speed = SPEED_UNKNOWN;
+ ks->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ /* set autoneg settings */
+ ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ /* set media type settings */
+ switch (vsi->port_info->phy.media_type) {
+ case ICE_MEDIA_FIBER:
+ ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ks->base.port = PORT_FIBRE;
+ break;
+ case ICE_MEDIA_BASET:
+ ethtool_link_ksettings_add_link_mode(ks, supported, TP);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
+ ks->base.port = PORT_TP;
+ break;
+ case ICE_MEDIA_BACKPLANE:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Backplane);
+ ks->base.port = PORT_NONE;
+ break;
+ case ICE_MEDIA_DA:
+ ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
+ ks->base.port = PORT_DA;
+ break;
+ default:
+ ks->base.port = PORT_OTHER;
+ break;
+ }
+
+ /* flow control is symmetric and always supported */
+ ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+
+ switch (vsi->port_info->fc.req_mode) {
+ case ICE_FC_FULL:
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
+ break;
+ case ICE_FC_TX_PAUSE:
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+ break;
+ case ICE_FC_RX_PAUSE:
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+ break;
+ case ICE_FC_PFC:
+ default:
+ ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
+ ethtool_link_ksettings_del_link_mode(ks, advertising,
+ Asym_Pause);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ * @rule_locs: buffer to return Rx flow classification rules
+ *
+ * Returns 0 if the command is supported, -EOPNOTSUPP otherwise.
+ */
+static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 __always_unused *rule_locs)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = vsi->rss_size;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void
+ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ ring->rx_max_pending = ICE_MAX_NUM_DESC;
+ ring->tx_max_pending = ICE_MAX_NUM_DESC;
+ ring->rx_pending = vsi->rx_rings[0]->count;
+ ring->tx_pending = vsi->tx_rings[0]->count;
+ ring->rx_mini_pending = ICE_MIN_NUM_DESC;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int
+ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+{
+ struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ int i, timeout = 50, err = 0;
+ u32 new_rx_cnt, new_tx_cnt;
+
+ if (ring->tx_pending > ICE_MAX_NUM_DESC ||
+ ring->tx_pending < ICE_MIN_NUM_DESC ||
+ ring->rx_pending > ICE_MAX_NUM_DESC ||
+ ring->rx_pending < ICE_MIN_NUM_DESC) {
+ netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+ ring->tx_pending, ring->rx_pending,
+ ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
+ return -EINVAL;
+ }
+
+ new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
+ new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
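+ /* e.g. a request for 1000 descriptors becomes 1024 here, assuming
+ * ICE_REQ_DESC_MULTIPLE is 32 (as defined in ice.h); ALIGN() rounds
+ * up to the next multiple.
+ */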
+
+ /* if nothing to do return success */
+ if (new_tx_cnt == vsi->tx_rings[0]->count &&
+ new_rx_cnt == vsi->rx_rings[0]->count) {
+ netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
+ return 0;
+ }
+
+ while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ /* set for the next time the netdev is started */
+ if (!netif_running(vsi->netdev)) {
+ for (i = 0; i < vsi->alloc_txq; i++)
+ vsi->tx_rings[i]->count = new_tx_cnt;
+ for (i = 0; i < vsi->alloc_rxq; i++)
+ vsi->rx_rings[i]->count = new_rx_cnt;
+ netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
+ goto done;
+ }
+
+ if (new_tx_cnt == vsi->tx_rings[0]->count)
+ goto process_rx;
+
+ /* alloc updated Tx resources */
+ netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
+ vsi->tx_rings[0]->count, new_tx_cnt);
+
+ tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ sizeof(struct ice_ring), GFP_KERNEL);
+ if (!tx_rings) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < vsi->num_txq; i++) {
+ /* clone ring and setup updated count */
+ tx_rings[i] = *vsi->tx_rings[i];
+ tx_rings[i].count = new_tx_cnt;
+ tx_rings[i].desc = NULL;
+ tx_rings[i].tx_buf = NULL;
+ err = ice_setup_tx_ring(&tx_rings[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ice_clean_tx_ring(&tx_rings[i]);
+ }
+ devm_kfree(&pf->pdev->dev, tx_rings);
+ goto done;
+ }
+ }
+
+process_rx:
+ if (new_rx_cnt == vsi->rx_rings[0]->count)
+ goto process_link;
+
+ /* alloc updated Rx resources */
+ netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
+ vsi->rx_rings[0]->count, new_rx_cnt);
+
+ rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ sizeof(struct ice_ring), GFP_KERNEL);
+ if (!rx_rings) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < vsi->num_rxq; i++) {
+ /* clone ring and setup updated count */
+ rx_rings[i] = *vsi->rx_rings[i];
+ rx_rings[i].count = new_rx_cnt;
+ rx_rings[i].desc = NULL;
+ rx_rings[i].rx_buf = NULL;
+ /* this is to allow wr32 to have something to write to
+ * during early allocation of Rx buffers
+ */
+ rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;
+
+ err = ice_setup_rx_ring(&rx_rings[i]);
+ if (err)
+ goto rx_unwind;
+
+ /* allocate Rx buffers */
+ err = ice_alloc_rx_bufs(&rx_rings[i],
+ ICE_DESC_UNUSED(&rx_rings[i]));
+rx_unwind:
+ if (err) {
+ while (i) {
+ i--;
+ ice_free_rx_ring(&rx_rings[i]);
+ }
+ devm_kfree(&pf->pdev->dev, rx_rings);
+ err = -ENOMEM;
+ goto free_tx;
+ }
+ }
+
+process_link:
+ /* Bring the interface down, copy in the new ring info, then restore the
+ * interface. If the VSI is up, bring it down and then back up.
+ */
+ if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
+ ice_down(vsi);
+
+ if (tx_rings) {
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_free_tx_ring(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
+ }
+ devm_kfree(&pf->pdev->dev, tx_rings);
+ }
+
+ if (rx_rings) {
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_free_rx_ring(vsi->rx_rings[i]);
+ /* copy the real tail offset */
+ rx_rings[i].tail = vsi->rx_rings[i]->tail;
+ /* this is to fake out the allocation routine
+ * into thinking it has to realloc everything
+ * but the recycling logic will let us re-use
+ * the buffers allocated above
+ */
+ rx_rings[i].next_to_use = 0;
+ rx_rings[i].next_to_clean = 0;
+ rx_rings[i].next_to_alloc = 0;
+ *vsi->rx_rings[i] = rx_rings[i];
+ }
+ devm_kfree(&pf->pdev->dev, rx_rings);
+ }
+
+ ice_up(vsi);
+ }
+ goto done;
+
+free_tx:
+ /* error cleanup if the Rx allocations failed after getting Tx */
+ if (tx_rings) {
+ for (i = 0; i < vsi->alloc_txq; i++)
+ ice_free_tx_ring(&tx_rings[i]);
+ devm_kfree(&pf->pdev->dev, tx_rings);
+ }
+
+done:
+ clear_bit(__ICE_CFG_BUSY, pf->state);
+ return err;
+}
+
+static int ice_nway_reset(struct net_device *netdev)
+{
+ /* restart autonegotiation */
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_link_status *hw_link_info;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_port_info *pi;
+ enum ice_status status;
+ bool link_up;
+
+ pi = vsi->port_info;
+ hw_link_info = &pi->phy.link_info;
+ link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+
+ status = ice_aq_set_link_restart_an(pi, link_up, NULL);
+ if (status) {
+ netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
+ status, pi->hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_pauseparam - Get Flow Control status
+ * @netdev: network interface device structure
+ * @pause: ethernet pause (flow control) parameters
+ */
+static void
+ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_port_info *pi;
+
+ pi = np->vsi->port_info;
+ pause->autoneg =
+ ((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
+ pause->rx_pause = 1;
+ } else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
+ pause->tx_pause = 1;
+ } else if (pi->fc.current_mode == ICE_FC_FULL) {
+ pause->rx_pause = 1;
+ pause->tx_pause = 1;
+ }
+}
+
+/**
+ * ice_set_pauseparam - Set Flow Control parameter
+ * @netdev: network interface device structure
+ * @pause: requested Tx/Rx flow control parameters
+ */
+static int
+ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_link_status *hw_link_info;
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_port_info *pi;
+ enum ice_status status;
+ u8 aq_failures;
+ bool link_up;
+ int err = 0;
+
+ pi = vsi->port_info;
+ hw_link_info = &pi->phy.link_info;
+ link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+
+ /* Changing the port's flow control is not supported if this isn't the
+ * PF VSI
+ */
+ if (vsi->type != ICE_VSI_PF) {
+ netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
+ netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If we have link and don't have autoneg */
+ if (!test_bit(__ICE_DOWN, pf->state) &&
+ !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
+ /* Send message that it might not necessarily work */
+ netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
+ }
+
+ if (pause->rx_pause && pause->tx_pause)
+ pi->fc.req_mode = ICE_FC_FULL;
+ else if (pause->rx_pause && !pause->tx_pause)
+ pi->fc.req_mode = ICE_FC_RX_PAUSE;
+ else if (!pause->rx_pause && pause->tx_pause)
+ pi->fc.req_mode = ICE_FC_TX_PAUSE;
+ else if (!pause->rx_pause && !pause->tx_pause)
+ pi->fc.req_mode = ICE_FC_NONE;
+ else
+ return -EINVAL;
+
+ /* Tell the OS link is going down, the link will go back up when fw
+ * says it is ready asynchronously
+ */
+ ice_print_link_msg(vsi, false);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ /* Set the FC mode and only restart AN if link is up */
+ status = ice_set_fc(pi, &aq_failures, link_up);
+
+ if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ err = -EAGAIN;
+ } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ err = -EAGAIN;
+ } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ err = -EAGAIN;
+ }
+
+ if (!test_bit(__ICE_DOWN, pf->state)) {
+ /* Give it a little more time to try to come back */
+ msleep(75);
+ if (!test_bit(__ICE_DOWN, pf->state))
+ return ice_nway_reset(netdev);
+ }
+
+ return err;
+}
+
+/**
+ * ice_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the RSS hash key size.
+ */
+static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
+{
+ return ICE_VSIQF_HKEY_ARRAY_SIZE;
+}
+
+/**
+ * ice_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ */
+static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ return np->vsi->rss_table_size;
+}
+
+/**
+ * ice_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function
+ *
+ * Reads the indirection table directly from the hardware.
+ */
+static int
+ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ int ret = 0, i;
+ u8 *lut;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ /* RSS not supported; return an error */
+ netdev_warn(netdev, "RSS is not configured on this VSI!\n");
+ return -EIO;
+ }
+
+ lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < vsi->rss_table_size; i++)
+ indir[i] = (u32)(lut[i]);
+
+out:
+ devm_kfree(&pf->pdev->dev, lut);
+ return ret;
+}
+
+/**
+ * ice_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function
+ *
+ * Returns 0 after programming the table, or a negative error code on
+ * failure.
+ */
+static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u8 *seed = NULL;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ /* RSS not supported; return an error */
+ netdev_warn(netdev, "RSS is not configured on this VSI!\n");
+ return -EIO;
+ }
+
+ if (key) {
+ if (!vsi->rss_hkey_user) {
+ vsi->rss_hkey_user =
+ devm_kzalloc(&pf->pdev->dev,
+ ICE_VSIQF_HKEY_ARRAY_SIZE,
+ GFP_KERNEL);
+ if (!vsi->rss_hkey_user)
+ return -ENOMEM;
+ }
+ memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
+ seed = vsi->rss_hkey_user;
+ }
+
+ if (!vsi->rss_lut_user) {
+ vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
+ vsi->rss_table_size,
+ GFP_KERNEL);
+ if (!vsi->rss_lut_user)
+ return -ENOMEM;
+ }
+
+ /* Each 32-bit entry in 'indir' is stored as an 8-bit LUT entry */
+ if (indir) {
+ int i;
+
+ for (i = 0; i < vsi->rss_table_size; i++)
+ vsi->rss_lut_user[i] = (u8)(indir[i]);
+ } else {
+ ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
+ vsi->rss_size);
+ }
+
+ if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
+ return -EIO;
+
+ return 0;
+}
+
+static const struct ethtool_ops ice_ethtool_ops = {
+ .get_link_ksettings = ice_get_link_ksettings,
+ .get_drvinfo = ice_get_drvinfo,
+ .get_regs_len = ice_get_regs_len,
+ .get_regs = ice_get_regs,
+ .get_msglevel = ice_get_msglevel,
+ .set_msglevel = ice_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ice_get_strings,
+ .get_ethtool_stats = ice_get_ethtool_stats,
+ .get_sset_count = ice_get_sset_count,
+ .get_rxnfc = ice_get_rxnfc,
+ .get_ringparam = ice_get_ringparam,
+ .set_ringparam = ice_set_ringparam,
+ .nway_reset = ice_nway_reset,
+ .get_pauseparam = ice_get_pauseparam,
+ .set_pauseparam = ice_set_pauseparam,
+ .get_rxfh_key_size = ice_get_rxfh_key_size,
+ .get_rxfh_indir_size = ice_get_rxfh_indir_size,
+ .get_rxfh = ice_get_rxfh,
+ .set_rxfh = ice_set_rxfh,
+};
+
+/**
+ * ice_set_ethtool_ops - setup netdev ethtool ops
+ * @netdev: network interface device structure
+ *
+ * setup netdev ethtool ops with ice specific ops
+ */
+void ice_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ice_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
new file mode 100644
index 000000000000..1b9e2ef48a9d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+/* Machine-generated file */
+
+#ifndef _ICE_HW_AUTOGEN_H_
+#define _ICE_HW_AUTOGEN_H_
+
+#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
+#define PF_FW_ARQBAH 0x00080180
+#define PF_FW_ARQBAL 0x00080080
+#define PF_FW_ARQH 0x00080380
+#define PF_FW_ARQH_ARQH_S 0
+#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, PF_FW_ARQH_ARQH_S)
+#define PF_FW_ARQLEN 0x00080280
+#define PF_FW_ARQLEN_ARQLEN_S 0
+#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, PF_FW_ARQLEN_ARQLEN_S)
+#define PF_FW_ARQLEN_ARQVFE_S 28
+#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S)
+#define PF_FW_ARQLEN_ARQOVFL_S 29
+#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S)
+#define PF_FW_ARQLEN_ARQCRIT_S 30
+#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S)
+#define PF_FW_ARQLEN_ARQENABLE_S 31
+#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S)
+#define PF_FW_ARQT 0x00080480
+#define PF_FW_ATQBAH 0x00080100
+#define PF_FW_ATQBAL 0x00080000
+#define PF_FW_ATQH 0x00080300
+#define PF_FW_ATQH_ATQH_S 0
+#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, PF_FW_ATQH_ATQH_S)
+#define PF_FW_ATQLEN 0x00080200
+#define PF_FW_ATQLEN_ATQLEN_S 0
+#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
+#define PF_FW_ATQLEN_ATQVFE_S 28
+#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S)
+#define PF_FW_ATQLEN_ATQOVFL_S 29
+#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S)
+#define PF_FW_ATQLEN_ATQCRIT_S 30
+#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S)
+#define PF_FW_ATQLEN_ATQENABLE_S 31
+#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
+#define PF_FW_ATQT 0x00080400
+
+#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S)
+#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
+#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
+#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30
+#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045c900 + ((_i) * 4))
+#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0
+#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30
+#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045ca00 + ((_i) * 4))
+#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0
+#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30
+#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S)
+#define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045cb00 + ((_i) * 4))
+#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0
+#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S)
+#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30
+#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S)
+
+#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4))
+#define QRXFLXP_CNTXT_RXDID_IDX_S 0
+#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S)
+#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
+#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S)
+#define QRXFLXP_CNTXT_TS_S 11
+#define QRXFLXP_CNTXT_TS_M BIT(QRXFLXP_CNTXT_TS_S)
+#define GLGEN_RSTAT 0x000B8188
+#define GLGEN_RSTAT_DEVSTATE_S 0
+#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S)
+#define GLGEN_RSTCTL 0x000B8180
+#define GLGEN_RSTCTL_GRSTDEL_S 0
+#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S)
+#define GLGEN_RSTAT_RESET_TYPE_S 2
+#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, GLGEN_RSTAT_RESET_TYPE_S)
+#define GLGEN_RTRIG 0x000B8190
+#define GLGEN_RTRIG_CORER_S 0
+#define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S)
+#define GLGEN_RTRIG_GLOBR_S 1
+#define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S)
+#define GLGEN_STAT 0x000B612C
+#define PFGEN_CTRL 0x00091000
+#define PFGEN_CTRL_PFSWR_S 0
+#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S)
+#define PFGEN_STATE 0x00088000
+#define PRTGEN_STATUS 0x000B8100
+#define PFHMC_ERRORDATA 0x00520500
+#define PFHMC_ERRORINFO 0x00520400
+#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
+#define GLINT_DYN_CTL_INTENA_S 0
+#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S)
+#define GLINT_DYN_CTL_CLEARPBA_S 1
+#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S)
+#define GLINT_DYN_CTL_SWINT_TRIG_S 2
+#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(GLINT_DYN_CTL_SWINT_TRIG_S)
+#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
+#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S)
+#define GLINT_DYN_CTL_INTENA_MSK_S 31
+#define GLINT_DYN_CTL_INTENA_MSK_M BIT(GLINT_DYN_CTL_INTENA_MSK_S)
+#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
+#define PFINT_FW_CTL 0x0016C800
+#define PFINT_FW_CTL_MSIX_INDX_S 0
+#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_FW_CTL_MSIX_INDX_S)
+#define PFINT_FW_CTL_ITR_INDX_S 11
+#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, PFINT_FW_CTL_ITR_INDX_S)
+#define PFINT_FW_CTL_CAUSE_ENA_S 30
+#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
+#define PFINT_OICR 0x0016CA00
+#define PFINT_OICR_INTEVENT_S 0
+#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S)
+#define PFINT_OICR_HLP_RDY_S 14
+#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
+#define PFINT_OICR_CPM_RDY_S 15
+#define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S)
+#define PFINT_OICR_ECC_ERR_S 16
+#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
+#define PFINT_OICR_MAL_DETECT_S 19
+#define PFINT_OICR_MAL_DETECT_M BIT(PFINT_OICR_MAL_DETECT_S)
+#define PFINT_OICR_GRST_S 20
+#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
+#define PFINT_OICR_PCI_EXCEPTION_S 21
+#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
+#define PFINT_OICR_GPIO_S 22
+#define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S)
+#define PFINT_OICR_STORM_DETECT_S 24
+#define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S)
+#define PFINT_OICR_HMC_ERR_S 26
+#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
+#define PFINT_OICR_PE_CRITERR_S 28
+#define PFINT_OICR_PE_CRITERR_M BIT(PFINT_OICR_PE_CRITERR_S)
+#define PFINT_OICR_CTL 0x0016CA80
+#define PFINT_OICR_CTL_MSIX_INDX_S 0
+#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_OICR_CTL_MSIX_INDX_S)
+#define PFINT_OICR_CTL_ITR_INDX_S 11
+#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, PFINT_OICR_CTL_ITR_INDX_S)
+#define PFINT_OICR_CTL_CAUSE_ENA_S 30
+#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S)
+#define PFINT_OICR_ENA 0x0016C900
+#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
+#define QINT_RQCTL_MSIX_INDX_S 0
+#define QINT_RQCTL_ITR_INDX_S 11
+#define QINT_RQCTL_CAUSE_ENA_S 30
+#define QINT_RQCTL_CAUSE_ENA_M BIT(QINT_RQCTL_CAUSE_ENA_S)
+#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
+#define QINT_TQCTL_MSIX_INDX_S 0
+#define QINT_TQCTL_ITR_INDX_S 11
+#define QINT_TQCTL_CAUSE_ENA_S 30
+#define QINT_TQCTL_CAUSE_ENA_M BIT(QINT_TQCTL_CAUSE_ENA_S)
+#define GLLAN_RCTL_0 0x002941F8
+#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
+#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
+#define QRX_CTRL_MAX_INDEX 2047
+#define QRX_CTRL_QENA_REQ_S 0
+#define QRX_CTRL_QENA_REQ_M BIT(QRX_CTRL_QENA_REQ_S)
+#define QRX_CTRL_QENA_STAT_S 2
+#define QRX_CTRL_QENA_STAT_M BIT(QRX_CTRL_QENA_STAT_S)
+#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4))
+#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4))
+#define GLNVM_FLA 0x000B6108
+#define GLNVM_FLA_LOCKED_S 6
+#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S)
+#define GLNVM_GENS 0x000B6100
+#define GLNVM_GENS_SR_SIZE_S 5
+#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S)
+#define GLNVM_ULD 0x000B6008
+#define GLNVM_ULD_CORER_DONE_S 3
+#define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S)
+#define GLNVM_ULD_GLOBR_DONE_S 4
+#define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S)
+#define PF_FUNC_RID 0x0009E880
+#define PF_FUNC_RID_FUNC_NUM_S 0
+#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S)
+#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
+#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
+#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
+#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
+#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
+#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
+#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
+#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
+#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
+#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
+#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
+#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8))
+#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
+#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
+#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
+#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
+#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
+#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
+#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
+#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
+#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
+#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
+#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
+#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
+#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
+#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
+#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
+#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
+#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
+#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
+#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
+#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
+#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
+#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
+#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
+#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
+#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
+#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
+#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
+#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
+#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
+#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
+#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
+#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
+#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
+#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
+#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
+#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
+#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
+#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
+#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
+#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
+#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
+#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
+#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
+#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
+#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
+#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
+#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
+#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
+#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
+#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
+#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
+#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
+#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
+#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
+#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
+#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
+#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
+#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
+#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
+#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
+#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
+#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
+#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
+#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define VSIQF_HKEY_MAX_INDEX 12
+
+#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
new file mode 100644
index 000000000000..d23a91665b46
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -0,0 +1,473 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_LAN_TX_RX_H_
+#define _ICE_LAN_TX_RX_H_
+
+union ice_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ __le16 mirroring_status;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow Director filter id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/PTYPE/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ __le32 reserved;
+ __le32 fd_id;
+ } qword3;
+ } wb; /* writeback */
+};
+
+struct ice_rx_ptype_decoded {
+ u32 ptype:10;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:2;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum ice_rx_ptype_outer_ip {
+ ICE_RX_PTYPE_OUTER_L2 = 0,
+ ICE_RX_PTYPE_OUTER_IP = 1,
+};
+
+enum ice_rx_ptype_outer_ip_ver {
+ ICE_RX_PTYPE_OUTER_NONE = 0,
+ ICE_RX_PTYPE_OUTER_IPV4 = 1,
+ ICE_RX_PTYPE_OUTER_IPV6 = 2,
+};
+
+enum ice_rx_ptype_outer_fragmented {
+ ICE_RX_PTYPE_NOT_FRAG = 0,
+ ICE_RX_PTYPE_FRAG = 1,
+};
+
+enum ice_rx_ptype_tunnel_type {
+ ICE_RX_PTYPE_TUNNEL_NONE = 0,
+ ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
+ ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum ice_rx_ptype_tunnel_end_prot {
+ ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
+ ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum ice_rx_ptype_inner_prot {
+ ICE_RX_PTYPE_INNER_PROT_NONE = 0,
+ ICE_RX_PTYPE_INNER_PROT_UDP = 1,
+ ICE_RX_PTYPE_INNER_PROT_TCP = 2,
+ ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
+ ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
+ ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
+};
+
+enum ice_rx_ptype_payload_layer {
+ ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+/* Rx Flex Descriptor
+ * This descriptor is used instead of the legacy version descriptor when
+ * ice_rlan_ctx.adv_desc is set
+ */
+union ice_32b_rx_flex_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ /* Qword 0 */
+ u8 rxdid; /* descriptor builder profile id */
+ u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+ __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+ __le16 pkt_len; /* [15:14] are reserved */
+ __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+ /* sph=[11:11] */
+ /* ff1/ext=[15:12] */
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 flex_meta0;
+ __le16 flex_meta1;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flex_flags2;
+ u8 time_stamp_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le16 flex_meta2;
+ __le16 flex_meta3;
+ union {
+ struct {
+ __le16 flex_meta4;
+ __le16 flex_meta5;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+ } wb; /* writeback */
+};
+
+/* Rx Flex Descriptor NIC Profile
+ * This descriptor corresponds to RxDID 2, which contains
+ * metadata fields for RSS, flow ID and timestamp info.
+ */
+struct ice_32b_rx_flex_desc_nic {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le32 flow_id;
+ union {
+ struct {
+ __le16 vlan_id;
+ __le16 flow_id_ipv6;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
+
+/* Receive Flex Descriptor profile IDs: There are a total
+ * of 64 profiles. Profile IDs 0/1 are for legacy, and
+ * profiles 2-63 are flex profiles that can be programmed
+ * with specific metadata (profile 7 is reserved for HW).
+ */
+enum ice_rxdid {
+ ICE_RXDID_START = 0,
+ ICE_RXDID_LEGACY_0 = ICE_RXDID_START,
+ ICE_RXDID_LEGACY_1,
+ ICE_RXDID_FLX_START,
+ ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START,
+ ICE_RXDID_FLX_LAST = 63,
+ ICE_RXDID_LAST = ICE_RXDID_FLX_LAST
+};
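+
+/* Illustrative sketch (not part of this patch's API): a receive path can
+ * check which profile built a descriptor before interpreting its metadata.
+ * Here rx_desc is assumed to point at a union ice_32b_rx_flex_desc:
+ *
+ *	if (rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC)
+ *		handle_nic_profile(rx_desc);	// hypothetical helper
+ */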
+
+/* Receive Flex Descriptor Rx opcode values */
+#define ICE_RX_OPC_MDID 0x01
+
+/* Receive Descriptor MDID values */
+#define ICE_RX_MDID_FLOW_ID_LOWER 5
+#define ICE_RX_MDID_FLOW_ID_HIGH 6
+#define ICE_RX_MDID_HASH_LOW 56
+#define ICE_RX_MDID_HASH_HIGH 57
+
+/* Rx Flag64 packet flag bits */
+enum ice_rx_flg64_bits {
+ ICE_RXFLG_PKT_DSI = 0,
+ ICE_RXFLG_EVLAN_x8100 = 15,
+ ICE_RXFLG_EVLAN_x9100,
+ ICE_RXFLG_VLAN_x8100,
+ ICE_RXFLG_TNL_MAC = 22,
+ ICE_RXFLG_TNL_VLAN,
+ ICE_RXFLG_PKT_FRG,
+ ICE_RXFLG_FIN = 32,
+ ICE_RXFLG_SYN,
+ ICE_RXFLG_RST,
+ ICE_RXFLG_TNL0 = 38,
+ ICE_RXFLG_TNL1,
+ ICE_RXFLG_TNL2,
+ ICE_RXFLG_UDP_GRE,
+ ICE_RXFLG_RSVD = 63
+};
+
+/* for ice_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define ICE_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */
+
+/* for ice_32b_rx_flex_desc.pkt_len member */
+#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */
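+
+/* Example (illustrative, assuming rx_desc points at a written-back
+ * union ice_32b_rx_flex_desc):
+ *
+ *	u16 ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+ *		    ICE_RX_FLEX_DESC_PTYPE_M;
+ *	u16 len = le16_to_cpu(rx_desc->wb.pkt_len) &
+ *		  ICE_RX_FLX_DESC_PKT_LEN_M;
+ */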
+
+enum ice_rx_flex_desc_status_error_0_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
+ ICE_RX_FLEX_DESC_STATUS0_EOF_S,
+ ICE_RX_FLEX_DESC_STATUS0_HBO_S,
+ ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
+ ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
+ ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
+ ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
+ ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
+ ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
+ ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
+ ICE_RX_FLEX_DESC_STATUS0_RXE_S,
+ ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
+ ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
+ ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
+ ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
+ ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
+ ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
+};
+
+#define ICE_RXQ_CTX_SIZE_DWORDS 8
+#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+
+/* RLAN Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed because fields can
+ * cross byte boundaries. If a variable's width were not set to this larger
+ * size, bits could be shifted off the top of the variable when a field starts
+ * at the top of a byte and crosses over into the next byte.
+ */
+struct ice_rlan_ctx {
+ u16 head;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u64 base;
+ u16 qlen;
+#define ICE_RLAN_CTX_DBUF_S 7
+ u16 dbuf; /* bigger than needed, see above for reason */
+#define ICE_RLAN_CTX_HBUF_S 6
+ u16 hbuf; /* bigger than needed, see above for reason */
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u32 rxmax; /* bigger than needed, see above for reason */
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u16 lrxqthresh; /* bigger than needed, see above for reason */
+};
+
+struct ice_ctx_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
+
+#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
+ .offset = offsetof(struct _struct, _ele), \
+ .size_of = FIELD_SIZEOF(struct _struct, _ele), \
+ .width = _width, \
+ .lsb = _lsb, \
+}
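+
+/* Illustrative sketch: ICE_CTX_STORE entries are meant to be collected in a
+ * table describing where each context struct field lives in the packed
+ * hardware layout. The widths/LSBs below are examples, not the real layout:
+ *
+ *	static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
+ *		ICE_CTX_STORE(ice_rlan_ctx, head,  13, 0),
+ *		ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8,  13),
+ *		...
+ *	};
+ *
+ * A generic packing routine can then copy the over-sized struct fields into
+ * the byte-packed queue context using the recorded width and LSB.
+ */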
+
+/* for hsplit_0 field of Rx RLAN context */
+enum ice_rlan_ctx_rx_hsplit_0 {
+ ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 = 1,
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_IP = 2,
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* for hsplit_1 field of Rx RLAN context */
+enum ice_rlan_ctx_rx_hsplit_1 {
+ ICE_RLAN_RX_HSPLIT_1_NO_SPLIT = 0,
+ ICE_RLAN_RX_HSPLIT_1_SPLIT_L2 = 1,
+ ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
+};
+
+/* TX Descriptor */
+struct ice_tx_desc {
+ __le64 buf_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+enum ice_tx_desc_dtype_value {
+ ICE_TX_DESC_DTYPE_DATA = 0x0,
+ ICE_TX_DESC_DTYPE_CTX = 0x1,
+ /* DESC_DONE - HW has completed write-back of descriptor */
+ ICE_TX_DESC_DTYPE_DESC_DONE = 0xF,
+};
+
+#define ICE_TXD_QW1_CMD_S 4
+#define ICE_TXD_QW1_CMD_M (0xFFFUL << ICE_TXD_QW1_CMD_S)
+
+enum ice_tx_desc_cmd_bits {
+ ICE_TX_DESC_CMD_EOP = 0x0001,
+ ICE_TX_DESC_CMD_RS = 0x0002,
+ ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+};
+
+#define ICE_TXD_QW1_OFFSET_S 16
+#define ICE_TXD_QW1_OFFSET_M (0x3FFFFULL << ICE_TXD_QW1_OFFSET_S)
+
+enum ice_tx_desc_len_fields {
+ /* Note: These are predefined bit offsets */
+ ICE_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */
+ ICE_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */
+ ICE_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */
+};
+
+#define ICE_TXD_QW1_MACLEN_M (0x7FUL << ICE_TX_DESC_LEN_MACLEN_S)
+#define ICE_TXD_QW1_IPLEN_M (0x7FUL << ICE_TX_DESC_LEN_IPLEN_S)
+#define ICE_TXD_QW1_L4LEN_M (0xFUL << ICE_TX_DESC_LEN_L4_LEN_S)
+
+/* Tx descriptor field limits in bytes */
+#define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \
+ ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD)
+#define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \
+ ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD)
+#define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \
+ ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD)
+
+#define ICE_TXD_QW1_TX_BUF_SZ_S 34
+#define ICE_TXD_QW1_L2TAG1_S 48
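+
+/* Illustrative sketch: composing a Tx data descriptor's cmd_type_offset_bsz
+ * quadword from the fields above. dtype occupies bits [3:0]; td_cmd,
+ * td_offset and size are hypothetical local variables:
+ *
+ *	u64 qw1 = ICE_TX_DESC_DTYPE_DATA |
+ *		  ((u64)td_cmd << ICE_TXD_QW1_CMD_S) |
+ *		  ((u64)td_offset << ICE_TXD_QW1_OFFSET_S) |
+ *		  ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S);
+ *	tx_desc->cmd_type_offset_bsz = cpu_to_le64(qw1);
+ */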
+
+/* Context descriptors */
+struct ice_tx_ctx_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 qw1;
+};
+
+#define ICE_TXD_CTX_QW1_CMD_S 4
+#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
+
+#define ICE_TXD_CTX_QW1_TSO_LEN_S 30
+#define ICE_TXD_CTX_QW1_TSO_LEN_M \
+ (0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
+
+#define ICE_TXD_CTX_QW1_MSS_S 50
+
+enum ice_tx_ctx_desc_cmd_bits {
+ ICE_TX_CTX_DESC_TSO = 0x01,
+ ICE_TX_CTX_DESC_TSYN = 0x02,
+ ICE_TX_CTX_DESC_IL2TAG2 = 0x04,
+ ICE_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ ICE_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ ICE_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ ICE_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ ICE_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ ICE_TX_CTX_DESC_RESERVED = 0x40
+};
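+
+/* Illustrative sketch: a TSO context descriptor's qw1 would typically be
+ * built from the fields above; paylen and mss are hypothetical locals:
+ *
+ *	u64 qw1 = ICE_TX_DESC_DTYPE_CTX |
+ *		  ((u64)ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
+ *		  ((u64)paylen << ICE_TXD_CTX_QW1_TSO_LEN_S) |
+ *		  ((u64)mss << ICE_TXD_CTX_QW1_MSS_S);
+ *	cdesc->qw1 = cpu_to_le64(qw1);
+ */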
+
+#define ICE_LAN_TXQ_MAX_QGRPS 127
+#define ICE_LAN_TXQ_MAX_QDIS 1023
+
+/* Tx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct ice_tlan_ctx {
+#define ICE_TLAN_CTX_BASE_S 7
+ u64 base; /* base is defined in 128-byte units */
+ u8 port_num;
+ u16 cgd_num; /* bigger than needed, see above for reason */
+ u8 pf_num;
+ u16 vmvf_num;
+ u8 vmvf_type;
+#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
+#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
+ u16 src_vsi;
+ u8 tsyn_ena;
+ u8 alt_vlan;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u8 wb_mode;
+ u8 tphrd_desc;
+ u8 tphrd;
+ u8 tphwr_desc;
+ u16 cmpq_id;
+ u16 qnum_in_func;
+ u8 itr_notification_mode;
+ u8 adjust_prof_id;
+ u32 qlen; /* bigger than needed, see above for reason */
+ u8 quanta_prof_idx;
+ u8 tso_ena;
+ u16 tso_qnum;
+ u8 legacy_int;
+ u8 drop_ena;
+ u8 cache_prof_idx;
+ u8 pkt_shaper_prof_idx;
+ u8 int_q_state; /* width not needed - internal do not write */
+};
+
+/* macro to make the table lines short */
+#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ ICE_RX_PTYPE_OUTER_##OUTER_IP, \
+ ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ ICE_RX_PTYPE_##OUTER_FRAG, \
+ ICE_RX_PTYPE_TUNNEL_##T, \
+ ICE_RX_PTYPE_TUNNEL_END_##TE, \
+ ICE_RX_PTYPE_##TEF, \
+ ICE_RX_PTYPE_INNER_PROT_##I, \
+ ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
+ /* L2 Packet types */
+ ICE_PTT_UNUSED_ENTRY(0),
+ ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+};
+
+static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
+{
+ return ice_ptype_lkup[ptype];
+}
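+
+/* Example (illustrative): decoding a hardware PTYPE; callers must ensure
+ * ptype indexes a valid table entry:
+ *
+ *	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);
+ *
+ *	if (decoded.known && decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP &&
+ *	    decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4)
+ *		count_ipv4_pkt();	// hypothetical helper
+ */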
+#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
new file mode 100644
index 000000000000..210b7910f1cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -0,0 +1,5495 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+/* Intel(R) Ethernet Connection E800 Series Linux Driver */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ice.h"
+
+#define DRV_VERSION "ice-0.7.0-k"
+#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
+const char ice_drv_ver[] = DRV_VERSION;
+static const char ice_driver_string[] = DRV_SUMMARY;
+static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
+
+MODULE_AUTHOR("Intel Corporation, <[email protected]>");
+MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int debug = -1;
+module_param(debug, int, 0644);
+#ifndef CONFIG_DYNAMIC_DEBUG
+MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
+#else
+MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
+#endif /* !CONFIG_DYNAMIC_DEBUG */
+
+static struct workqueue_struct *ice_wq;
+static const struct net_device_ops ice_netdev_ops;
+
+static void ice_pf_dis_all_vsi(struct ice_pf *pf);
+static void ice_rebuild(struct ice_pf *pf);
+static int ice_vsi_release(struct ice_vsi *vsi);
+static void ice_update_vsi_stats(struct ice_vsi *vsi);
+static void ice_update_pf_stats(struct ice_pf *pf);
+
+/**
+ * ice_get_free_slot - get the next free (NULL) slot index in the array
+ * @array: array to search
+ * @size: size of the array
+ * @curr: last known occupied index to be used as a search hint
+ *
+ * void * is being used to keep the functionality generic. This lets us use this
+ * function on any array of pointers.
+ */
+static int ice_get_free_slot(void *array, int size, int curr)
+{
+ int **tmp_array = (int **)array;
+ int next;
+
+ if (curr < (size - 1) && !tmp_array[curr + 1]) {
+ next = curr + 1;
+ } else {
+ int i = 0;
+
+ while ((i < size) && (tmp_array[i]))
+ i++;
+ if (i == size)
+ next = ICE_NO_VSI;
+ else
+ next = i;
+ }
+ return next;
+}
+
+/**
+ * ice_search_res - Search the tracker for a block of resources
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ * Returns the base item index of the block, or -ENOMEM for error
+ */
+static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int start = res->search_hint;
+ int end = start;
+
+ id |= ICE_RES_VALID_BIT;
+
+ do {
+ /* skip already allocated entries */
+ if (res->list[end++] & ICE_RES_VALID_BIT) {
+ start = end;
+ if ((start + needed) > res->num_entries)
+ break;
+ }
+
+ if (end == (start + needed)) {
+ int i = start;
+
+ /* there was enough, so assign it to the requestor */
+ while (i != end)
+ res->list[i++] = id;
+
+ if (end == res->num_entries)
+ end = 0;
+
+ res->search_hint = end;
+ return start;
+ }
+ } while (1);
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_get_res - get a block of resources
+ * @pf: board private structure
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ * The search_hint trick and lack of advanced fit-finding only work
+ * because we're highly likely to have all the same-sized requests.
+ * Linear search time and any fragmentation should be minimal.
+ */
+static int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+ int ret;
+
+ if (!res || !pf)
+ return -EINVAL;
+
+ if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "param err: needed=%d, num_entries = %d id=0x%04x\n",
+ needed, res->num_entries, id);
+ return -EINVAL;
+ }
+
+ /* search based on search_hint */
+ ret = ice_search_res(res, needed, id);
+
+ if (ret < 0) {
+ /* previous search failed. Reset search hint and try again */
+ res->search_hint = 0;
+ ret = ice_search_res(res, needed, id);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_free_res - free a block of resources
+ * @res: pointer to the resource
+ * @index: starting index previously returned by ice_get_res
+ * @id: identifier to track owner
+ * Returns number of resources freed
+ */
+static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
+{
+ int count = 0;
+ int i;
+
+ if (!res || index >= res->num_entries)
+ return -EINVAL;
+
+ id |= ICE_RES_VALID_BIT;
+ for (i = index; i < res->num_entries && res->list[i] == id; i++) {
+ res->list[i] = 0;
+ count++;
+ }
+
+ return count;
+}
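+
+/* Illustrative usage sketch: callers reserve a contiguous block and later
+ * release it with the same id. The tracker instance (pf->irq_tracker) and
+ * owner_id are assumptions for illustration:
+ *
+ *	int base = ice_get_res(pf, pf->irq_tracker, num_vectors, owner_id);
+ *
+ *	if (base < 0)
+ *		return base;
+ *	...
+ *	ice_free_res(pf->irq_tracker, (u16)base, owner_id);
+ */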
+
+/**
+ * ice_add_mac_to_list - Add a MAC address filter entry to the list
+ * @vsi: the VSI to be forwarded to
+ * @add_list: pointer to the list which contains MAC filter entries
+ * @macaddr: the MAC address to be added.
+ *
+ * Adds a MAC address filter entry to the temporary list.
+ *
+ * Returns 0 on success or -ENOMEM on failure.
+ */
+static int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+ const u8 *macaddr)
+{
+ struct ice_fltr_list_entry *tmp;
+ struct ice_pf *pf = vsi->back;
+
+ tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->fltr_info.flag = ICE_FLTR_TX;
+ tmp->fltr_info.src = vsi->vsi_num;
+ tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
+ ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
+
+ INIT_LIST_HEAD(&tmp->list_entry);
+ list_add(&tmp->list_entry, add_list);
+
+ return 0;
+}
+
+/**
+ * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
+ * @netdev: the net device on which the sync is happening
+ * @addr: MAC address to sync
+ *
+ * This is a callback function which is called by the in-kernel device sync
+ * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
+ * populates the tmp_sync_list, which is later used by ice_add_mac to add the
+ * MAC filters to the hardware.
+ */
+static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
+ * @netdev: the net device on which the unsync is happening
+ * @addr: MAC address to unsync
+ *
+ * This is a callback function which is called by the in-kernel device unsync
+ * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
+ * populates the tmp_unsync_list, which is later used by ice_remove_mac to
+ * delete the MAC filters from the hardware.
+ */
+static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ice_free_fltr_list - free filter lists helper
+ * @dev: pointer to the device struct
+ * @h: pointer to the list head to be freed
+ *
+ * Helper function to free filter lists previously created using
+ * ice_add_mac_to_list
+ */
+static void ice_free_fltr_list(struct device *dev, struct list_head *h)
+{
+ struct ice_fltr_list_entry *e, *tmp;
+
+ list_for_each_entry_safe(e, tmp, h, list_entry) {
+ list_del(&e->list_entry);
+ devm_kfree(dev, e);
+ }
+}
+
+/**
+ * ice_vsi_fltr_changed - check if filter state changed
+ * @vsi: VSI to be checked
+ *
+ * returns true if filter state has changed, false otherwise.
+ */
+static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
+{
+ return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
+ test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
+ test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+}
+
+/**
+ * ice_vsi_sync_fltr - Update the VSI filter list to the HW
+ * @vsi: ptr to the VSI
+ *
+ * Push any outstanding VSI filter changes through the AdminQ.
+ */
+static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct net_device *netdev = vsi->netdev;
+ bool promisc_forced_on = false;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status = 0;
+ u32 changed_flags = 0;
+ int err = 0;
+
+ if (!vsi->netdev)
+ return -EINVAL;
+
+ while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
+ usleep_range(1000, 2000);
+
+ changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
+ vsi->current_netdev_flags = vsi->netdev->flags;
+
+ INIT_LIST_HEAD(&vsi->tmp_sync_list);
+ INIT_LIST_HEAD(&vsi->tmp_unsync_list);
+
+ if (ice_vsi_fltr_changed(vsi)) {
+ clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
+ clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
+ clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+
+ /* grab the netdev's addr_list_lock */
+ netif_addr_lock_bh(netdev);
+ __dev_uc_sync(netdev, ice_add_mac_to_sync_list,
+ ice_add_mac_to_unsync_list);
+ __dev_mc_sync(netdev, ice_add_mac_to_sync_list,
+ ice_add_mac_to_unsync_list);
+ /* our temp lists are populated. release lock */
+ netif_addr_unlock_bh(netdev);
+ }
+
+	/* Remove MAC addresses in the unsync list */
+ status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
+ ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
+ if (status) {
+ netdev_err(netdev, "Failed to delete MAC filters\n");
+ /* if we failed because of alloc failures, just bail */
+ if (status == ICE_ERR_NO_MEMORY) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+
+	/* Add MAC addresses in the sync list */
+ status = ice_add_mac(hw, &vsi->tmp_sync_list);
+ ice_free_fltr_list(dev, &vsi->tmp_sync_list);
+ if (status) {
+ netdev_err(netdev, "Failed to add MAC filters\n");
+		/* If there is no more space for new unicast MAC filters,
+		 * the VSI should go into promiscuous mode. There should be
+		 * some space reserved for promiscuous filters.
+		 */
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
+ !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
+ vsi->state)) {
+ promisc_forced_on = true;
+ netdev_warn(netdev,
+ "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
+ vsi->vsi_num);
+ } else {
+ err = -EIO;
+ goto out;
+ }
+ }
+ /* check for changes in promiscuous modes */
+ if (changed_flags & IFF_ALLMULTI)
+ netdev_warn(netdev, "Unsupported configuration\n");
+
+ if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
+ test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
+ clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
+ if (vsi->current_netdev_flags & IFF_PROMISC) {
+ /* Apply TX filter rule to get traffic from VMs */
+ status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
+ ICE_FLTR_TX);
+ if (status) {
+ netdev_err(netdev, "Error setting default VSI %i tx rule\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags &= ~IFF_PROMISC;
+ err = -EIO;
+ goto out_promisc;
+ }
+ /* Apply RX filter rule to get traffic from wire */
+ status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, true,
+ ICE_FLTR_RX);
+ if (status) {
+ netdev_err(netdev, "Error setting default VSI %i rx rule\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags &= ~IFF_PROMISC;
+ err = -EIO;
+ goto out_promisc;
+ }
+ } else {
+ /* Clear TX filter rule to stop traffic from VMs */
+ status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
+ ICE_FLTR_TX);
+ if (status) {
+ netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_PROMISC;
+ err = -EIO;
+ goto out_promisc;
+ }
+ /* Clear filter RX to remove traffic from wire */
+ status = ice_cfg_dflt_vsi(hw, vsi->vsi_num, false,
+ ICE_FLTR_RX);
+ if (status) {
+ netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_PROMISC;
+ err = -EIO;
+ goto out_promisc;
+ }
+ }
+ }
+ goto exit;
+
+out_promisc:
+ set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
+ goto exit;
+out:
+ /* if something went wrong then set the changed flag so we try again */
+ set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
+ set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
+exit:
+ clear_bit(__ICE_CFG_BUSY, vsi->state);
+ return err;
+}
+
+/**
+ * ice_sync_fltr_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ */
+static void ice_sync_fltr_subtask(struct ice_pf *pf)
+{
+ int v;
+
+ if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
+ return;
+
+ clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
+
+ for (v = 0; v < pf->num_alloc_vsi; v++)
+ if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
+ ice_vsi_sync_fltr(pf->vsi[v])) {
+ /* come back and try again later */
+ set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
+ break;
+ }
+}
+
+/**
+ * ice_is_reset_recovery_pending - check if reset recovery is pending
+ * @state: PF state field
+ */
+static bool ice_is_reset_recovery_pending(unsigned long *state)
+{
+ return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
+}
+
+/**
+ * ice_prepare_for_reset - prep for the core to reset
+ * @pf: board private structure
+ *
+ * Inform or close all dependent features in prep for reset.
+ */
+static void
+ice_prepare_for_reset(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 v;
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num);
+
+ dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+
+ /* disable the VSIs and their queues that are not already DOWN */
+	/* ice_pf_dis_all_vsi modifies netdev structures - rtnl_lock needed */
+ ice_pf_dis_all_vsi(pf);
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ pf->vsi[v]->vsi_num = 0;
+
+ ice_shutdown_all_ctrlq(hw);
+}
+
+/**
+ * ice_do_reset - Initiate one of many types of resets
+ * @pf: board private structure
+ * @reset_type: reset type requested before this function was called
+ */
+static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct device *dev = &pf->pdev->dev;
+ struct ice_hw *hw = &pf->hw;
+
+ dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
+ WARN_ON(in_interrupt());
+
+ /* PFR is a bit of a special case because it doesn't result in an OICR
+ * interrupt. So for PFR, we prepare for reset, issue the reset and
+ * rebuild sequentially.
+ */
+ if (reset_type == ICE_RESET_PFR) {
+ set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ ice_prepare_for_reset(pf);
+ }
+
+ /* trigger the reset */
+ if (ice_reset(hw, reset_type)) {
+ dev_err(dev, "reset %d failed\n", reset_type);
+ set_bit(__ICE_RESET_FAILED, pf->state);
+ clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ return;
+ }
+
+ if (reset_type == ICE_RESET_PFR) {
+ pf->pfr_count++;
+ ice_rebuild(pf);
+ clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ }
+}
+
+/**
+ * ice_reset_subtask - Set up for resetting the device and driver
+ * @pf: board private structure
+ */
+static void ice_reset_subtask(struct ice_pf *pf)
+{
+ enum ice_reset_req reset_type;
+
+ rtnl_lock();
+
+ /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
+ * OICR interrupt. The OICR handler (ice_misc_intr) determines what
+ * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in
+ * pf->state. So if reset/recovery is pending (as indicated by this bit)
+ * we do a rebuild and return.
+ */
+ if (ice_is_reset_recovery_pending(pf->state)) {
+ clear_bit(__ICE_GLOBR_RECV, pf->state);
+ clear_bit(__ICE_CORER_RECV, pf->state);
+ ice_prepare_for_reset(pf);
+
+ /* make sure we are ready to rebuild */
+ if (ice_check_reset(&pf->hw))
+ set_bit(__ICE_RESET_FAILED, pf->state);
+ else
+ ice_rebuild(pf);
+ clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ goto unlock;
+ }
+
+ /* No pending resets to finish processing. Check for new resets */
+ if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state))
+ reset_type = ICE_RESET_GLOBR;
+ else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state))
+ reset_type = ICE_RESET_CORER;
+ else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state))
+ reset_type = ICE_RESET_PFR;
+ else
+ goto unlock;
+
+ /* reset if not already down or resetting */
+ if (!test_bit(__ICE_DOWN, pf->state) &&
+ !test_bit(__ICE_CFG_BUSY, pf->state)) {
+ ice_do_reset(pf, reset_type);
+ }
+
+unlock:
+ rtnl_unlock();
+}
+
+/**
+ * ice_watchdog_subtask - periodic tasks not using event driven scheduling
+ * @pf: board private structure
+ */
+static void ice_watchdog_subtask(struct ice_pf *pf)
+{
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__ICE_DOWN, pf->state) ||
+ test_bit(__ICE_CFG_BUSY, pf->state))
+ return;
+
+ /* make sure we don't do these things too often */
+ if (time_before(jiffies,
+ pf->serv_tmr_prev + pf->serv_tmr_period))
+ return;
+
+ pf->serv_tmr_prev = jiffies;
+
+ /* Update the stats for active netdevs so the network stack
+ * can look at updated numbers whenever it cares to
+ */
+ ice_update_pf_stats(pf);
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && pf->vsi[i]->netdev)
+ ice_update_vsi_stats(pf->vsi[i]);
+}
+
+/**
+ * ice_print_link_msg - print link up or down message
+ * @vsi: the VSI whose link status is being queried
+ * @isup: boolean for if the link is now up or down
+ */
+void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
+{
+ const char *speed;
+ const char *fc;
+
+ if (vsi->current_isup == isup)
+ return;
+
+ vsi->current_isup = isup;
+
+ if (!isup) {
+ netdev_info(vsi->netdev, "NIC Link is Down\n");
+ return;
+ }
+
+ switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = "40 G";
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = "25 G";
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = "20 G";
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = "10 G";
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = "5 G";
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ speed = "2.5 G";
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ speed = "1 G";
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = "100 M";
+ break;
+ default:
+ speed = "Unknown";
+ break;
+ }
+
+ switch (vsi->port_info->fc.current_mode) {
+ case ICE_FC_FULL:
+ fc = "RX/TX";
+ break;
+ case ICE_FC_TX_PAUSE:
+ fc = "TX";
+ break;
+ case ICE_FC_RX_PAUSE:
+ fc = "RX";
+ break;
+ default:
+ fc = "Unknown";
+ break;
+ }
+
+ netdev_info(vsi->netdev, "NIC Link is up %sbps, Flow Control: %s\n",
+ speed, fc);
+}
+
+/**
+ * ice_init_link_events - enable/initialize link events
+ * @pi: pointer to the port_info instance
+ *
+ * Returns -EIO on failure, 0 on success
+ */
+static int ice_init_link_events(struct ice_port_info *pi)
+{
+ u16 mask;
+
+ mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
+ ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
+
+ if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
+ dev_dbg(ice_hw_to_dev(pi->hw),
+ "Failed to set link event mask for port %d\n",
+ pi->lport);
+ return -EIO;
+ }
+
+ if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
+ dev_dbg(ice_hw_to_dev(pi->hw),
+ "Failed to enable link events for port %d\n",
+ pi->lport);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_link_event - update the VSI's netdev
+ * @vsi: the VSI on which the link event occurred
+ * @link_up: whether or not the VSI's netdev should be brought up or down
+ */
+static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
+{
+ if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ if (vsi->type == ICE_VSI_PF) {
+ if (!vsi->netdev) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "vsi->netdev is not initialized!\n");
+ return;
+ }
+ if (link_up) {
+ netif_carrier_on(vsi->netdev);
+ netif_tx_wake_all_queues(vsi->netdev);
+ } else {
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_all_queues(vsi->netdev);
+ }
+ }
+}
+
+/**
+ * ice_link_event - process the link event
+ * @pf: pf that the link event is associated with
+ * @pi: port_info for the port that the link event is associated with
+ *
+ * Returns -EIO if ice_get_link_status() fails
+ * Returns 0 on success
+ */
+static int
+ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ u8 new_link_speed, old_link_speed;
+ struct ice_phy_info *phy_info;
+ bool new_link_same_as_old;
+ bool new_link, old_link;
+ u8 lport;
+ u16 v;
+
+ phy_info = &pi->phy;
+ phy_info->link_info_old = phy_info->link_info;
+ /* Force ice_get_link_status() to update link info */
+ phy_info->get_link_info = true;
+
+ old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
+ old_link_speed = phy_info->link_info_old.link_speed;
+
+ lport = pi->lport;
+ if (ice_get_link_status(pi, &new_link)) {
+ dev_dbg(&pf->pdev->dev,
+ "Could not get link status for port %d\n", lport);
+ return -EIO;
+ }
+
+ new_link_speed = phy_info->link_info.link_speed;
+
+ new_link_same_as_old = (new_link == old_link &&
+ new_link_speed == old_link_speed);
+
+ ice_for_each_vsi(pf, v) {
+ struct ice_vsi *vsi = pf->vsi[v];
+
+ if (!vsi || !vsi->port_info)
+ continue;
+
+ if (new_link_same_as_old &&
+ (test_bit(__ICE_DOWN, vsi->state) ||
+ new_link == netif_carrier_ok(vsi->netdev)))
+ continue;
+
+ if (vsi->port_info->lport == lport) {
+ ice_print_link_msg(vsi, new_link);
+ ice_vsi_link_event(vsi, new_link);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_handle_link_event - handle link event via ARQ
+ * @pf: pf that the link event is associated with
+ *
+ * Returns -EINVAL if port_info is NULL
+ * Returns the status of ice_link_event() otherwise
+ */
+static int ice_handle_link_event(struct ice_pf *pf)
+{
+ struct ice_port_info *port_info;
+ int status;
+
+ port_info = pf->hw.port_info;
+ if (!port_info)
+ return -EINVAL;
+
+ status = ice_link_event(pf, port_info);
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+ "Could not process link event, error %d\n", status);
+
+ return status;
+}
+
+/**
+ * __ice_clean_ctrlq - helper function to clean controlq rings
+ * @pf: ptr to struct ice_pf
+ * @q_type: specific Control queue type
+ */
+static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
+{
+ struct ice_rq_event_info event;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_ctl_q_info *cq;
+ u16 pending, i = 0;
+ const char *qtype;
+ u32 oldval, val;
+
+ /* Do not clean control queue if/when PF reset fails */
+ if (test_bit(__ICE_RESET_FAILED, pf->state))
+ return 0;
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ cq = &hw->adminq;
+ qtype = "Admin";
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n",
+ q_type);
+ return 0;
+ }
+
+ /* check for error indications - PF_xx_AxQLEN register layout for
+ * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
+ */
+ val = rd32(hw, cq->rq.len);
+ if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
+ PF_FW_ARQLEN_ARQCRIT_M)) {
+ oldval = val;
+ if (val & PF_FW_ARQLEN_ARQVFE_M)
+ dev_dbg(&pf->pdev->dev,
+ "%s Receive Queue VF Error detected\n", qtype);
+ if (val & PF_FW_ARQLEN_ARQOVFL_M) {
+ dev_dbg(&pf->pdev->dev,
+ "%s Receive Queue Overflow Error detected\n",
+ qtype);
+ }
+ if (val & PF_FW_ARQLEN_ARQCRIT_M)
+ dev_dbg(&pf->pdev->dev,
+ "%s Receive Queue Critical Error detected\n",
+ qtype);
+ val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
+ PF_FW_ARQLEN_ARQCRIT_M);
+ if (oldval != val)
+ wr32(hw, cq->rq.len, val);
+ }
+
+ val = rd32(hw, cq->sq.len);
+ if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
+ PF_FW_ATQLEN_ATQCRIT_M)) {
+ oldval = val;
+ if (val & PF_FW_ATQLEN_ATQVFE_M)
+ dev_dbg(&pf->pdev->dev,
+ "%s Send Queue VF Error detected\n", qtype);
+ if (val & PF_FW_ATQLEN_ATQOVFL_M) {
+ dev_dbg(&pf->pdev->dev,
+ "%s Send Queue Overflow Error detected\n",
+ qtype);
+ }
+ if (val & PF_FW_ATQLEN_ATQCRIT_M)
+ dev_dbg(&pf->pdev->dev,
+ "%s Send Queue Critical Error detected\n",
+ qtype);
+ val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
+ PF_FW_ATQLEN_ATQCRIT_M);
+ if (oldval != val)
+ wr32(hw, cq->sq.len, val);
+ }
+
+ event.buf_len = cq->rq_buf_size;
+ event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len,
+ GFP_KERNEL);
+ if (!event.msg_buf)
+ return 0;
+
+ do {
+ enum ice_status ret;
+ u16 opcode;
+
+ ret = ice_clean_rq_elem(hw, cq, &event, &pending);
+ if (ret == ICE_ERR_AQ_NO_WORK)
+ break;
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "%s Receive Queue event error %d\n", qtype,
+ ret);
+ break;
+ }
+
+ opcode = le16_to_cpu(event.desc.opcode);
+
+ switch (opcode) {
+ case ice_aqc_opc_get_link_status:
+ if (ice_handle_link_event(pf))
+ dev_err(&pf->pdev->dev,
+ "Could not handle link event");
+ break;
+ default:
+ dev_dbg(&pf->pdev->dev,
+ "%s Receive Queue unknown event 0x%04x ignored\n",
+ qtype, opcode);
+ break;
+ }
+ } while (pending && (i++ < ICE_DFLT_IRQ_WORK));
+
+ devm_kfree(&pf->pdev->dev, event.msg_buf);
+
+ return pending && (i == ICE_DFLT_IRQ_WORK);
+}
+
+/**
+ * ice_clean_adminq_subtask - clean the AdminQ rings
+ * @pf: board private structure
+ */
+static void ice_clean_adminq_subtask(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
+ return;
+
+ if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
+ return;
+
+ clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
+
+ /* re-enable Admin queue interrupt causes */
+ val = rd32(hw, PFINT_FW_CTL);
+ wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_service_task_schedule - schedule the service task to wake up
+ * @pf: board private structure
+ *
+ * If not already scheduled, this puts the task into the work queue.
+ */
+static void ice_service_task_schedule(struct ice_pf *pf)
+{
+ if (!test_bit(__ICE_DOWN, pf->state) &&
+ !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state))
+ queue_work(ice_wq, &pf->serv_task);
+}
+
+/**
+ * ice_service_task_complete - finish up the service task
+ * @pf: board private structure
+ */
+static void ice_service_task_complete(struct ice_pf *pf)
+{
+ WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));
+
+ /* force memory (pf->state) to sync before next service task */
+ smp_mb__before_atomic();
+ clear_bit(__ICE_SERVICE_SCHED, pf->state);
+}
+
+/**
+ * ice_service_timer - timer callback to schedule service task
+ * @t: pointer to timer_list
+ */
+static void ice_service_timer(struct timer_list *t)
+{
+ struct ice_pf *pf = from_timer(pf, t, serv_tmr);
+
+ mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
+ ice_service_task_schedule(pf);
+}
+
+/**
+ * ice_service_task - manage and run subtasks
+ * @work: pointer to work_struct contained by the PF struct
+ */
+static void ice_service_task(struct work_struct *work)
+{
+ struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
+ unsigned long start_time = jiffies;
+
+ /* subtasks */
+
+ /* process reset requests first */
+ ice_reset_subtask(pf);
+
+ /* bail if a reset/recovery cycle is pending */
+ if (ice_is_reset_recovery_pending(pf->state) ||
+ test_bit(__ICE_SUSPENDED, pf->state)) {
+ ice_service_task_complete(pf);
+ return;
+ }
+
+ ice_sync_fltr_subtask(pf);
+ ice_watchdog_subtask(pf);
+ ice_clean_adminq_subtask(pf);
+
+ /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
+ ice_service_task_complete(pf);
+
+ /* If the tasks have taken longer than one service timer period
+ * or there is more work to be done, reset the service timer to
+ * schedule the service task now.
+ */
+ if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
+ test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
+ mod_timer(&pf->serv_tmr, jiffies);
+}
+
+/**
+ * ice_set_ctrlq_len - helper function to set controlq length
+ * @hw: pointer to the hw instance
+ */
+static void ice_set_ctrlq_len(struct ice_hw *hw)
+{
+ hw->adminq.num_rq_entries = ICE_AQ_LEN;
+ hw->adminq.num_sq_entries = ICE_AQ_LEN;
+ hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
+ hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
+}
+
+/**
+ * ice_irq_affinity_notify - Callback for affinity changes
+ * @notify: context as to what irq was changed
+ * @mask: the new affinity mask
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * so that we may register to receive changes to the irq affinity masks.
+ */
+static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct ice_q_vector *q_vector =
+ container_of(notify, struct ice_q_vector, affinity_notify);
+
+ cpumask_copy(&q_vector->affinity_mask, mask);
+}
+
+/**
+ * ice_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * to inform the current notification subscriber that they will no longer
+ * receive notifications.
+ */
+static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
+
+/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+static void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ u32 val;
+ int i;
+
+ /* disable interrupt causation from each queue */
+ if (vsi->tx_rings) {
+ ice_for_each_txq(vsi, i) {
+ if (vsi->tx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->tx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_TQCTL(reg));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(reg), val);
+ }
+ }
+ }
+
+ if (vsi->rx_rings) {
+ ice_for_each_rxq(vsi, i) {
+ if (vsi->rx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->rx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+ }
+ }
+ }
+
+ /* disable each interrupt */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ for (i = vsi->base_vector;
+ i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ wr32(hw, GLINT_DYN_CTL(i), 0);
+
+ ice_flush(hw);
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ synchronize_irq(pf->msix_entries[i + base].vector);
+ }
+}
+
+/**
+ * ice_vsi_ena_irq - Enable IRQ for the given VSI
+ * @vsi: the VSI being configured
+ */
+static int ice_vsi_ena_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ int i;
+
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
+ }
+
+ ice_flush(hw);
+ return 0;
+}
+
+/**
+ * ice_vsi_delete - delete a VSI from the switch
+ * @vsi: pointer to VSI being removed
+ */
+static void ice_vsi_delete(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_vsi_ctx ctxt;
+ enum ice_status status;
+
+ ctxt.vsi_num = vsi->vsi_num;
+
+ memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
+
+ status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
+ if (status)
+ dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
+ vsi->vsi_num);
+}
+
+/**
+ * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ */
+static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
+{
+ int q_vectors = vsi->num_q_vectors;
+ struct ice_pf *pf = vsi->back;
+ int base = vsi->base_vector;
+ int rx_int_idx = 0;
+ int tx_int_idx = 0;
+ int vector, err;
+ int irq_num;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[vector];
+
+ irq_num = pf->msix_entries[base + vector].vector;
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "TxRx", rx_int_idx++);
+ tx_int_idx++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "rx", rx_int_idx++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "tx", tx_int_idx++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = devm_request_irq(&pf->pdev->dev,
+ pf->msix_entries[base + vector].vector,
+ vsi->irq_handler, 0, q_vector->name,
+ q_vector);
+ if (err) {
+ netdev_err(vsi->netdev,
+ "MSIX request_irq failed, error: %d\n", err);
+ goto free_q_irqs;
+ }
+
+ /* register for affinity change notifications */
+ q_vector->affinity_notify.notify = ice_irq_affinity_notify;
+ q_vector->affinity_notify.release = ice_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ }
+
+ vsi->irqs_ready = true;
+ return 0;
+
+free_q_irqs:
+ while (vector) {
+ vector--;
+		irq_num = pf->msix_entries[base + vector].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_set_affinity_hint(irq_num, NULL);
+		devm_free_irq(&pf->pdev->dev, irq_num, vsi->q_vectors[vector]);
+ }
+ return err;
+}
+
+/**
+ * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
+{
+ struct ice_hw_common_caps *cap;
+ struct ice_pf *pf = vsi->back;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ vsi->rss_size = 1;
+ return;
+ }
+
+ cap = &pf->hw.func_caps.common_cap;
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ /* PF VSI will inherit RSS instance of PF */
+ vsi->rss_table_size = cap->rss_table_size;
+ vsi->rss_size = min_t(int, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_vsi_setup_q_map - Setup a VSI queue map
+ * @vsi: the VSI being configured
+ * @ctxt: VSI context structure
+ */
+static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+{
+ u16 offset = 0, qmap = 0, numq_tc;
+ u16 pow = 0, max_rss = 0, qcount;
+ u16 qcount_tx = vsi->alloc_txq;
+ u16 qcount_rx = vsi->alloc_rxq;
+ bool ena_tc0 = false;
+ int i;
+
+ /* at least TC0 should be enabled by default */
+ if (vsi->tc_cfg.numtc) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(0)))
+ ena_tc0 = true;
+ } else {
+ ena_tc0 = true;
+ }
+
+ if (ena_tc0) {
+ vsi->tc_cfg.numtc++;
+ vsi->tc_cfg.ena_tc |= 1;
+ }
+
+ numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+
+ /* TC mapping is a function of the number of Rx queues assigned to the
+ * VSI for each traffic class and the offset of these queues.
+	 * The first 10 bits hold the queue offset for TC0, and the next 4 bits
+	 * hold the number of queues allocated to TC0. The number of queues is
+	 * a power of 2.
+	 *
+	 * If a TC is not enabled, its queue offset is set to 0 and one queue
+	 * is allocated; this way, traffic for the given TC is sent to the
+	 * default queue.
+	 *
+	 * Set up the number and offset of Rx queues for all TCs for the VSI.
+	 */
+
+ /* qcount will change if RSS is enabled */
+ if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
+ if (vsi->type == ICE_VSI_PF)
+ max_rss = ICE_MAX_LG_RSS_QS;
+ else
+ max_rss = ICE_MAX_SMALL_RSS_QS;
+
+ qcount = min_t(int, numq_tc, max_rss);
+ qcount = min_t(int, qcount, vsi->rss_size);
+ } else {
+ qcount = numq_tc;
+ }
+
+	/* find the exponent of the next power of 2 >= qcount */
+ pow = ilog2(qcount);
+
+ if (!is_power_of_2(qcount))
+ pow++;
+
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
+ /* TC is not enabled */
+ vsi->tc_cfg.tc_info[i].qoffset = 0;
+ vsi->tc_cfg.tc_info[i].qcount = 1;
+ ctxt->info.tc_mapping[i] = 0;
+ continue;
+ }
+
+ /* TC is enabled */
+ vsi->tc_cfg.tc_info[i].qoffset = offset;
+ vsi->tc_cfg.tc_info[i].qcount = qcount;
+
+ qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
+ ICE_AQ_VSI_TC_Q_NUM_M);
+ offset += qcount;
+ ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+ }
+
+ vsi->num_txq = qcount_tx;
+ vsi->num_rxq = offset;
+
+ /* Rx queue mapping */
+ ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
+ /* q_mapping buffer holds the info for the first queue allocated for
+ * this VSI in the PF space and also the number of queues associated
+ * with this VSI.
+ */
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
+ ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
+}
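+
+/* Worked example (illustrative): with RSS giving qcount = 16 on TC0 only,
+ * offset = 0 and pow = ilog2(16) = 4, so for TC0
+ *
+ *	qmap = (0 << ICE_AQ_VSI_TC_Q_OFFSET_S) | (4 << ICE_AQ_VSI_TC_Q_NUM_S)
+ *
+ * i.e. Rx queues 0..15 map to TC0, and each disabled TC gets the single
+ * default queue at offset 0.
+ */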
+
+/**
+ * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ *
+ * This initializes a default VSI context for all sections except the Queues.
+ */
+static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
+{
+ u32 table = 0;
+
+ memset(&ctxt->info, 0, sizeof(ctxt->info));
+	/* VSIs should be allocated from the shared pool */
+ ctxt->alloc_from_pool = true;
+ /* Src pruning enabled by default */
+ ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
+ /* Traffic from VSI can be sent to LAN */
+ ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
+ /* Allow all packets untagged/tagged */
+ ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
+ ICE_AQ_VSI_PVLAN_MODE_M) >>
+ ICE_AQ_VSI_PVLAN_MODE_S);
+ /* Show VLAN/UP from packets in Rx descriptors */
+ ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
+ ICE_AQ_VSI_PVLAN_EMOD_M) >>
+ ICE_AQ_VSI_PVLAN_EMOD_S);
+ /* Have 1:1 UP mapping for both ingress/egress tables */
+ table |= ICE_UP_TABLE_TRANSLATE(0, 0);
+ table |= ICE_UP_TABLE_TRANSLATE(1, 1);
+ table |= ICE_UP_TABLE_TRANSLATE(2, 2);
+ table |= ICE_UP_TABLE_TRANSLATE(3, 3);
+ table |= ICE_UP_TABLE_TRANSLATE(4, 4);
+ table |= ICE_UP_TABLE_TRANSLATE(5, 5);
+ table |= ICE_UP_TABLE_TRANSLATE(6, 6);
+ table |= ICE_UP_TABLE_TRANSLATE(7, 7);
+ ctxt->info.ingress_table = cpu_to_le32(table);
+ ctxt->info.egress_table = cpu_to_le32(table);
+ /* Have 1:1 UP mapping for outer to inner UP table */
+ ctxt->info.outer_up_table = cpu_to_le32(table);
+	/* No outer tag support, so outer_tag_flags remains zero */
+}
+
+/**
+ * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
+ * @ctxt: the VSI context being set
+ * @vsi: the VSI being configured
+ */
+static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
+{
+ u8 lut_type, hash_type;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ /* PF VSI will inherit RSS instance of PF */
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ return;
+ }
+
+ ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+ ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+}
+
+/**
+ * ice_vsi_add - Create a new VSI or fetch preallocated VSI
+ * @vsi: the VSI being configured
+ *
+ * This initializes a VSI context depending on the VSI type to be added and
+ * passes it down to the add_vsi aq command to create a new VSI.
+ */
+static int ice_vsi_add(struct ice_vsi *vsi)
+{
+ struct ice_vsi_ctx ctxt = { 0 };
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int ret = 0;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ ctxt.flags = ICE_AQ_VSI_TYPE_PF;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ice_set_dflt_vsi_ctx(&ctxt);
+ /* if the switch is in VEB mode, allow VSI loopback */
+ if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
+ ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
+
+ /* Set LUT type and HASH type if RSS is enabled */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_set_rss_vsi_ctx(&ctxt, vsi);
+
+ ctxt.info.sw_id = vsi->port_info->sw_id;
+ ice_vsi_setup_q_map(vsi, &ctxt);
+
+ ret = ice_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret) {
+ dev_err(&vsi->back->pdev->dev,
+ "Add VSI AQ call failed, err %d\n", ret);
+ return -EIO;
+ }
+ vsi->info = ctxt.info;
+ vsi->vsi_num = ctxt.vsi_num;
+
+ return ret;
+}
+
+/**
+ * ice_vsi_release_msix - Clear the queue-to-interrupt mapping in HW
+ * @vsi: the VSI being cleaned up
+ */
+static void ice_vsi_release_msix(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 vector = vsi->base_vector;
+ struct ice_hw *hw = &pf->hw;
+ u32 txq = 0;
+ u32 rxq = 0;
+ int i, q;
+
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
+ wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+ txq++;
+ }
+
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
+ rxq++;
+ }
+ }
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
+ * @vsi: the VSI having rings deallocated
+ */
+static void ice_vsi_clear_rings(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ if (vsi->tx_rings[i]) {
+ kfree_rcu(vsi->tx_rings[i], rcu);
+ vsi->tx_rings[i] = NULL;
+ }
+ }
+ }
+ if (vsi->rx_rings) {
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ if (vsi->rx_rings[i]) {
+ kfree_rcu(vsi->rx_rings[i], rcu);
+ vsi->rx_rings[i] = NULL;
+ }
+ }
+ }
+}
+
+/**
+ * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
+ * @vsi: VSI which is having rings allocated
+ */
+static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ /* Allocate tx_rings */
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ struct ice_ring *ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+
+ if (!ring)
+ goto err_out;
+
+ ring->q_index = i;
+ ring->reg_idx = vsi->txq_map[i];
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+
+ vsi->tx_rings[i] = ring;
+ }
+
+ /* Allocate rx_rings */
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ struct ice_ring *ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_out;
+
+ ring->q_index = i;
+ ring->reg_idx = vsi->rxq_map[i];
+ ring->ring_active = false;
+ ring->vsi = vsi;
+ ring->netdev = vsi->netdev;
+ ring->dev = &pf->pdev->dev;
+ ring->count = vsi->num_desc;
+ vsi->rx_rings[i] = ring;
+ }
+
+ return 0;
+
+err_out:
+ ice_vsi_clear_rings(vsi);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_free_irq - Free the irq association with the OS
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_free_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int base = vsi->base_vector;
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ int i;
+
+ if (!vsi->q_vectors || !vsi->irqs_ready)
+ return;
+
+ vsi->irqs_ready = false;
+ for (i = 0; i < vsi->num_q_vectors; i++) {
+ u16 vector = i + base;
+ int irq_num;
+
+ irq_num = pf->msix_entries[vector].vector;
+
+ /* free only the irqs that were actually requested */
+ if (!vsi->q_vectors[i] ||
+ !(vsi->q_vectors[i]->num_ring_tx ||
+ vsi->q_vectors[i]->num_ring_rx))
+ continue;
+
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
+
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(irq_num, NULL);
+ synchronize_irq(irq_num);
+ devm_free_irq(&pf->pdev->dev, irq_num,
+ vsi->q_vectors[i]);
+ }
+ ice_vsi_release_msix(vsi);
+ }
+}
+
+/**
+ * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_cfg_msix(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 vector = vsi->base_vector;
+ struct ice_hw *hw = &pf->hw;
+ u32 txq = 0, rxq = 0;
+ int i, q, itr;
+ u8 itr_gran;
+
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ itr_gran = hw->itr_gran_200;
+
+ if (q_vector->num_ring_rx) {
+ q_vector->rx.itr =
+ ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
+ itr_gran);
+ q_vector->rx.latency_range = ICE_LOW_LATENCY;
+ }
+
+ if (q_vector->num_ring_tx) {
+ q_vector->tx.itr =
+ ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
+ itr_gran);
+ q_vector->tx.latency_range = ICE_LOW_LATENCY;
+ }
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
+ wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
+
+ /* Both Transmit Queue Interrupt Cause Control register
+ * and Receive Queue Interrupt Cause control register
+ * expects MSIX_INDX field to be the vector index
+ * within the function space and not the absolute
+ * vector index across PF or across device.
+		 * For SR-IOV VF VSIs the queue vector index always starts
+		 * at 1, since the first vector index (0) is used for the
+		 * OICR in VF space. Since VMDq and other PF VSIs are
+		 * within the PF function space, use the vector index
+		 * that is tracked for this PF.
+ */
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ u32 val;
+
+ itr = ICE_TX_ITR;
+ val = QINT_TQCTL_CAUSE_ENA_M |
+ (itr << QINT_TQCTL_ITR_INDX_S) |
+ (vector << QINT_TQCTL_MSIX_INDX_S);
+ wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+ txq++;
+ }
+
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ u32 val;
+
+ itr = ICE_RX_ITR;
+ val = QINT_RQCTL_CAUSE_ENA_M |
+ (itr << QINT_RQCTL_ITR_INDX_S) |
+ (vector << QINT_RQCTL_MSIX_INDX_S);
+ wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+ rxq++;
+ }
+ }
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_ena_misc_vector - enable the non-queue interrupts
+ * @pf: board private structure
+ */
+static void ice_ena_misc_vector(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+
+ /* clear things first */
+ wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
+ rd32(hw, PFINT_OICR); /* read to clear */
+
+ val = (PFINT_OICR_HLP_RDY_M |
+ PFINT_OICR_CPM_RDY_M |
+ PFINT_OICR_ECC_ERR_M |
+ PFINT_OICR_MAL_DETECT_M |
+ PFINT_OICR_GRST_M |
+ PFINT_OICR_PCI_EXCEPTION_M |
+ PFINT_OICR_GPIO_M |
+ PFINT_OICR_STORM_DETECT_M |
+ PFINT_OICR_HMC_ERR_M);
+
+ wr32(hw, PFINT_OICR_ENA, val);
+
+ /* SW_ITR_IDX = 0, but don't change INTENA */
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
+}
+
+/**
+ * ice_misc_intr - misc interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
+{
+ struct ice_pf *pf = (struct ice_pf *)data;
+ struct ice_hw *hw = &pf->hw;
+ irqreturn_t ret = IRQ_NONE;
+ u32 oicr, ena_mask;
+
+ set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
+
+ oicr = rd32(hw, PFINT_OICR);
+ ena_mask = rd32(hw, PFINT_OICR_ENA);
+
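+ /* Causes handled below are cleared from ena_mask so that only
+ * the still-unhandled causes are re-enabled at the end of this
+ * handler.
+ */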
+ if (!(oicr & PFINT_OICR_INTEVENT_M))
+ goto ena_intr;
+
+ if (oicr & PFINT_OICR_GRST_M) {
+ u32 reset;
+ /* we have a reset warning */
+ ena_mask &= ~PFINT_OICR_GRST_M;
+ reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
+ GLGEN_RSTAT_RESET_TYPE_S;
+
+ if (reset == ICE_RESET_CORER)
+ pf->corer_count++;
+ else if (reset == ICE_RESET_GLOBR)
+ pf->globr_count++;
+ else
+ pf->empr_count++;
+
+ /* If a reset cycle isn't already in progress, we set a bit in
+ * pf->state so that the service task can start a reset/rebuild.
+ * We also make note of which reset happened so that peer
+ * devices/drivers can be informed.
+ */
+ if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) {
+ if (reset == ICE_RESET_CORER)
+ set_bit(__ICE_CORER_RECV, pf->state);
+ else if (reset == ICE_RESET_GLOBR)
+ set_bit(__ICE_GLOBR_RECV, pf->state);
+ else
+ set_bit(__ICE_EMPR_RECV, pf->state);
+
+ set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+ }
+ }
+
+ if (oicr & PFINT_OICR_HMC_ERR_M) {
+ ena_mask &= ~PFINT_OICR_HMC_ERR_M;
+ dev_dbg(&pf->pdev->dev,
+ "HMC Error interrupt - info 0x%x, data 0x%x\n",
+ rd32(hw, PFHMC_ERRORINFO),
+ rd32(hw, PFHMC_ERRORDATA));
+ }
+
+ /* Report and mask off any remaining unexpected interrupts */
+ oicr &= ena_mask;
+ if (oicr) {
+ dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
+ oicr);
+ /* If a critical error is pending there is no choice but to
+ * reset the device.
+ */
+ if (oicr & (PFINT_OICR_PE_CRITERR_M |
+ PFINT_OICR_PCI_EXCEPTION_M |
+ PFINT_OICR_ECC_ERR_M)) {
+ set_bit(__ICE_PFR_REQ, pf->state);
+ ice_service_task_schedule(pf);
+ }
+ ena_mask &= ~oicr;
+ }
+ ret = IRQ_HANDLED;
+
+ena_intr:
+ /* re-enable interrupt causes that are not handled during this pass */
+ wr32(hw, PFINT_OICR_ENA, ena_mask);
+ if (!test_bit(__ICE_DOWN, pf->state)) {
+ ice_service_task_schedule(pf);
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors allotted
+ * through the MSI-X enabling code. On a constrained vector budget, we map Tx
+ * and Rx rings to the vector as "efficiently" as possible.
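+ *
+ * For example, with 5 Tx rings and 2 vectors, the first vector takes
+ * DIV_ROUND_UP(5, 2) = 3 rings and the second takes the remaining 2.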
+ */
+static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+{
+ int q_vectors = vsi->num_q_vectors;
+ int tx_rings_rem, rx_rings_rem;
+ int v_id;
+
+ /* start with all of the VSI's Tx/Rx queues left to distribute */
+ tx_rings_rem = vsi->num_txq;
+ rx_rings_rem = vsi->num_rxq;
+
+ for (v_id = 0; v_id < q_vectors; v_id++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
+ int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+
+ /* Tx rings mapping to vector */
+ tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_tx = tx_rings_per_v;
+ q_vector->tx.ring = NULL;
+ q_base = vsi->num_txq - tx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
+ struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
+ }
+ tx_rings_rem -= tx_rings_per_v;
+
+ /* Rx rings mapping to vector */
+ rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+ q_vector->num_ring_rx = rx_rings_per_v;
+ q_vector->rx.ring = NULL;
+ q_base = vsi->num_rxq - rx_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
+ struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ }
+ rx_rings_rem -= rx_rings_per_v;
+ }
+}
+
+/**
+ * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
+ * @vsi: the VSI being configured
+ */
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->alloc_txq = pf->num_lan_tx;
+ vsi->alloc_rxq = pf->num_lan_rx;
+ vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
+ vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
+ * @vsi: VSI pointer
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ */
+static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
+{
+ struct ice_pf *pf = vsi->back;
+
+ /* allocate memory for both Tx and Rx ring pointers */
+ vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ sizeof(struct ice_ring *), GFP_KERNEL);
+ if (!vsi->tx_rings)
+ goto err_txrings;
+
+ vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ sizeof(struct ice_ring *), GFP_KERNEL);
+ if (!vsi->rx_rings)
+ goto err_rxrings;
+
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
+ vsi->num_q_vectors,
+ sizeof(struct ice_q_vector *),
+ GFP_KERNEL);
+ if (!vsi->q_vectors)
+ goto err_vectors;
+ }
+
+ return 0;
+
+err_vectors:
+ devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+err_rxrings:
+ devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+err_txrings:
+ return -ENOMEM;
+}
+
+/**
+ * ice_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ice_vsi_alloc - Allocates the next available struct vsi in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * returns a pointer to a VSI on success, NULL on failure.
+ */
+static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
+{
+ struct ice_vsi *vsi = NULL;
+
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->sw_mutex);
+
+ /* If we have already allocated our maximum number of VSIs,
+ * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
+ * is available to be populated
+ */
+ if (pf->next_vsi == ICE_NO_VSI) {
+ dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
+ goto unlock_pf;
+ }
+
+ vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
+ if (!vsi)
+ goto unlock_pf;
+
+ vsi->type = type;
+ vsi->back = pf;
+ set_bit(__ICE_DOWN, vsi->state);
+ vsi->idx = pf->next_vsi;
+ vsi->work_lmt = ICE_DFLT_IRQ_WORK;
+
+ ice_vsi_set_num_qs(vsi);
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ if (ice_vsi_alloc_arrays(vsi, true))
+ goto err_rings;
+
+ /* Setup default MSIX irq handler for VSI */
+ vsi->irq_handler = ice_msix_clean_rings;
+ break;
+ default:
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+ goto unlock_pf;
+ }
+
+ /* fill VSI slot in the PF struct */
+ pf->vsi[pf->next_vsi] = vsi;
+
+ /* prepare pf->next_vsi for next use */
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
+ goto unlock_pf;
+
+err_rings:
+ devm_kfree(&pf->pdev->dev, vsi);
+ vsi = NULL;
+unlock_pf:
+ mutex_unlock(&pf->sw_mutex);
+ return vsi;
+}
+
+/**
+ * ice_free_irq_msix_misc - Unroll misc vector setup
+ * @pf: board private structure
+ */
+static void ice_free_irq_msix_misc(struct ice_pf *pf)
+{
+ /* disable OICR interrupt */
+ wr32(&pf->hw, PFINT_OICR_ENA, 0);
+ ice_flush(&pf->hw);
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
+ synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
+ devm_free_irq(&pf->pdev->dev,
+ pf->msix_entries[pf->oicr_idx].vector, pf);
+ }
+
+ ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
+}
+
+/**
+ * ice_req_irq_msix_misc - Setup the misc vector to handle non-queue events
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0, which is used to manage the
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
+ * when in MSI or Legacy interrupt mode.
+ */
+static int ice_req_irq_msix_misc(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int oicr_idx, err = 0;
+ u8 itr_gran;
+ u32 val;
+
+ if (!pf->int_name[0])
+ snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
+
+ /* Do not request IRQ but do enable OICR interrupt since settings are
+ * lost during reset. Note that this function is called only during
+ * rebuild path and not while reset is in progress.
+ */
+ if (ice_is_reset_recovery_pending(pf->state))
+ goto skip_req_irq;
+
+ /* reserve one vector in irq_tracker for misc interrupts */
+ oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ if (oicr_idx < 0)
+ return oicr_idx;
+
+ pf->oicr_idx = oicr_idx;
+
+ err = devm_request_irq(&pf->pdev->dev,
+ pf->msix_entries[pf->oicr_idx].vector,
+ ice_misc_intr, 0, pf->int_name, pf);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "devm_request_irq for %s failed: %d\n",
+ pf->int_name, err);
+ ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
+ return err;
+ }
+
+skip_req_irq:
+ ice_ena_misc_vector(pf);
+
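+ /* associate the OICR interrupt cause with our reserved vector
+ * and enable it
+ */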
+ val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
+ PFINT_OICR_CTL_CAUSE_ENA_M;
+ wr32(hw, PFINT_OICR_CTL, val);
+
+ /* This enables Admin queue Interrupt causes */
+ val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
+ PFINT_FW_CTL_CAUSE_ENA_M;
+ wr32(hw, PFINT_FW_CTL, val);
+
+ itr_gran = hw->itr_gran_200;
+
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
+ ITR_TO_REG(ICE_ITR_8K, itr_gran));
+
+ ice_flush(hw);
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+
+ return 0;
+}
+
+/**
+ * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int offset, ret = 0;
+
+ mutex_lock(&pf->avail_q_mutex);
+ /* look for contiguous block of queues for tx */
+ offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
+ 0, vsi->alloc_txq, 0);
+ if (offset < ICE_MAX_TXQS) {
+ int i;
+
+ bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
+ for (i = 0; i < vsi->alloc_txq; i++)
+ vsi->txq_map[i] = i + offset;
+ } else {
+ ret = -ENOMEM;
+ vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
+ }
+
+ /* look for contiguous block of queues for rx */
+ offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
+ 0, vsi->alloc_rxq, 0);
+ if (offset < ICE_MAX_RXQS) {
+ int i;
+
+ bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
+ for (i = 0; i < vsi->alloc_rxq; i++)
+ vsi->rxq_map[i] = i + offset;
+ } else {
+ ret = -ENOMEM;
+ vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
+ }
+ mutex_unlock(&pf->avail_q_mutex);
+
+ return ret;
+}
+
+/**
+ * ice_vsi_get_qs_scatter - Assign scattered queues to a VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int i, index = 0;
+
+ mutex_lock(&pf->avail_q_mutex);
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ index = find_next_zero_bit(pf->avail_txqs,
+ ICE_MAX_TXQS, index);
+ if (index < ICE_MAX_TXQS) {
+ set_bit(index, pf->avail_txqs);
+ vsi->txq_map[i] = index;
+ } else {
+ goto err_scatter_tx;
+ }
+ }
+ }
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ index = find_next_zero_bit(pf->avail_rxqs,
+ ICE_MAX_RXQS, index);
+ if (index < ICE_MAX_RXQS) {
+ set_bit(index, pf->avail_rxqs);
+ vsi->rxq_map[i] = index;
+ } else {
+ goto err_scatter_rx;
+ }
+ }
+ }
+
+ mutex_unlock(&pf->avail_q_mutex);
+ return 0;
+
+err_scatter_rx:
+ /* unflag any queues we have grabbed (i is failed position) */
+ for (index = 0; index < i; index++) {
+ clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
+ vsi->rxq_map[index] = 0;
+ }
+ i = vsi->alloc_txq;
+err_scatter_tx:
+ /* i is either position of failed attempt or vsi->alloc_txq */
+ for (index = 0; index < i; index++) {
+ clear_bit(vsi->txq_map[index], pf->avail_txqs);
+ vsi->txq_map[index] = 0;
+ }
+
+ mutex_unlock(&pf->avail_q_mutex);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_get_qs - Assign queues from PF to VSI
+ * @vsi: the VSI to assign queues to
+ *
+ * Returns 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs(struct ice_vsi *vsi)
+{
+ int ret = 0;
+
+ vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
+ vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
+
+ /* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping
+ * modes individually to scatter if assigning contiguous queues
+ * to rx or tx fails
+ */
+ ret = ice_vsi_get_qs_contig(vsi);
+ if (ret < 0) {
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
+ vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
+ ICE_MAX_SCATTER_TXQS);
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
+ vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
+ ICE_MAX_SCATTER_RXQS);
+ ret = ice_vsi_get_qs_scatter(vsi);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_vsi_put_qs - Release queues from VSI to PF
+ * @vsi: the VSI that is going to release queues
+ */
+static void ice_vsi_put_qs(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ mutex_lock(&pf->avail_q_mutex);
+
+ for (i = 0; i < vsi->alloc_txq; i++) {
+ clear_bit(vsi->txq_map[i], pf->avail_txqs);
+ vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
+ }
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
+ vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
+ }
+
+ mutex_unlock(&pf->avail_q_mutex);
+}
+
+/**
+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @vsi: VSI having the memory freed
+ * @v_idx: index of the vector to be freed
+ */
+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_ring *ring;
+
+ if (!vsi->q_vectors[v_idx]) {
+ dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
+ v_idx);
+ return;
+ }
+ q_vector = vsi->q_vectors[v_idx];
+
+ ice_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+ ice_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+ /* only VSI with an associated netdev is set up with NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ devm_kfree(&vsi->back->pdev->dev, q_vector);
+ vsi->q_vectors[v_idx] = NULL;
+}
+
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * ice_cfg_netdev - Setup the netdev flags
+ * @vsi: the VSI being configured
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_cfg_netdev(struct ice_vsi *vsi)
+{
+ netdev_features_t csumo_features;
+ netdev_features_t vlano_features;
+ netdev_features_t dflt_features;
+ netdev_features_t tso_features;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
+
+ netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
+ vsi->alloc_txq, vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ dflt_features = NETIF_F_SG |
+ NETIF_F_HIGHDMA |
+ NETIF_F_RXHASH;
+
+ csumo_features = NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
+
+ vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+
+ tso_features = NETIF_F_TSO;
+
+ /* set features that user can change */
+ netdev->hw_features = dflt_features | csumo_features |
+ vlano_features | tso_features;
+
+ /* enable features */
+ netdev->features |= netdev->hw_features;
+ /* encap and VLAN devices inherit default, csumo and tso features */
+ netdev->hw_enc_features |= dflt_features | csumo_features |
+ tso_features;
+ netdev->vlan_features |= dflt_features | csumo_features |
+ tso_features;
+
+ if (vsi->type == ICE_VSI_PF) {
+ SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
+ ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
+
+ ether_addr_copy(netdev->dev_addr, mac_addr);
+ ether_addr_copy(netdev->perm_addr, mac_addr);
+ }
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* assign netdev_ops */
+ netdev->netdev_ops = &ice_netdev_ops;
+
+ /* setup watchdog timeout value to be 5 second */
+ netdev->watchdog_timeo = 5 * HZ;
+
+ ice_set_ethtool_ops(netdev);
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = ICE_MAX_MTU;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_free_arrays - clean up vsi resources
+ * @vsi: pointer to VSI being cleared
+ * @free_qvectors: bool to specify if q_vectors should be deallocated
+ */
+static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
+{
+ struct ice_pf *pf = vsi->back;
+
+ /* free the ring and vector containers */
+ if (free_qvectors && vsi->q_vectors) {
+ devm_kfree(&pf->pdev->dev, vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
+ if (vsi->tx_rings) {
+ devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ }
+ if (vsi->rx_rings) {
+ devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+ vsi->rx_rings = NULL;
+ }
+}
+
+/**
+ * ice_vsi_clear - clean up and deallocate the provided vsi
+ * @vsi: pointer to VSI being cleared
+ *
+ * This deallocates the vsi's queue resources, removes it from the PF's
+ * VSI array if necessary, and deallocates the VSI
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_vsi_clear(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = NULL;
+
+ if (!vsi)
+ return 0;
+
+ if (!vsi->back)
+ return -EINVAL;
+
+ pf = vsi->back;
+
+ if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
+ dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
+ vsi->idx);
+ return -EINVAL;
+ }
+
+ mutex_lock(&pf->sw_mutex);
+ /* updates the PF for this cleared vsi */
+ pf->vsi[vsi->idx] = NULL;
+ if (vsi->idx < pf->next_vsi)
+ pf->next_vsi = vsi->idx;
+
+ ice_vsi_free_arrays(vsi, true);
+ mutex_unlock(&pf->sw_mutex);
+ devm_kfree(&pf->pdev->dev, vsi);
+
+ return 0;
+}
+
+/**
+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ /* only set affinity_mask if the CPU is online */
+ if (cpu_online(v_idx))
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+ /* tie q_vector and vsi together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int v_idx = 0, num_q_vectors;
+ int err;
+
+ if (vsi->q_vectors[0]) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+ vsi->vsi_num);
+ return -EEXIST;
+ }
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ num_q_vectors = vsi->num_q_vectors;
+ } else {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ err = ice_vsi_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ while (v_idx--)
+ ice_free_q_vector(vsi, v_idx);
+
+ dev_err(&pf->pdev->dev,
+ "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->vsi_num, err);
+ vsi->num_q_vectors = 0;
+ return err;
+}
+
+/**
+ * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after ice_vsi_alloc() which allocates the
+ * corresponding SW VSI structure and initializes the queue counts for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ */
+static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int num_q_vectors = 0;
+
+ if (vsi->base_vector) {
+ dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
+ vsi->vsi_num, vsi->base_vector);
+ return -EEXIST;
+ }
+
+ if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ return -ENOENT;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ num_q_vectors = vsi->num_q_vectors;
+ break;
+ default:
+ dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+ vsi->type);
+ break;
+ }
+
+ if (num_q_vectors)
+ vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
+ num_q_vectors, vsi->idx);
+
+ if (vsi->base_vector < 0) {
+ dev_err(&pf->pdev->dev,
+ "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
+ num_q_vectors, vsi->vsi_num, vsi->base_vector);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fill_rss_lut - Fill the RSS lookup table with default values
+ * @lut: Lookup table
+ * @rss_table_size: Lookup table size
+ * @rss_size: Range of queue number for hashing
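+ *
+ * For example, with rss_table_size = 8 and rss_size = 3 the LUT becomes
+ * 0 1 2 0 1 2 0 1, spreading hash buckets round-robin across the queues.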
+ */
+void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
+{
+ u16 i;
+
+ for (i = 0; i < rss_table_size; i++)
+ lut[i] = i % rss_size;
+}
+
+/**
+ * ice_vsi_cfg_rss - Configure RSS params for a VSI
+ * @vsi: VSI to be configured
+ */
+static int ice_vsi_cfg_rss(struct ice_vsi *vsi)
+{
+ u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
+ struct ice_aqc_get_set_rss_keys *key;
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ int err = 0;
+ u8 *lut;
+
+ vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
+
+ lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
+
+ status = ice_aq_set_rss_lut(&pf->hw, vsi->vsi_num, vsi->rss_lut_type,
+ lut, vsi->rss_table_size);
+
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "set_rss_lut failed, error %d\n", status);
+ err = -EIO;
+ goto ice_vsi_cfg_rss_exit;
+ }
+
+ key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
+ if (!key) {
+ err = -ENOMEM;
+ goto ice_vsi_cfg_rss_exit;
+ }
+
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user,
+ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ else
+ netdev_rss_key_fill((void *)seed,
+ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ memcpy(&key->standard_rss_key, seed,
+ ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+
+ status = ice_aq_set_rss_key(&pf->hw, vsi->vsi_num, key);
+
+ if (status) {
+ dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
+ status);
+ err = -EIO;
+ }
+
+ devm_kfree(&pf->pdev->dev, key);
+ice_vsi_cfg_rss_exit:
+ devm_kfree(&pf->pdev->dev, lut);
+ return err;
+}
+
+/**
+ * ice_vsi_reinit_setup - return and reallocate resources for a VSI
+ * @vsi: pointer to the ice_vsi
+ *
+ * This reallocates the VSI's queue resources
+ *
+ * Returns 0 on success and negative value on failure
+ */
+static int ice_vsi_reinit_setup(struct ice_vsi *vsi)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ int ret, i;
+
+ if (!vsi)
+ return -EINVAL;
+
+ ice_vsi_free_q_vectors(vsi);
+ ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
+ vsi->base_vector = 0;
+ ice_vsi_clear_rings(vsi);
+ ice_vsi_free_arrays(vsi, false);
+ ice_vsi_set_num_qs(vsi);
+
+ /* Initialize VSI struct elements and create VSI in FW */
+ ret = ice_vsi_add(vsi);
+ if (ret < 0)
+ goto err_vsi;
+
+ ret = ice_vsi_alloc_arrays(vsi, false);
+ if (ret < 0)
+ goto err_vsi;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ if (!vsi->netdev) {
+ ret = ice_cfg_netdev(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = register_netdev(vsi->netdev);
+ if (ret)
+ goto err_rings;
+
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_all_queues(vsi->netdev);
+ }
+
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto err_vectors;
+
+ ice_vsi_map_rings_to_vectors(vsi);
+ break;
+ default:
+ break;
+ }
+
+ ice_vsi_set_tc_cfg(vsi);
+
+ /* configure VSI nodes based on number of queues and TC's */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
+ vsi->tc_cfg.ena_tc, max_txqs);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed VSI lan queue config\n");
+ goto err_vectors;
+ }
+ return 0;
+
+err_vectors:
+ ice_vsi_free_q_vectors(vsi);
+err_rings:
+ if (vsi->netdev) {
+ vsi->current_netdev_flags = 0;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+err_vsi:
+ ice_vsi_clear(vsi);
+ set_bit(__ICE_RESET_FAILED, vsi->back->state);
+ return ret;
+}
+
+/**
+ * ice_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @type: VSI type
+ * @pi: pointer to the port_info instance
+ *
+ * This allocates the sw VSI structure and its queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct on
+ * success, otherwise returns NULL on failure.
+ */
+static struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type,
+ struct ice_port_info *pi)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct device *dev = &pf->pdev->dev;
+ struct ice_vsi_ctx ctxt = { 0 };
+ struct ice_vsi *vsi;
+ int ret, i;
+
+ vsi = ice_vsi_alloc(pf, type);
+ if (!vsi) {
+ dev_err(dev, "could not allocate VSI\n");
+ return NULL;
+ }
+
+ vsi->port_info = pi;
+ vsi->vsw = pf->first_sw;
+
+ if (ice_vsi_get_qs(vsi)) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto err_get_qs;
+ }
+
+ /* set RSS capabilities */
+ ice_vsi_set_rss_params(vsi);
+
+ /* create the VSI */
+ ret = ice_vsi_add(vsi);
+ if (ret)
+ goto err_vsi;
+
+ ctxt.vsi_num = vsi->vsi_num;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ ret = ice_cfg_netdev(vsi);
+ if (ret)
+ goto err_cfg_netdev;
+
+ ret = register_netdev(vsi->netdev);
+ if (ret)
+ goto err_register_netdev;
+
+ netif_carrier_off(vsi->netdev);
+
+ /* make sure transmit queues start off as stopped */
+ netif_tx_stop_all_queues(vsi->netdev);
+ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto err_msix;
+
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = ice_vsi_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Do not exit if configuring RSS had an issue; at least
+ * receive traffic on the first queue. Hence there is no
+ * need to capture the return value.
+ */
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_vsi_cfg_rss(vsi);
+ break;
+ default:
+ /* if vsi type is not recognized, clean up the resources and
+ * exit
+ */
+ goto err_rings;
+ }
+
+ ice_vsi_set_tc_cfg(vsi);
+
+ /* configure VSI nodes based on number of queues and TC's */
+ for (i = 0; i < vsi->tc_cfg.numtc; i++)
+ max_txqs[i] = vsi->num_txq;
+
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
+ vsi->tc_cfg.ena_tc, max_txqs);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
+ goto err_rings;
+ }
+
+ return vsi;
+
+err_rings:
+ ice_vsi_free_q_vectors(vsi);
+err_msix:
+ if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(vsi->netdev);
+err_register_netdev:
+ if (vsi->netdev) {
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+err_cfg_netdev:
+ ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL);
+ if (ret)
+ dev_err(&vsi->back->pdev->dev,
+ "Free VSI AQ call failed, err %d\n", ret);
+err_vsi:
+ ice_vsi_put_qs(vsi);
+err_get_qs:
+ pf->q_left_tx += vsi->alloc_txq;
+ pf->q_left_rx += vsi->alloc_rxq;
+ ice_vsi_clear(vsi);
+
+ return NULL;
+}
+
+/**
+ * ice_vsi_add_vlan - Add VSI membership for a given VLAN
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be added
+ */
+static int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
+{
+ struct ice_fltr_list_entry *tmp;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+ int err = 0;
+
+ tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.flag = ICE_FLTR_TX;
+ tmp->fltr_info.src = vsi->vsi_num;
+ tmp->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
+ tmp->fltr_info.l_data.vlan.vlan_id = vid;
+
+ INIT_LIST_HEAD(&tmp->list_entry);
+ list_add(&tmp->list_entry, &tmp_add_list);
+
+ status = ice_add_vlan(&pf->hw, &tmp_add_list);
+ if (status) {
+ err = -ENODEV;
+ dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
+ vid, vsi->vsi_num);
+ }
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return err;
+}
+
+/**
+ * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * @netdev: network interface to be adjusted
+ * @proto: unused protocol
+ * @vid: vlan id to be added
+ *
+ * net_device_ops implementation for adding vlan ids
+ */
+static int ice_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ int ret = 0;
+
+ if (vid >= VLAN_N_VID) {
+ netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
+ vid, VLAN_N_VID);
+ return -EINVAL;
+ }
+
+ if (vsi->info.pvid)
+ return -EINVAL;
+
+ /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
+ * needed to continue allowing all untagged packets since VLAN prune
+ * list is applied to all packets by the switch
+ */
+ ret = ice_vsi_add_vlan(vsi, vid);
+
+ if (!ret)
+ set_bit(vid, vsi->active_vlans);
+
+ return ret;
+}
+
+/**
+ * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
+ * @vsi: the VSI being configured
+ * @vid: VLAN id to be removed
+ */
+static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
+{
+ struct ice_fltr_list_entry *list;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+
+ list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return;
+
+ list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+ list->fltr_info.fwd_id.vsi_id = vsi->vsi_num;
+ list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ list->fltr_info.l_data.vlan.vlan_id = vid;
+ list->fltr_info.flag = ICE_FLTR_TX;
+ list->fltr_info.src = vsi->vsi_num;
+
+ INIT_LIST_HEAD(&list->list_entry);
+ list_add(&list->list_entry, &tmp_add_list);
+
+ if (ice_remove_vlan(&pf->hw, &tmp_add_list))
+ dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
+ vid, vsi->vsi_num);
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+}
+
+/**
+ * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * @netdev: network interface to be adjusted
+ * @proto: unused protocol
+ * @vid: vlan id to be removed
+ *
+ * net_device_ops implementation for removing vlan ids
+ */
+static int ice_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ if (vsi->info.pvid)
+ return -EINVAL;
+
+ /* return code is ignored as there is nothing a user
+ * can do about a failure to remove, and a log message
+ * was already printed by ice_vsi_kill_vlan()
+ */
+ ice_vsi_kill_vlan(vsi, vid);
+
+ clear_bit(vid, vsi->active_vlans);
+
+ return 0;
+}
+
+/**
+ * ice_setup_pf_sw - Setup the HW switch on startup or after reset
+ * @pf: board private structure
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_setup_pf_sw(struct ice_pf *pf)
+{
+ LIST_HEAD(tmp_add_list);
+ u8 broadcast[ETH_ALEN];
+ struct ice_vsi *vsi;
+ int status = 0;
+
+ if (!ice_is_reset_recovery_pending(pf->state)) {
+ vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info);
+ if (!vsi) {
+ status = -ENOMEM;
+ goto error_exit;
+ }
+ } else {
+ vsi = pf->vsi[0];
+ status = ice_vsi_reinit_setup(vsi);
+ if (status < 0)
+ return -EIO;
+ }
+
+ /* tmp_add_list contains a list of MAC addresses for which MAC
+ * filters need to be programmed. Add the VSI's unicast MAC to
+ * this list
+ */
+ status = ice_add_mac_to_list(vsi, &tmp_add_list,
+ vsi->port_info->mac.perm_addr);
+ if (status)
+ goto error_exit;
+
+ /* VSI needs to receive broadcast traffic, so add the broadcast
+ * MAC address to the list.
+ */
+ eth_broadcast_addr(broadcast);
+ status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+ if (status)
+ goto error_exit;
+
+ /* program MAC filters for entries in tmp_add_list */
+ status = ice_add_mac(&pf->hw, &tmp_add_list);
+ if (status) {
+ dev_err(&pf->pdev->dev, "Could not add MAC filters\n");
+ status = -ENOMEM;
+ goto error_exit;
+ }
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ return status;
+
+error_exit:
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+
+ if (vsi) {
+ ice_vsi_free_q_vectors(vsi);
+ if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(vsi->netdev);
+ if (vsi->netdev) {
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+
+ ice_vsi_delete(vsi);
+ ice_vsi_put_qs(vsi);
+ pf->q_left_tx += vsi->alloc_txq;
+ pf->q_left_rx += vsi->alloc_rxq;
+ ice_vsi_clear(vsi);
+ }
+ return status;
+}
+
+/**
+ * ice_determine_q_usage - Calculate queue distribution
+ * @pf: board private structure
+ *
+ * Splits the device's Tx/Rx queues between the PF's LAN usage and the
+ * remaining pool tracked in q_left_tx/q_left_rx.
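+ * For example, with 96 Tx queues reported by the hardware and 8 online
+ * CPUs, num_lan_tx becomes 8 and q_left_tx becomes 88.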
+ */
+static void ice_determine_q_usage(struct ice_pf *pf)
+{
+ u16 q_left_tx, q_left_rx;
+
+ q_left_tx = pf->hw.func_caps.common_cap.num_txq;
+ q_left_rx = pf->hw.func_caps.common_cap.num_rxq;
+
+ pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
+
+ /* only 1 rx queue unless RSS is enabled */
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ pf->num_lan_rx = 1;
+ else
+ pf->num_lan_rx = min_t(int, q_left_rx, num_online_cpus());
+
+ pf->q_left_tx = q_left_tx - pf->num_lan_tx;
+ pf->q_left_rx = q_left_rx - pf->num_lan_rx;
+}
+
+/**
+ * ice_deinit_pf - Unrolls initializations done by ice_init_pf
+ * @pf: board private structure to initialize
+ */
+static void ice_deinit_pf(struct ice_pf *pf)
+{
+ if (pf->serv_tmr.function)
+ del_timer_sync(&pf->serv_tmr);
+ if (pf->serv_task.func)
+ cancel_work_sync(&pf->serv_task);
+ mutex_destroy(&pf->sw_mutex);
+ mutex_destroy(&pf->avail_q_mutex);
+}
+
+/**
+ * ice_init_pf - Initialize general software structures (struct ice_pf)
+ * @pf: board private structure to initialize
+ */
+static void ice_init_pf(struct ice_pf *pf)
+{
+ bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
+ set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+
+ mutex_init(&pf->sw_mutex);
+ mutex_init(&pf->avail_q_mutex);
+
+ /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
+ mutex_lock(&pf->avail_q_mutex);
+ bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
+ bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
+ mutex_unlock(&pf->avail_q_mutex);
+
+ if (pf->hw.func_caps.common_cap.rss_table_size)
+ set_bit(ICE_FLAG_RSS_ENA, pf->flags);
+
+ /* setup service timer and periodic service task */
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task);
+ clear_bit(__ICE_SERVICE_SCHED, pf->state);
+}
+
+/**
+ * ice_ena_msix_range - Request a range of MSIX vectors from the OS
+ * @pf: board private structure
+ *
+ * Compute the number of MSI-X vectors required (v_budget) and request them
+ * from the OS. Return the number of vectors reserved, or negative on failure.
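+ *
+ * For example, with 8 online CPUs the budget is one misc/OICR vector plus
+ * eight LAN vectors (v_budget = 9), subject to what the hardware exposes
+ * and what pci_enable_msix_range() actually grants.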
+ */
+static int ice_ena_msix_range(struct ice_pf *pf)
+{
+ int v_left, v_actual, v_budget = 0;
+ int needed, err, i;
+
+ v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
+
+ /* reserve one vector for miscellaneous handler */
+ needed = 1;
+ v_budget += needed;
+ v_left -= needed;
+
+ /* reserve vectors for LAN traffic */
+ pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
+ v_budget += pf->num_lan_msix;
+
+ pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+
+ if (!pf->msix_entries) {
+ err = -ENOMEM;
+ goto exit_err;
+ }
+
+ for (i = 0; i < v_budget; i++)
+ pf->msix_entries[i].entry = i;
+
+ /* actually reserve the vectors */
+ v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+ ICE_MIN_MSIX, v_budget);
+
+ if (v_actual < 0) {
+ dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n");
+ err = v_actual;
+ goto msix_err;
+ }
+
+ if (v_actual < v_budget) {
+ dev_warn(&pf->pdev->dev,
+ "not enough vectors. requested = %d, obtained = %d\n",
+ v_budget, v_actual);
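+ /* prefer keeping the full LAN allocation; otherwise fall back
+ * to a single LAN vector alongside the misc vector
+ */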
+ if (v_actual >= (pf->num_lan_msix + 1)) {
+ pf->num_avail_msix = v_actual - (pf->num_lan_msix + 1);
+ } else if (v_actual >= 2) {
+ pf->num_lan_msix = 1;
+ pf->num_avail_msix = v_actual - 2;
+ } else {
+ pci_disable_msix(pf->pdev);
+ err = -ERANGE;
+ goto msix_err;
+ }
+ }
+
+ return v_actual;
+
+msix_err:
+ devm_kfree(&pf->pdev->dev, pf->msix_entries);
+ goto exit_err;
+
+exit_err:
+ pf->num_lan_msix = 0;
+ clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+ return err;
+}
+
+/**
+ * ice_dis_msix - Disable MSI-X interrupt setup in OS
+ * @pf: board private structure
+ */
+static void ice_dis_msix(struct ice_pf *pf)
+{
+ pci_disable_msix(pf->pdev);
+ devm_kfree(&pf->pdev->dev, pf->msix_entries);
+ pf->msix_entries = NULL;
+ clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
+}
+
+/**
+ * ice_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ */
+static int ice_init_interrupt_scheme(struct ice_pf *pf)
+{
+ int vectors = 0;
+ ssize_t size;
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ vectors = ice_ena_msix_range(pf);
+ else
+ return -ENODEV;
+
+ if (vectors < 0)
+ return vectors;
+
+ /* set up vector assignment tracking */
+ size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors);
+
+ pf->irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+ if (!pf->irq_tracker) {
+ ice_dis_msix(pf);
+ return -ENOMEM;
+ }
+
+ pf->irq_tracker->num_entries = vectors;
+
+ return 0;
+}
+
+/**
+ * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
+ * @pf: board private structure
+ */
+static void ice_clear_interrupt_scheme(struct ice_pf *pf)
+{
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ ice_dis_msix(pf);
+
+ devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+ pf->irq_tracker = NULL;
+}
+
+/**
+ * ice_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ice_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+
+ /* this driver uses devres, see Documentation/driver-model/devres.txt */
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
+ if (err) {
+ dev_err(&pdev->dev, "I/O map error %d\n", err);
+ return err;
+ }
+
+ pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
+ if (!pf)
+ return -ENOMEM;
+
+ /* set up for high or low dma */
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ return err;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ pf->pdev = pdev;
+ pci_set_drvdata(pdev, pf);
+ set_bit(__ICE_DOWN, pf->state);
+
+ hw = &pf->hw;
+ hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
+ hw->back = pf;
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+ ice_set_ctrlq_len(hw);
+
+ pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+
+#ifndef CONFIG_DYNAMIC_DEBUG
+ if (debug < -1)
+ hw->debug_mask = debug;
+#endif
+
+ err = ice_init_hw(hw);
+ if (err) {
+ dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
+ err = -EIO;
+ goto err_exit_unroll;
+ }
+
+ dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
+ hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
+ hw->api_maj_ver, hw->api_min_ver);
+
+ ice_init_pf(pf);
+
+ ice_determine_q_usage(pf);
+
+ pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
+ hw->func_caps.guaranteed_num_vsi);
+ if (!pf->num_alloc_vsi) {
+ err = -EIO;
+ goto err_init_pf_unroll;
+ }
+
+ pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
+ sizeof(struct ice_vsi *), GFP_KERNEL);
+ if (!pf->vsi) {
+ err = -ENOMEM;
+ goto err_init_pf_unroll;
+ }
+
+ err = ice_init_interrupt_scheme(pf);
+ if (err) {
+ dev_err(&pdev->dev,
+ "ice_init_interrupt_scheme failed: %d\n", err);
+ err = -EIO;
+ goto err_init_interrupt_unroll;
+ }
+
+ /* In case of MSIX we are going to setup the misc vector right here
+ * to handle admin queue events etc. In case of legacy and MSI
+ * the misc functionality and queue processing is combined in
+ * the same vector and that gets setup at open.
+ */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(&pdev->dev,
+ "setup of misc vector failed: %d\n", err);
+ goto err_init_interrupt_unroll;
+ }
+ }
+
+ /* create switch struct for the switch element created by FW on boot */
+ pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw),
+ GFP_KERNEL);
+ if (!pf->first_sw) {
+ err = -ENOMEM;
+ goto err_msix_misc_unroll;
+ }
+
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ pf->first_sw->pf = pf;
+
+ /* record the sw_id available for later use */
+ pf->first_sw->sw_id = hw->port_info->sw_id;
+
+ err = ice_setup_pf_sw(pf);
+ if (err) {
+ dev_err(&pdev->dev,
+ "probe failed due to setup pf switch:%d\n", err);
+ goto err_alloc_sw_unroll;
+ }
+
+ /* Driver is mostly up */
+ clear_bit(__ICE_DOWN, pf->state);
+
+ /* since everything is good, start the service timer */
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+
+ err = ice_init_link_events(pf->hw.port_info);
+ if (err) {
+ dev_err(&pdev->dev, "ice_init_link_events failed: %d\n", err);
+ goto err_alloc_sw_unroll;
+ }
+
+ return 0;
+
+err_alloc_sw_unroll:
+ set_bit(__ICE_DOWN, pf->state);
+ devm_kfree(&pf->pdev->dev, pf->first_sw);
+err_msix_misc_unroll:
+ ice_free_irq_msix_misc(pf);
+err_init_interrupt_unroll:
+ ice_clear_interrupt_scheme(pf);
+ devm_kfree(&pdev->dev, pf->vsi);
+err_init_pf_unroll:
+ ice_deinit_pf(pf);
+ ice_deinit_hw(hw);
+err_exit_unroll:
+ pci_disable_pcie_error_reporting(pdev);
+ return err;
+}
+
+/**
+ * ice_remove - Device removal routine
+ * @pdev: PCI device information struct
+ */
+static void ice_remove(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ int i = 0;
+ int err;
+
+ if (!pf)
+ return;
+
+ set_bit(__ICE_DOWN, pf->state);
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (!pf->vsi[i])
+ continue;
+
+ err = ice_vsi_release(pf->vsi[i]);
+ if (err)
+ dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n",
+ i, err);
+ }
+
+ ice_free_irq_msix_misc(pf);
+ ice_clear_interrupt_scheme(pf);
+ ice_deinit_pf(pf);
+ ice_deinit_hw(&pf->hw);
+ pci_disable_pcie_error_reporting(pdev);
+}
+
+/* ice_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id ice_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_10G_BASE_T), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_C810_SGMII), 0 },
+ /* required last entry */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
+
+static struct pci_driver ice_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ice_pci_tbl,
+ .probe = ice_probe,
+ .remove = ice_remove,
+};
+
+/**
+ * ice_module_init - Driver registration routine
+ *
+ * ice_module_init is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init ice_module_init(void)
+{
+ int status;
+
+ pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
+ pr_info("%s\n", ice_copyright);
+
+ ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME);
+ if (!ice_wq) {
+ pr_err("Failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
+ status = pci_register_driver(&ice_driver);
+ if (status) {
+ pr_err("failed to register pci driver, err %d\n", status);
+ destroy_workqueue(ice_wq);
+ }
+
+ return status;
+}
+module_init(ice_module_init);
+
+/**
+ * ice_module_exit - Driver exit cleanup routine
+ *
+ * ice_module_exit is called just before the driver is removed
+ * from memory.
+ */
+static void __exit ice_module_exit(void)
+{
+ pci_unregister_driver(&ice_driver);
+ destroy_workqueue(ice_wq);
+ pr_info("module unloaded\n");
+}
+module_exit(ice_module_exit);
+
+/**
+ * ice_set_mac_address - NDO callback to set mac address
+ * @netdev: network interface device structure
+ * @pi: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_set_mac_address(struct net_device *netdev, void *pi)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ struct sockaddr *addr = pi;
+ enum ice_status status;
+ LIST_HEAD(a_mac_list);
+ LIST_HEAD(r_mac_list);
+ u8 flags = 0;
+ int err;
+ u8 *mac;
+
+ mac = (u8 *)addr->sa_data;
+
+ if (!is_valid_ether_addr(mac))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, mac)) {
+ netdev_warn(netdev, "already using mac %pM\n", mac);
+ return 0;
+ }
+
+ if (test_bit(__ICE_DOWN, pf->state) ||
+ ice_is_reset_recovery_pending(pf->state)) {
+ netdev_err(netdev, "can't set mac %pM. device not ready\n",
+ mac);
+ return -EBUSY;
+ }
+
+ /* When we change the mac address we also have to change the mac address
+ * based filter rules that were created previously for the old mac
+ * address. So first, we remove the old filter rule using ice_remove_mac
+ * and then create a new filter rule using ice_add_mac. Note that for
+ * both these operations, we first need to form a "list" of mac
+ * addresses (even though in this case, we have only 1 mac address to be
+ * added/removed) and this is done using ice_add_mac_to_list. Depending on
+ * the ensuing operation this "list" of mac addresses is either to be
+ * added or removed from the filter.
+ */
+ err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
+ if (err) {
+ err = -EADDRNOTAVAIL;
+ goto free_lists;
+ }
+
+ status = ice_remove_mac(hw, &r_mac_list);
+ if (status) {
+ err = -EADDRNOTAVAIL;
+ goto free_lists;
+ }
+
+ err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
+ if (err) {
+ err = -EADDRNOTAVAIL;
+ goto free_lists;
+ }
+
+ status = ice_add_mac(hw, &a_mac_list);
+ if (status) {
+ err = -EADDRNOTAVAIL;
+ goto free_lists;
+ }
+
+free_lists:
+ /* free list entries */
+ ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
+ ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
+
+ if (err) {
+ netdev_err(netdev, "can't set mac %pM. filter update failed\n",
+ mac);
+ return err;
+ }
+
+ /* change the netdev's mac address */
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
+ netdev->dev_addr);
+
+ /* write new mac address to the firmware */
+ flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
+ status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
+ if (status) {
+ netdev_err(netdev, "can't set mac %pM. write to firmware failed.\n",
+ mac);
+ }
+ return 0;
+}
+
+/**
+ * ice_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ */
+static void ice_set_rx_mode(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ if (!vsi)
+ return;
+
+ /* Set the flags to synchronize filters
+ * ndo_set_rx_mode may be triggered even without a change in netdev
+ * flags
+ */
+ set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
+ set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
+ set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
+
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ ice_service_task_schedule(vsi->back);
+}
+
+/**
+ * ice_fdb_add - add an entry to the hardware database
+ * @ndm: the input from the stack
+ * @tb: pointer to array of nladdr (unused)
+ * @dev: the net device pointer
+ * @addr: the MAC address entry being added
+ * @vid: VLAN id
+ * @flags: instructions from stack about fdb operation
+ */
+static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid, u16 flags)
+{
+ int err;
+
+ if (vid) {
+ netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
+ return -EINVAL;
+ }
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ netdev_err(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+/**
+ * ice_fdb_del - delete an entry from the hardware database
+ * @ndm: the input from the stack
+ * @tb: pointer to array of nladdr (unused)
+ * @dev: the net device pointer
+ * @addr: the MAC address entry being added
+ * @vid: VLAN id
+ */
+static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ __always_unused u16 vid)
+{
+ int err;
+
+ if (ndm->ndm_state & NUD_PERMANENT) {
+ netdev_err(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+
+ return err;
+}
+
+/**
+ * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
+ * @vsi: the vsi being changed
+ */
+static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ /* Here we are configuring the VSI to let the driver add VLAN tags by
+ * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
+ * tag insertion happens in the Tx hot path, in ice_tx_map.
+ */
+ ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.vsi_num = vsi->vsi_num;
+
+ status = ice_aq_update_vsi(hw, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
+ * @vsi: the vsi being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+{
+ struct device *dev = &vsi->back->pdev->dev;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx ctxt = { 0 };
+ enum ice_status status;
+
+ /* Here we are configuring what the VSI should do with the VLAN tag in
+ * the Rx packet. We can either leave the tag in the packet or put it in
+ * the Rx descriptor.
+ */
+ if (ena) {
+ /* Strip VLAN tag from Rx packet and put it in the desc */
+ ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ } else {
+ /* Disable stripping. Leave tag in packet */
+ ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+ }
+
+ ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.vsi_num = vsi->vsi_num;
+
+ status = ice_aq_update_vsi(hw, &ctxt, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+ ena, status, hw->adminq.sq_last_status);
+ return -EIO;
+ }
+
+ vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+ return 0;
+}
+
+/**
+ * ice_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ */
+static int ice_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ int ret = 0;
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
+ ret = ice_vsi_manage_vlan_stripping(vsi, true);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
+ ret = ice_vsi_manage_vlan_stripping(vsi, false);
+ else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+ ret = ice_vsi_manage_vlan_insertion(vsi);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+ ret = ice_vsi_manage_vlan_insertion(vsi);
+
+ return ret;
+}
+
+/**
+ * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
+ * @vsi: VSI to setup vlan properties for
+ */
+static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
+{
+ int ret = 0;
+
+ if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ ret = ice_vsi_manage_vlan_stripping(vsi, true);
+ if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ ret = ice_vsi_manage_vlan_insertion(vsi);
+
+ return ret;
+}
+
+/**
+ * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
+ * @vsi: the VSI being brought back up
+ */
+static int ice_restore_vlan(struct ice_vsi *vsi)
+{
+ int err;
+ u16 vid;
+
+ if (!vsi->netdev)
+ return -EINVAL;
+
+ err = ice_vsi_vlan_setup(vsi);
+ if (err)
+ return err;
+
+ for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
+ err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: The Tx ring to configure
+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ */
+static void
+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+
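+ /* queue base address, programmed in the units implied by the
+ * ICE_TLAN_CTX_BASE_S shift of the ring's DMA address
+ */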
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ /* queue belongs to a specific VSI type
+ * VF / VM index should be programmed per vmvf_type setting:
+ * for vmvf_type = VF, it is VF number between 0-256
+ * for vmvf_type = VM, it is VM number between 0-767
+ * for PF or EMP this field should be set to zero
+ */
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ break;
+ default:
+ return;
+ }
+
+ /* make sure the context is associated with the right VSI */
+ tlan_ctx->src_vsi = vsi->vsi_num;
+
+ tlan_ctx->tso_ena = ICE_TX_LEGACY;
+ tlan_ctx->tso_qnum = pf_q;
+
+ /* Legacy or Advanced Host Interface:
+ * 0: Advanced Host Interface
+ * 1: Legacy Host Interface
+ */
+ tlan_ctx->legacy_int = ICE_TX_LEGACY;
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+static int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
+{
+ struct ice_aqc_add_tx_qgrp *qg_buf;
+ struct ice_aqc_add_txqs_perq *txq;
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ u16 buf_len, i, pf_q;
+ int err = 0, tc = 0;
+ u8 num_q_grps;
+
+ buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
+ qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
+ if (!qg_buf)
+ return -ENOMEM;
+
+ if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
+ err = -EINVAL;
+ goto err_cfg_txqs;
+ }
+ qg_buf->num_txqs = 1;
+ num_q_grps = 1;
+
+ /* set up and configure the tx queues */
+ ice_for_each_txq(vsi, i) {
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+
+ pf_q = vsi->txq_map[i];
+ ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ /* init queue specific tail reg. It is referred to as the
+ * transmit comm scheduler queue doorbell.
+ */
+ vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
+ num_q_grps, qg_buf, buf_len, NULL);
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ err = -ENODEV;
+ goto err_cfg_txqs;
+ }
+
+ /* Add the Tx queue TEID from the response into the VSI Tx ring.
+ * This completes configuring and enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ vsi->tx_rings[i]->txq_teid =
+ le32_to_cpu(txq->q_teid);
+ }
+err_cfg_txqs:
+ devm_kfree(&pf->pdev->dev, qg_buf);
+ return err;
+}
+
+/**
+ * ice_setup_rx_ctx - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in RLAN context.
+ */
+static int ice_setup_rx_ctx(struct ice_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 rxdid = ICE_RXDID_FLEX_NIC;
+ struct ice_rlan_ctx rlan_ctx;
+ u32 regval;
+ u16 pf_q;
+ int err;
+
+ /* find the Rx queue number in the global space of 2K Rx queues */
+ pf_q = vsi->rxq_map[ring->q_index];
+
+ /* clear the context structure first */
+ memset(&rlan_ctx, 0, sizeof(rlan_ctx));
+
+ rlan_ctx.base = ring->dma >> 7;
+
+ rlan_ctx.qlen = ring->count;
+
+ /* Receive Packet Data Buffer Size.
+ * The Packet Data Buffer Size is defined in 128 byte units.
+ */
+ rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+
+ /* use 32 byte descriptors */
+ rlan_ctx.dsize = 1;
+
+ /* Strip the Ethernet CRC bytes before the packet is posted to host
+ * memory.
+ */
+ rlan_ctx.crcstrip = 1;
+
+ /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
+ rlan_ctx.l2tsel = 1;
+
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+
+ /* This controls whether the VLAN tag is stripped from inner headers.
+ * If enabled by this flag, the VLAN in the inner L2 header is stripped
+ * and reported in the receive descriptor.
+ */
+ rlan_ctx.showiv = 0;
+
+ /* Max packet size for this queue - must not be set to a larger value
+ * than 5 x DBUF
+ */
+ rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+ ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
+
+ /* Rx queue threshold in units of 64 */
+ rlan_ctx.lrxqthresh = 1;
+
+ /* Enable Flexible Descriptors in the queue context which
+ * allows this driver to select a specific receive descriptor format
+ */
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ /* increasing context priority to pick up profile id;
+ * default is 0x01; setting to 0x03 to ensure the profile
+ * is programmed even if the previous context had the same priority
+ */
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+
+ /* Absolute queue number out of 2K needs to be passed */
+ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+ if (err) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ pf_q, err);
+ return -EIO;
+ }
+
+ /* init queue specific tail register */
+ ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+ ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+
+ return 0;
+}
+
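+/* Illustrative sketch (standalone C, hypothetical names): several Rx context
+ * fields above are byte values rescaled to 128-byte units, plus a clamp of
+ * rxmax to what the chained buffers can hold.
+ */
+#include <stdint.h>
+
+#define UNIT_SHIFT 7 /* context encodes base/dbuf in 128-byte units */
+
+struct rx_ctx_sketch {
+        uint64_t base;  /* ring DMA base, 128B units */
+        uint16_t dbuf;  /* buffer size, 128B units */
+        uint16_t rxmax; /* max frame, bytes */
+};
+
+static void fill_rx_ctx(struct rx_ctx_sketch *c, uint64_t dma,
+                        uint16_t buf_len, uint16_t max_frame,
+                        uint16_t max_chained)
+{
+        uint32_t cap = (uint32_t)max_chained * buf_len;
+
+        c->base = dma >> UNIT_SHIFT;
+        c->dbuf = buf_len >> UNIT_SHIFT;
+        c->rxmax = max_frame < cap ? max_frame : (uint16_t)cap;
+}
+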
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Rx VSI for operation.
+ */
+static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
+ vsi->max_frame = vsi->netdev->mtu +
+ ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ else
+ vsi->max_frame = ICE_RXBUF_2048;
+
+ vsi->rx_buf_len = ICE_RXBUF_2048;
+ /* set up individual rings */
+ for (i = 0; i < vsi->num_rxq && !err; i++)
+ err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+
+ if (err) {
+ dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
+ return -EIO;
+ }
+ return err;
+}
+
+/**
+ * ice_vsi_cfg - Setup the VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and negative value on error
+ */
+static int ice_vsi_cfg(struct ice_vsi *vsi)
+{
+ int err;
+
+ ice_set_rx_mode(vsi->netdev);
+
+ err = ice_restore_vlan(vsi);
+ if (err)
+ return err;
+
+ err = ice_vsi_cfg_txqs(vsi);
+ if (!err)
+ err = ice_vsi_cfg_rxqs(vsi);
+
+ return err;
+}
+
+/**
+ * ice_vsi_stop_tx_rings - Disable Tx rings
+ * @vsi: the VSI being configured
+ */
+static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+ u32 *q_teids, val;
+ u16 *q_ids, i;
+ int err = 0;
+
+ if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+ return -EINVAL;
+
+ q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
+ GFP_KERNEL);
+ if (!q_teids)
+ return -ENOMEM;
+
+ q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
+ GFP_KERNEL);
+ if (!q_ids) {
+ err = -ENOMEM;
+ goto err_alloc_q_ids;
+ }
+
+ /* set up the tx queue list to be disabled */
+ ice_for_each_txq(vsi, i) {
+ u16 v_idx;
+
+ if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ q_ids[i] = vsi->txq_map[i];
+ q_teids[i] = vsi->tx_rings[i]->txq_teid;
+
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
+
+ /* trigger a software interrupt for the vector associated to
+ * the queue to schedule napi handler
+ */
+ v_idx = vsi->tx_rings[i]->q_vector->v_idx;
+ wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
+ GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
+ }
+ status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
+ NULL);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n",
+ status);
+ err = -ENODEV;
+ }
+
+err_out:
+ devm_kfree(&pf->pdev->dev, q_ids);
+
+err_alloc_q_ids:
+ devm_kfree(&pf->pdev->dev, q_teids);
+
+ return err;
+}
+
+/**
+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @ena: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries; otherwise returns 0 on success.
+ */
+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
+{
+ int i;
+
+ for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+ u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
+
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ break;
+
+ usleep_range(10, 20);
+ }
+ if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
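+/* Illustrative sketch (standalone C, hypothetical names): the wait helper
+ * above is the classic bounded poll: sample, compare against the desired
+ * state, sleep, and give up with -ETIMEDOUT once the retry budget is spent.
+ */
+#include <errno.h>
+#include <stdbool.h>
+#include <unistd.h>
+
+#define RETRY_LIMIT 100
+
+static int wait_for_state(bool (*read_state)(void), bool want)
+{
+        for (int i = 0; i < RETRY_LIMIT; i++) {
+                if (read_state() == want)
+                        return 0;
+                usleep(20); /* stand-in for usleep_range(10, 20) */
+        }
+        return -ETIMEDOUT;
+}
+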
+/**
+ * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings
+ * @vsi: the VSI being configured
+ * @ena: start or stop the rx rings
+ */
+static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int i, j, ret = 0;
+
+ for (i = 0; i < vsi->num_rxq; i++) {
+ int pf_q = vsi->rxq_map[i];
+ u32 rx_reg;
+
+ for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
+ if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
+ ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Skip if the queue is already in the requested state */
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ continue;
+
+ /* turn on/off the queue */
+ if (ena)
+ rx_reg |= QRX_CTRL_QENA_REQ_M;
+ else
+ rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+ wr32(hw, QRX_CTRL(pf_q), rx_reg);
+
+ /* wait for the change to finish */
+ ret = ice_pf_rxq_wait(pf, pf_q, ena);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VSI idx %d Rx ring %d %sable timeout\n",
+ vsi->idx, pf_q, (ena ? "en" : "dis"));
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * ice_vsi_start_rx_rings - start VSI's rx rings
+ * @vsi: the VSI whose rings are to be started
+ *
+ * Returns 0 on success and a negative value on error
+ */
+static int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
+{
+ return ice_vsi_ctrl_rx_rings(vsi, true);
+}
+
+/**
+ * ice_vsi_stop_rx_rings - stop VSI's rx rings
+ * @vsi: the VSI
+ *
+ * Returns 0 on success and a negative value on error
+ */
+static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
+{
+ return ice_vsi_ctrl_rx_rings(vsi, false);
+}
+
+/**
+ * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings
+ * @vsi: the VSI
+ * Returns 0 on success and a negative value on error
+ */
+static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)
+{
+ int err_tx, err_rx;
+
+ err_tx = ice_vsi_stop_tx_rings(vsi);
+ if (err_tx)
+ dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n");
+
+ err_rx = ice_vsi_stop_rx_rings(vsi);
+ if (err_rx)
+ dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n");
+
+ if (err_tx || err_rx)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ */
+static void ice_napi_enable_all(struct ice_vsi *vsi)
+{
+ int q_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+ napi_enable(&vsi->q_vectors[q_idx]->napi);
+}
+
+/**
+ * ice_up_complete - Finish the last steps of bringing up a connection
+ * @vsi: The VSI being configured
+ *
+ * Return 0 on success and negative value on error
+ */
+static int ice_up_complete(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ ice_vsi_cfg_msix(vsi);
+ else
+ return -ENOTSUPP;
+
+ /* Enable only Rx rings, Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ err = ice_vsi_start_rx_rings(vsi);
+ if (err)
+ return err;
+
+ clear_bit(__ICE_DOWN, vsi->state);
+ ice_napi_enable_all(vsi);
+ ice_vsi_ena_irq(vsi);
+
+ if (vsi->port_info &&
+ (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
+ vsi->netdev) {
+ ice_print_link_msg(vsi, true);
+ netif_tx_start_all_queues(vsi->netdev);
+ netif_carrier_on(vsi->netdev);
+ }
+
+ ice_service_task_schedule(pf);
+
+ return err;
+}
+
+/**
+ * ice_up - Bring the connection back up after being down
+ * @vsi: VSI being configured
+ */
+int ice_up(struct ice_vsi *vsi)
+{
+ int err;
+
+ err = ice_vsi_cfg(vsi);
+ if (!err)
+ err = ice_up_complete(vsi);
+
+ return err;
+}
+
+/**
+ * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
+ * @ring: Tx or Rx ring to read stats from
+ * @pkts: packets stats counter
+ * @bytes: bytes stats counter
+ *
+ * This function fetches stats from the ring, taking into account the atomic
+ * operations needed to read u64 values on a 32 bit machine.
+ */
+static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
+ u64 *bytes)
+{
+ unsigned int start;
+ *pkts = 0;
+ *bytes = 0;
+
+ if (!ring)
+ return;
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ *pkts = ring->stats.pkts;
+ *bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+}
+
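+/* Illustrative sketch (standalone C, hypothetical names; memory barriers
+ * omitted for brevity): u64_stats_fetch_begin/retry is a sequence-counter
+ * read loop, so a 32-bit reader never returns a torn 64-bit value.
+ */
+#include <stdint.h>
+
+struct stats_pair { uint64_t pkts, bytes; };
+
+struct synced_stats {
+        volatile uint32_t seq; /* odd while a writer is mid-update */
+        struct stats_pair s;
+};
+
+static struct stats_pair read_stats(const struct synced_stats *ss)
+{
+        struct stats_pair out;
+        uint32_t start;
+
+        do {
+                while ((start = ss->seq) & 1)
+                        ; /* writer in progress, spin */
+                out = ss->s;
+        } while (ss->seq != start); /* retry if a writer intervened */
+
+        return out;
+}
+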
+/**
+ * ice_stat_update40 - read 40 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @hireg: high 32 bit HW register to read from
+ * @loreg: low 32 bit HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+static void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat,
+ u64 *cur_stat)
+{
+ u64 new_data;
+
+ new_data = rd32(hw, loreg);
+ new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+
+ /* device stats are not reset at PFR, so they likely will not be zero
+ * when the driver starts. Thus, save the first values read and use them
+ * as offsets to be subtracted from the raw values in order to report
+ * stats that count from zero.
+ */
+ */
+ if (!prev_stat_loaded)
+ *prev_stat = new_data;
+ if (likely(new_data >= *prev_stat))
+ *cur_stat = new_data - *prev_stat;
+ else
+ /* to manage the potential roll-over */
+ *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
+ *cur_stat &= 0xFFFFFFFFFFULL;
+}
+
+/**
+ * ice_stat_update32 - read 32 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @reg: HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+static void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+
+ /* device stats are not reset at PFR, so they likely will not be zero
+ * when the driver starts. Thus, save the first values read and use them
+ * as offsets to be subtracted from the raw values in order to report
+ * stats that count from zero.
+ */
+ */
+ if (!prev_stat_loaded)
+ *prev_stat = new_data;
+ if (likely(new_data >= *prev_stat))
+ *cur_stat = new_data - *prev_stat;
+ else
+ /* to manage the potential roll-over */
+ *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
+}
+
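+/* Illustrative sketch (standalone C, hypothetical names): both update
+ * helpers above specialize the same rollover-safe delta of a free-running
+ * W-bit counter; modular subtraction under a W-bit mask covers one wrap.
+ */
+#include <stdint.h>
+
+static uint64_t counter_delta(uint64_t prev, uint64_t now, unsigned int width)
+{
+        uint64_t mask = width < 64 ? (1ULL << width) - 1 : ~0ULL;
+
+        /* equals now - prev with no wrap, now + 2^width - prev after one */
+        return (now - prev) & mask;
+}
+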
+/**
+ * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
+ * @vsi: the VSI to be updated
+ */
+static void ice_update_eth_stats(struct ice_vsi *vsi)
+{
+ struct ice_eth_stats *prev_es, *cur_es;
+ struct ice_hw *hw = &vsi->back->hw;
+ u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
+
+ prev_es = &vsi->eth_stats_prev;
+ cur_es = &vsi->eth_stats;
+
+ ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_bytes,
+ &cur_es->rx_bytes);
+
+ ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_unicast,
+ &cur_es->rx_unicast);
+
+ ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_multicast,
+ &cur_es->rx_multicast);
+
+ ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
+ &cur_es->rx_broadcast);
+
+ ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_discards, &cur_es->rx_discards);
+
+ ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_bytes,
+ &cur_es->tx_bytes);
+
+ ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_unicast,
+ &cur_es->tx_unicast);
+
+ ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_multicast,
+ &cur_es->tx_multicast);
+
+ ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
+ vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
+ &cur_es->tx_broadcast);
+
+ ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_errors, &cur_es->tx_errors);
+
+ vsi->stat_offsets_loaded = true;
+}
+
+/**
+ * ice_update_vsi_ring_stats - Update VSI stats counters
+ * @vsi: the VSI to be updated
+ */
+static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
+{
+ struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+ struct ice_ring *ring;
+ u64 pkts, bytes;
+ int i;
+
+ /* reset netdev stats */
+ vsi_stats->tx_packets = 0;
+ vsi_stats->tx_bytes = 0;
+ vsi_stats->rx_packets = 0;
+ vsi_stats->rx_bytes = 0;
+
+ /* reset non-netdev (extended) stats */
+ vsi->tx_restart = 0;
+ vsi->tx_busy = 0;
+ vsi->tx_linearize = 0;
+ vsi->rx_buf_failed = 0;
+ vsi->rx_page_failed = 0;
+
+ rcu_read_lock();
+
+ /* update Tx rings counters */
+ ice_for_each_txq(vsi, i) {
+ ring = READ_ONCE(vsi->tx_rings[i]);
+ ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ vsi_stats->tx_packets += pkts;
+ vsi_stats->tx_bytes += bytes;
+ vsi->tx_restart += ring->tx_stats.restart_q;
+ vsi->tx_busy += ring->tx_stats.tx_busy;
+ vsi->tx_linearize += ring->tx_stats.tx_linearize;
+ }
+
+ /* update Rx rings counters */
+ ice_for_each_rxq(vsi, i) {
+ ring = READ_ONCE(vsi->rx_rings[i]);
+ ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ vsi_stats->rx_packets += pkts;
+ vsi_stats->rx_bytes += bytes;
+ vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
+ vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+ }
+
+ rcu_read_unlock();
+}
+
+/**
+ * ice_update_vsi_stats - Update VSI stats counters
+ * @vsi: the VSI to be updated
+ */
+static void ice_update_vsi_stats(struct ice_vsi *vsi)
+{
+ struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
+ struct ice_eth_stats *cur_es = &vsi->eth_stats;
+ struct ice_pf *pf = vsi->back;
+
+ if (test_bit(__ICE_DOWN, vsi->state) ||
+ test_bit(__ICE_CFG_BUSY, pf->state))
+ return;
+
+ /* get stats as recorded by Tx/Rx rings */
+ ice_update_vsi_ring_stats(vsi);
+
+ /* get VSI stats as recorded by the hardware */
+ ice_update_eth_stats(vsi);
+
+ cur_ns->tx_errors = cur_es->tx_errors;
+ cur_ns->rx_dropped = cur_es->rx_discards;
+ cur_ns->tx_dropped = cur_es->tx_discards;
+ cur_ns->multicast = cur_es->rx_multicast;
+
+ /* update some more netdev stats if this is main VSI */
+ if (vsi->type == ICE_VSI_PF) {
+ cur_ns->rx_crc_errors = pf->stats.crc_errors;
+ cur_ns->rx_errors = pf->stats.crc_errors +
+ pf->stats.illegal_bytes;
+ cur_ns->rx_length_errors = pf->stats.rx_len_errors;
+ }
+}
+
+/**
+ * ice_update_pf_stats - Update PF port stats counters
+ * @pf: PF whose stats need to be updated
+ */
+static void ice_update_pf_stats(struct ice_pf *pf)
+{
+ struct ice_hw_port_stats *prev_ps, *cur_ps;
+ struct ice_hw *hw = &pf->hw;
+ u8 pf_id;
+
+ prev_ps = &pf->stats_prev;
+ cur_ps = &pf->stats;
+ pf_id = hw->pf_id;
+
+ ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
+ pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
+ &cur_ps->eth.rx_bytes);
+
+ ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
+ pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
+ &cur_ps->eth.rx_unicast);
+
+ ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
+ pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
+ &cur_ps->eth.rx_multicast);
+
+ ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
+ pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
+ &cur_ps->eth.rx_broadcast);
+
+ ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
+ pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
+ &cur_ps->eth.tx_bytes);
+
+ ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
+ pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
+ &cur_ps->eth.tx_unicast);
+
+ ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
+ pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
+ &cur_ps->eth.tx_multicast);
+
+ ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
+ pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
+ &cur_ps->eth.tx_broadcast);
+
+ ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
+ &prev_ps->tx_dropped_link_down,
+ &cur_ps->tx_dropped_link_down);
+
+ ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
+ pf->stat_prev_loaded, &prev_ps->rx_size_64,
+ &cur_ps->rx_size_64);
+
+ ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
+ pf->stat_prev_loaded, &prev_ps->rx_size_127,
+ &cur_ps->rx_size_127);
+
+ ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
+ pf->stat_prev_loaded, &prev_ps->rx_size_255,
+ &cur_ps->rx_size_255);
+
+ ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
+ pf->stat_prev_loaded, &prev_ps->rx_size_511,
+ &cur_ps->rx_size_511);
+
+ ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
+ GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
+
+ ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
+ GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
+
+ ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
+ GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_size_big, &cur_ps->rx_size_big);
+
+ ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
+ pf->stat_prev_loaded, &prev_ps->tx_size_64,
+ &cur_ps->tx_size_64);
+
+ ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
+ pf->stat_prev_loaded, &prev_ps->tx_size_127,
+ &cur_ps->tx_size_127);
+
+ ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
+ pf->stat_prev_loaded, &prev_ps->tx_size_255,
+ &cur_ps->tx_size_255);
+
+ ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
+ pf->stat_prev_loaded, &prev_ps->tx_size_511,
+ &cur_ps->tx_size_511);
+
+ ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
+ GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
+
+ ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
+ GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
+
+ ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
+ GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->tx_size_big, &cur_ps->tx_size_big);
+
+ ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
+
+ ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
+
+ ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
+
+ ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
+
+ ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
+ &prev_ps->crc_errors, &cur_ps->crc_errors);
+
+ ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
+
+ ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->mac_local_faults,
+ &cur_ps->mac_local_faults);
+
+ ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->mac_remote_faults,
+ &cur_ps->mac_remote_faults);
+
+ ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
+
+ ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_undersize, &cur_ps->rx_undersize);
+
+ ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_fragments, &cur_ps->rx_fragments);
+
+ ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_oversize, &cur_ps->rx_oversize);
+
+ ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_jabber, &cur_ps->rx_jabber);
+
+ pf->stat_prev_loaded = true;
+}
+
+/**
+ * ice_get_stats64 - get statistics for network device structure
+ * @netdev: network interface device structure
+ * @stats: main device statistics structure
+ */
+static void
+ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct rtnl_link_stats64 *vsi_stats;
+ struct ice_vsi *vsi = np->vsi;
+
+ vsi_stats = &vsi->net_stats;
+
+ if (test_bit(__ICE_DOWN, vsi->state) || !vsi->num_txq || !vsi->num_rxq)
+ return;
+ /* netdev packet/byte stats come from the ring counters and are obtained
+ * by summing them up per ring (done by ice_update_vsi_ring_stats).
+ */
+ ice_update_vsi_ring_stats(vsi);
+ stats->tx_packets = vsi_stats->tx_packets;
+ stats->tx_bytes = vsi_stats->tx_bytes;
+ stats->rx_packets = vsi_stats->rx_packets;
+ stats->rx_bytes = vsi_stats->rx_bytes;
+
+ /* The rest of the stats can be read from the hardware but instead we
+ * just return values that the watchdog task has already obtained from
+ * the hardware.
+ */
+ stats->multicast = vsi_stats->multicast;
+ stats->tx_errors = vsi_stats->tx_errors;
+ stats->tx_dropped = vsi_stats->tx_dropped;
+ stats->rx_errors = vsi_stats->rx_errors;
+ stats->rx_dropped = vsi_stats->rx_dropped;
+ stats->rx_crc_errors = vsi_stats->rx_crc_errors;
+ stats->rx_length_errors = vsi_stats->rx_length_errors;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * ice_netpoll - polling "interrupt" handler
+ * @netdev: network interface device structure
+ *
+ * Used by netconsole to send skbs without having to re-enable interrupts.
+ * This is not called in the normal interrupt path.
+ */
+static void ice_netpoll(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ if (test_bit(__ICE_DOWN, vsi->state) ||
+ !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ return;
+
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ ice_msix_clean_rings(0, vsi->q_vectors[i]);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+/**
+ * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
+ * @vsi: VSI having NAPI disabled
+ */
+static void ice_napi_disable_all(struct ice_vsi *vsi)
+{
+ int q_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+ napi_disable(&vsi->q_vectors[q_idx]->napi);
+}
+
+/**
+ * ice_down - Shutdown the connection
+ * @vsi: The VSI being stopped
+ */
+int ice_down(struct ice_vsi *vsi)
+{
+ int i, err;
+
+ /* Caller of this function is expected to set the
+ * vsi->state __ICE_DOWN bit
+ */
+ if (vsi->netdev) {
+ netif_carrier_off(vsi->netdev);
+ netif_tx_disable(vsi->netdev);
+ }
+
+ ice_vsi_dis_irq(vsi);
+ err = ice_vsi_stop_tx_rx_rings(vsi);
+ ice_napi_disable_all(vsi);
+
+ ice_for_each_txq(vsi, i)
+ ice_clean_tx_ring(vsi->tx_rings[i]);
+
+ ice_for_each_rxq(vsi, i)
+ ice_clean_rx_ring(vsi->rx_rings[i]);
+
+ if (err)
+ netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
+ vsi->vsi_num, vsi->vsw->sw_id);
+ return err;
+}
+
+/**
+ * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
+ * @vsi: VSI having resources allocated
+ *
+ * Return 0 on success, negative on failure
+ */
+static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
+{
+ int i, err;
+
+ if (!vsi->num_txq) {
+ dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ ice_for_each_txq(vsi, i) {
+ err = ice_setup_tx_ring(vsi->tx_rings[i]);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
+ * @vsi: VSI having resources allocated
+ *
+ * Return 0 on success, negative on failure
+ */
+static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
+{
+ int i, err;
+
+ if (!vsi->num_rxq) {
+ dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
+ vsi->vsi_num);
+ return -EINVAL;
+ }
+
+ ice_for_each_rxq(vsi, i) {
+ err = ice_setup_rx_ring(vsi->rx_rings[i]);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_vsi_req_irq - Request IRQ from the OS
+ * @vsi: The VSI IRQ is being requested for
+ * @basename: name for the vector
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
+{
+ struct ice_pf *pf = vsi->back;
+ int err = -EINVAL;
+
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ err = ice_vsi_req_irq_msix(vsi, basename);
+
+ return err;
+}
+
+/**
+ * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->tx_rings)
+ return;
+
+ ice_for_each_txq(vsi, i)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ ice_free_tx_ring(vsi->tx_rings[i]);
+}
+
+/**
+ * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
+{
+ int i;
+
+ if (!vsi->rx_rings)
+ return;
+
+ ice_for_each_rxq(vsi, i)
+ if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
+ ice_free_rx_ring(vsi->rx_rings[i]);
+}
+
+/**
+ * ice_vsi_open - Called when a network interface is made active
+ * @vsi: the VSI to open
+ *
+ * Initialization of the VSI
+ *
+ * Returns 0 on success, negative value on error
+ */
+static int ice_vsi_open(struct ice_vsi *vsi)
+{
+ char int_name[ICE_INT_NAME_STR_LEN];
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ /* allocate descriptors */
+ err = ice_vsi_setup_tx_rings(vsi);
+ if (err)
+ goto err_setup_tx;
+
+ err = ice_vsi_setup_rx_rings(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ err = ice_vsi_cfg(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+ err = ice_vsi_req_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
+ if (err)
+ goto err_set_qs;
+
+ err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
+ if (err)
+ goto err_set_qs;
+
+ err = ice_up_complete(vsi);
+ if (err)
+ goto err_up_complete;
+
+ return 0;
+
+err_up_complete:
+ ice_down(vsi);
+err_set_qs:
+ ice_vsi_free_irq(vsi);
+err_setup_rx:
+ ice_vsi_free_rx_rings(vsi);
+err_setup_tx:
+ ice_vsi_free_tx_rings(vsi);
+
+ return err;
+}
+
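+/* Illustrative sketch (standalone C, stub helpers for illustration only):
+ * the error path above uses staged goto unwinding, where each label
+ * releases exactly the resources acquired before the failing step, in
+ * reverse order.
+ */
+#include <errno.h>
+
+static int acquire_a(void) { return 0; }    /* stand-in: always succeeds */
+static int acquire_b(void) { return -EIO; } /* stand-in: always fails */
+static void release_a(void) { }
+
+static int open_sketch(void)
+{
+        int err;
+
+        err = acquire_a();
+        if (err)
+                return err;
+        err = acquire_b();
+        if (err)
+                goto undo_a;
+        return 0;
+
+undo_a:
+        release_a();
+        return err;
+}
+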
+/**
+ * ice_vsi_close - Shut down a VSI
+ * @vsi: the VSI being shut down
+ */
+static void ice_vsi_close(struct ice_vsi *vsi)
+{
+ if (!test_and_set_bit(__ICE_DOWN, vsi->state))
+ ice_down(vsi);
+
+ ice_vsi_free_irq(vsi);
+ ice_vsi_free_tx_rings(vsi);
+ ice_vsi_free_rx_rings(vsi);
+}
+
+/**
+ * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
+ * @vsi: the VSI being removed
+ */
+static void ice_rss_clean(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+
+ if (vsi->rss_hkey_user)
+ devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
+ if (vsi->rss_lut_user)
+ devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
+}
+
+/**
+ * ice_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ */
+static int ice_vsi_release(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf;
+
+ if (!vsi->back)
+ return -ENODEV;
+ pf = vsi->back;
+
+ if (vsi->netdev) {
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_rss_clean(vsi);
+
+ /* Disable VSI and free resources */
+ ice_vsi_dis_irq(vsi);
+ ice_vsi_close(vsi);
+
+ /* reclaim interrupt vectors back to PF */
+ ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
+ pf->num_avail_msix += vsi->num_q_vectors;
+
+ ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
+ ice_vsi_delete(vsi);
+ ice_vsi_free_q_vectors(vsi);
+ ice_vsi_clear_rings(vsi);
+
+ ice_vsi_put_qs(vsi);
+ pf->q_left_tx += vsi->alloc_txq;
+ pf->q_left_rx += vsi->alloc_rxq;
+
+ ice_vsi_clear(vsi);
+
+ return 0;
+}
+
+/**
+ * ice_dis_vsi - pause a VSI
+ * @vsi: the VSI being paused
+ */
+static void ice_dis_vsi(struct ice_vsi *vsi)
+{
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->netdev && netif_running(vsi->netdev) &&
+ vsi->type == ICE_VSI_PF)
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+
+ ice_vsi_close(vsi);
+}
+
+/**
+ * ice_ena_vsi - resume a VSI
+ * @vsi: the VSI being resumed
+ */
+static void ice_ena_vsi(struct ice_vsi *vsi)
+{
+ if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state))
+ return;
+
+ if (vsi->netdev && netif_running(vsi->netdev))
+ vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+ else if (ice_vsi_open(vsi))
+ /* this clears the DOWN bit */
+ dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n",
+ vsi->vsi_num, vsi->vsw->sw_id);
+}
+
+/**
+ * ice_pf_dis_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ */
+static void ice_pf_dis_all_vsi(struct ice_pf *pf)
+{
+ int v;
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ ice_dis_vsi(pf->vsi[v]);
+}
+
+/**
+ * ice_pf_ena_all_vsi - Resume all VSIs on a PF
+ * @pf: the PF
+ */
+static void ice_pf_ena_all_vsi(struct ice_pf *pf)
+{
+ int v;
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ ice_ena_vsi(pf->vsi[v]);
+}
+
+/**
+ * ice_rebuild - rebuild after reset
+ * @pf: pf to rebuild
+ */
+static void ice_rebuild(struct ice_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status ret;
+ int err;
+
+ if (test_bit(__ICE_DOWN, pf->state))
+ goto clear_recovery;
+
+ dev_dbg(dev, "rebuilding pf\n");
+
+ ret = ice_init_all_ctrlq(hw);
+ if (ret) {
+ dev_err(dev, "control queues init failed %d\n", ret);
+ goto fail_reset;
+ }
+
+ ret = ice_clear_pf_cfg(hw);
+ if (ret) {
+ dev_err(dev, "clear PF configuration failed %d\n", ret);
+ goto fail_reset;
+ }
+
+ ice_clear_pxe_mode(hw);
+
+ ret = ice_get_caps(hw);
+ if (ret) {
+ dev_err(dev, "ice_get_caps failed %d\n", ret);
+ goto fail_reset;
+ }
+
+ /* basic nic switch setup */
+ err = ice_setup_pf_sw(pf);
+ if (err) {
+ dev_err(dev, "ice_setup_pf_sw failed\n");
+ goto fail_reset;
+ }
+
+ /* start misc vector */
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "misc vector setup failed: %d\n", err);
+ goto fail_reset;
+ }
+ }
+
+ /* restart the VSIs that were rebuilt and running before the reset */
+ ice_pf_ena_all_vsi(pf);
+
+ return;
+
+fail_reset:
+ ice_shutdown_all_ctrlq(hw);
+ set_bit(__ICE_RESET_FAILED, pf->state);
+clear_recovery:
+ set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
+}
+
+/**
+ * ice_change_mtu - NDO callback to change the MTU
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u8 count = 0;
+
+ if (new_mtu == netdev->mtu) {
+ netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+ return 0;
+ }
+
+ if (new_mtu < netdev->min_mtu) {
+ netdev_err(netdev, "new mtu invalid. min_mtu is %d\n",
+ netdev->min_mtu);
+ return -EINVAL;
+ } else if (new_mtu > netdev->max_mtu) {
+ netdev_err(netdev, "new mtu invalid. max_mtu is %d\n",
+ netdev->max_mtu);
+ return -EINVAL;
+ }
+ /* if a reset is in progress, wait for some time for it to complete */
+ do {
+ if (ice_is_reset_recovery_pending(pf->state)) {
+ count++;
+ usleep_range(1000, 2000);
+ } else {
+ break;
+ }
+
+ } while (count < 100);
+
+ if (count == 100) {
+ netdev_err(netdev, "can't change mtu. Device is busy\n");
+ return -EBUSY;
+ }
+
+ netdev->mtu = new_mtu;
+
+ /* if VSI is up, bring it down and then back up */
+ if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
+ int err;
+
+ err = ice_down(vsi);
+ if (err) {
+ netdev_err(netdev, "change mtu if_up err %d\n", err);
+ return err;
+ }
+
+ err = ice_up(vsi);
+ if (err) {
+ netdev_err(netdev, "change mtu if_up err %d\n", err);
+ return err;
+ }
+ }
+
+ netdev_dbg(netdev, "changed mtu to %d\n", new_mtu);
+ return 0;
+}
+
+/**
+ * ice_set_rss - Set RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+
+ if (seed) {
+ struct ice_aqc_get_set_rss_keys *buf =
+ (struct ice_aqc_get_set_rss_keys *)seed;
+
+ status = ice_aq_set_rss_key(hw, vsi->vsi_num, buf);
+
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Cannot set RSS key, err %d aq_err %d\n",
+ status, hw->adminq.rq_last_status);
+ return -EIO;
+ }
+ }
+
+ if (lut) {
+ status = ice_aq_set_rss_lut(hw, vsi->vsi_num,
+ vsi->rss_lut_type, lut, lut_size);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Cannot set RSS lut, err %d aq_err %d\n",
+ status, hw->adminq.rq_last_status);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_rss - Get RSS keys and lut
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ enum ice_status status;
+
+ if (seed) {
+ struct ice_aqc_get_set_rss_keys *buf =
+ (struct ice_aqc_get_set_rss_keys *)seed;
+
+ status = ice_aq_get_rss_key(hw, vsi->vsi_num, buf);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Cannot get RSS key, err %d aq_err %d\n",
+ status, hw->adminq.rq_last_status);
+ return -EIO;
+ }
+ }
+
+ if (lut) {
+ status = ice_aq_get_rss_lut(hw, vsi->vsi_num,
+ vsi->rss_lut_type, lut, lut_size);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Cannot get RSS lut, err %d aq_err %d\n",
+ status, hw->adminq.rq_last_status);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_open - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog is enabled,
+ * and the stack is notified that the interface is ready.
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_open(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ int err;
+
+ netif_carrier_off(netdev);
+
+ err = ice_vsi_open(vsi);
+
+ if (err)
+ netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
+ vsi->vsi_num, vsi->vsw->sw_id);
+ return err;
+}
+
+/**
+ * ice_stop - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The stop entry point is called when an interface is de-activated by the OS,
+ * and the netdevice enters the DOWN state. The hardware is still under the
+ * driver's control, but the netdev interface is disabled.
+ *
+ * Returns success only - not allowed to fail
+ */
+static int ice_stop(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ ice_vsi_close(vsi);
+
+ return 0;
+}
+
+/**
+ * ice_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buffer
+ * @netdev: This port's netdev
+ * @features: Offload features that the stack believes apply
+ */
+static netdev_features_t
+ice_features_check(struct sk_buff *skb,
+ struct net_device __always_unused *netdev,
+ netdev_features_t features)
+{
+ size_t len;
+
+ /* No point in doing any of this if neither checksum nor GSO are
+ * being requested for this frame. We can rule out both by just
+ * checking for CHECKSUM_PARTIAL
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ /* We cannot support GSO if the MSS is going to be less than
+ * 64 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ features &= ~NETIF_F_GSO_MASK;
+
+ len = skb_network_header(skb) - skb->data;
+ if (len & ~(ICE_TXD_MACLEN_MAX))
+ goto out_rm_features;
+
+ len = skb_transport_header(skb) - skb_network_header(skb);
+ if (len & ~(ICE_TXD_IPLEN_MAX))
+ goto out_rm_features;
+
+ if (skb->encapsulation) {
+ len = skb_inner_network_header(skb) - skb_transport_header(skb);
+ if (len & ~(ICE_TXD_L4LEN_MAX))
+ goto out_rm_features;
+
+ len = skb_inner_transport_header(skb) -
+ skb_inner_network_header(skb);
+ if (len & ~(ICE_TXD_IPLEN_MAX))
+ goto out_rm_features;
+ }
+
+ return features;
+out_rm_features:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
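+/* Illustrative sketch (standalone C): assuming, as the names suggest, that
+ * each ICE_TXD_*_MAX above is a contiguous all-ones mask, a header length
+ * fits its descriptor field exactly when no bit outside the mask is set,
+ * which is the 'len & ~(MAX)' test used in ice_features_check().
+ */
+#include <stdbool.h>
+#include <stddef.h>
+
+static bool len_fits_field(size_t len, size_t field_mask)
+{
+        return (len & ~field_mask) == 0;
+}
+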
+static const struct net_device_ops ice_netdev_ops = {
+ .ndo_open = ice_open,
+ .ndo_stop = ice_stop,
+ .ndo_start_xmit = ice_start_xmit,
+ .ndo_features_check = ice_features_check,
+ .ndo_set_rx_mode = ice_set_rx_mode,
+ .ndo_set_mac_address = ice_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = ice_change_mtu,
+ .ndo_get_stats64 = ice_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ice_netpoll,
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+ .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
+ .ndo_set_features = ice_set_features,
+ .ndo_fdb_add = ice_fdb_add,
+ .ndo_fdb_del = ice_fdb_del,
+};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
new file mode 100644
index 000000000000..fa7a69ac92b0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+
+/**
+ * ice_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands (0x0701)
+ */
+static enum ice_status
+ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length,
+ void *data, bool last_command, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ struct ice_aqc_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
+ cmd->module_typeid = module_typeid;
+ cmd->offset = cpu_to_le32(offset);
+ cmd->length = cpu_to_le16(length);
+
+ return ice_aq_send_cmd(hw, &desc, data, length, cd);
+}
+
+/**
+ * ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
+ * @hw: pointer to the HW structure
+ * @offset: offset in words from module start
+ * @words: number of words to access
+ */
+static enum ice_status
+ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
+{
+ if ((offset + words) > hw->nvm.sr_words) {
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM error: offset beyond SR lmt.\n");
+ return ICE_ERR_PARAM;
+ }
+
+ if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
+ /* We can access only up to 4KB (one sector), in one AQ write */
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM error: tried to access %d words, limit is %d.\n",
+ words, ICE_SR_SECTOR_SIZE_IN_WORDS);
+ return ICE_ERR_PARAM;
+ }
+
+ if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
+ (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
+ /* A single access cannot spread over two sectors */
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM error: cannot spread over two sectors.\n");
+ return ICE_ERR_PARAM;
+ }
+
+ return 0;
+}
+
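+/* Illustrative sketch (standalone C, sector size assumed from the 4KB
+ * comment above): the two-sector check holds exactly when the first and
+ * last word of the access map to different sector numbers.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+
+#define SECTOR_WORDS 2048 /* assumed: 4KB sectors of 16-bit words */
+
+static bool within_one_sector(uint32_t offset, uint16_t words)
+{
+        return offset / SECTOR_WORDS == (offset + words - 1) / SECTOR_WORDS;
+}
+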
+/**
+ * ice_read_sr_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer for words reads from Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads 16-bit word buffers from the Shadow RAM using the admin command.
+ */
+static enum ice_status
+ice_read_sr_aq(struct ice_hw *hw, u32 offset, u16 words, u16 *data,
+ bool last_command)
+{
+ enum ice_status status;
+
+ status = ice_check_sr_access_params(hw, offset, words);
+
+ /* values in "offset" and "words" parameters are sized as words
+ * (16 bits) but ice_aq_read_nvm expects these values in bytes.
+ * So do this conversion while calling ice_aq_read_nvm.
+ */
+ if (!status)
+ status = ice_aq_read_nvm(hw, 0, 2 * offset, 2 * words, data,
+ last_command, NULL);
+
+ return status;
+}
+
+/**
+ * ice_read_sr_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_aq method.
+ */
+static enum ice_status
+ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
+{
+ enum ice_status status;
+
+ status = ice_read_sr_aq(hw, offset, 1, data, true);
+ if (!status)
+ *data = le16_to_cpu(*(__le16 *)data);
+
+ return status;
+}
+
+/**
+ * ice_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * This function will request NVM ownership.
+ */
+static enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+ if (hw->nvm.blank_nvm_mode)
+ return 0;
+
+ return ice_acquire_res(hw, ICE_NVM_RES_ID, access);
+}
+
+/**
+ * ice_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * This function will release NVM ownership.
+ */
+static void ice_release_nvm(struct ice_hw *hw)
+{
+ if (hw->nvm.blank_nvm_mode)
+ return;
+
+ ice_release_res(hw, ICE_NVM_RES_ID);
+}
+
+/**
+ * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
+ */
+static enum ice_status
+ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+{
+ enum ice_status status;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (!status) {
+ status = ice_read_sr_word_aq(hw, offset, data);
+ ice_release_nvm(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ice_init_nvm - initializes NVM setting
+ * @hw: pointer to the hw struct
+ *
+ * This function reads and populates NVM settings such as Shadow RAM size,
+ * max_timeout, and blank_nvm_mode
+ */
+enum ice_status ice_init_nvm(struct ice_hw *hw)
+{
+ struct ice_nvm_info *nvm = &hw->nvm;
+ u16 eetrack_lo, eetrack_hi;
+ enum ice_status status = 0;
+ u32 fla, gens_stat;
+ u8 sr_size;
+
+ /* The SR size is stored regardless of the nvm programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = rd32(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains power of 2) */
+ nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = rd32(hw, GLNVM_FLA);
+ if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
+ nvm->blank_nvm_mode = false;
+ } else { /* Blank programming mode */
+ nvm->blank_nvm_mode = true;
+ status = ICE_ERR_NVM_BLANK_MODE;
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM init error: unsupported blank mode.\n");
+ return status;
+ }
+
+ status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &hw->nvm.ver);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read DEV starter version.\n");
+ return status;
+ }
+
+ status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
+ return status;
+ }
+ status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
+ return status;
+ }
+
+ hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ return status;
+}
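+
+/* Illustrative sketch (standalone C, hypothetical names): GLNVM_GENS above
+ * reports the Shadow RAM size as a power-of-2 exponent in KB; converting to
+ * 16-bit words mirrors BIT(sr_size) * ICE_SR_WORDS_IN_1KB, with 512 words
+ * per KB.
+ */
+#include <stdint.h>
+
+#define WORDS_PER_KB 512 /* 1024 bytes / 2 bytes per word */
+
+static uint32_t sr_words(uint8_t sr_size_field)
+{
+        return (UINT32_C(1) << sr_size_field) * WORDS_PER_KB;
+}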
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
new file mode 100644
index 000000000000..f57c414bc0a9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_OSDEP_H_
+#define _ICE_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/io.h>
+#ifndef CONFIG_64BIT
+#include <linux/io-64-nonatomic-lo-hi.h>
+#endif
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+
+#define ice_flush(a) rd32((a), GLGEN_STAT)
+#define ICE_M(m, s) ((m) << (s))
+
+struct ice_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ size_t size;
+};
+
+#define ice_hw_to_dev(ptr) \
+ (&(container_of((ptr), struct ice_pf, hw))->pdev->dev)
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define ice_debug(hw, type, fmt, args...) \
+ dev_dbg(ice_hw_to_dev(hw), fmt, ##args)
+
+#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+ print_hex_dump_debug(KBUILD_MODNAME " ", \
+ DUMP_PREFIX_OFFSET, rowsize, \
+ groupsize, buf, len, false)
+#else
+#define ice_debug(hw, type, fmt, args...) \
+do { \
+ if ((type) & (hw)->debug_mask) \
+ dev_info(ice_hw_to_dev(hw), fmt, ##args); \
+} while (0)
+
+#ifdef DEBUG
+#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+do { \
+ if ((type) & (hw)->debug_mask) \
+ print_hex_dump_debug(KBUILD_MODNAME, \
+ DUMP_PREFIX_OFFSET, \
+ rowsize, groupsize, buf, \
+ len, false); \
+} while (0)
+#else
+#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \
+do { \
+ struct ice_hw *hw_l = hw; \
+ if ((type) & (hw_l)->debug_mask) { \
+ u16 len_l = len; \
+ u8 *buf_l = buf; \
+ int i; \
+ for (i = 0; i < (len_l - 16); i += 16) \
+ ice_debug(hw_l, type, "0x%04X %16ph\n",\
+ i, ((buf_l) + i)); \
+ if (i < len_l) \
+ ice_debug(hw_l, type, "0x%04X %*ph\n", \
+ i, ((len_l) - i), ((buf_l) + i));\
+ } \
+} while (0)
+#endif /* DEBUG */
+#endif /* CONFIG_DYNAMIC_DEBUG */
+
+#endif /* _ICE_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
new file mode 100644
index 000000000000..f16ff3e4a840
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -0,0 +1,1659 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_sched.h"
+
+/**
+ * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
+ * @pi: port information structure
+ * @info: Scheduler element information from firmware
+ *
+ * This function inserts the root node of the scheduling tree topology
+ * to the SW DB.
+ */
+static enum ice_status
+ice_sched_add_root_node(struct ice_port_info *pi,
+ struct ice_aqc_txsched_elem_data *info)
+{
+ struct ice_sched_node *root;
+ struct ice_hw *hw;
+ u16 max_children;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+
+ root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
+ if (!root)
+ return ICE_ERR_NO_MEMORY;
+
+ max_children = le16_to_cpu(hw->layer_info[0].max_children);
+ root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
+ sizeof(*root), GFP_KERNEL);
+ if (!root->children) {
+ devm_kfree(ice_hw_to_dev(hw), root);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ memcpy(&root->info, info, sizeof(*info));
+ pi->root = root;
+ return 0;
+}
+
+/**
+ * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
+ * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
+ * @teid: node teid to search
+ *
+ * This function searches for a node matching the teid in the scheduling tree
+ * from the SW DB. The search is recursive and is restricted by the number of
+ * layers it has searched through, stopping at the max supported layer.
+ *
+ * This function needs to be called when holding the port_info->sched_lock
+ */
+struct ice_sched_node *
+ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
+{
+ u16 i;
+
+ /* The TEID is the same as that of the start_node */
+ if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
+ return start_node;
+
+ /* The node has no children or is at the max layer */
+ if (!start_node->num_children ||
+ start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
+ start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
+ return NULL;
+
+ /* Check if teid matches to any of the children nodes */
+ for (i = 0; i < start_node->num_children; i++)
+ if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
+ return start_node->children[i];
+
+ /* Search within each child's sub-tree */
+ for (i = 0; i < start_node->num_children; i++) {
+ struct ice_sched_node *tmp;
+
+ tmp = ice_sched_find_node_by_teid(start_node->children[i],
+ teid);
+ if (tmp)
+ return tmp;
+ }
+
+ return NULL;
+}
+
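+/* Illustrative sketch (standalone C, hypothetical node type): the TEID
+ * lookup above is a depth-first search, checking the node itself, then each
+ * direct child, then recursing into every child's subtree.
+ */
+#include <stddef.h>
+#include <stdint.h>
+
+struct node_sketch {
+        uint32_t teid;
+        uint16_t num_children;
+        struct node_sketch **children;
+};
+
+static struct node_sketch *find_teid(struct node_sketch *n, uint32_t teid)
+{
+        if (n->teid == teid)
+                return n;
+        for (uint16_t i = 0; i < n->num_children; i++) {
+                struct node_sketch *hit = find_teid(n->children[i], teid);
+
+                if (hit)
+                        return hit;
+        }
+        return NULL;
+}
+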
+/**
+ * ice_sched_add_node - Insert the Tx scheduler node in SW DB
+ * @pi: port information structure
+ * @layer: Scheduler layer of the node
+ * @info: Scheduler element information from firmware
+ *
+ * This function inserts a scheduler node to the SW DB.
+ */
+enum ice_status
+ice_sched_add_node(struct ice_port_info *pi, u8 layer,
+ struct ice_aqc_txsched_elem_data *info)
+{
+ struct ice_sched_node *parent;
+ struct ice_sched_node *node;
+ struct ice_hw *hw;
+ u16 max_children;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+
+ /* A valid parent node should be there */
+ parent = ice_sched_find_node_by_teid(pi->root,
+ le32_to_cpu(info->parent_teid));
+ if (!parent) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Parent Node not found for parent_teid=0x%x\n",
+ le32_to_cpu(info->parent_teid));
+ return ICE_ERR_PARAM;
+ }
+
+ node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ICE_ERR_NO_MEMORY;
+ max_children = le16_to_cpu(hw->layer_info[layer].max_children);
+ if (max_children) {
+ node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children,
+ sizeof(*node), GFP_KERNEL);
+ if (!node->children) {
+ devm_kfree(ice_hw_to_dev(hw), node);
+ return ICE_ERR_NO_MEMORY;
+ }
+ }
+
+ node->in_use = true;
+ node->parent = parent;
+ node->tx_sched_layer = layer;
+ parent->children[parent->num_children++] = node;
+ memcpy(&node->info, info, sizeof(*info));
+ return 0;
+}
+
+/**
+ * ice_aq_delete_sched_elems - delete scheduler elements
+ * @hw: pointer to the hw struct
+ * @grps_req: number of groups to delete
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @grps_del: returns total number of elements deleted
+ * @cd: pointer to command details structure or NULL
+ *
+ * Delete scheduling elements (0x040F)
+ */
+static enum ice_status
+ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
+ struct ice_aqc_delete_elem *buf, u16 buf_size,
+ u16 *grps_del, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_move_delete_elem *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.add_move_delete_elem;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd->num_grps_req = cpu_to_le16(grps_req);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && grps_del)
+ *grps_del = le16_to_cpu(cmd->num_grps_updated);
+
+ return status;
+}
+
+/**
+ * ice_sched_remove_elems - remove nodes from hw
+ * @hw: pointer to the hw struct
+ * @parent: pointer to the parent node
+ * @num_nodes: number of nodes
+ * @node_teids: array of node teids to be deleted
+ *
+ * This function remove nodes from hw
+ */
+static enum ice_status
+ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
+ u16 num_nodes, u32 *node_teids)
+{
+ struct ice_aqc_delete_elem *buf;
+ u16 i, num_groups_removed = 0;
+ enum ice_status status;
+ u16 buf_size;
+
+ buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
+ buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+ buf->hdr.parent_teid = parent->info.node_teid;
+ buf->hdr.num_elems = cpu_to_le16(num_nodes);
+ for (i = 0; i < num_nodes; i++)
+ buf->teid[i] = cpu_to_le32(node_teids[i]);
+ status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
+ &num_groups_removed, NULL);
+ if (status || num_groups_removed != 1)
+ ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
+ devm_kfree(ice_hw_to_dev(hw), buf);
+ return status;
+}
+
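+/* Note on the sizing above: struct ice_aqc_delete_elem already carries
+ * one teid entry, so only (num_nodes - 1) extra u32 entries are added.
+ * Deleting 4 nodes, for example, needs sizeof(*buf) + 3 * sizeof(u32)
+ * bytes.
+ */
+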
+/**
+ * ice_sched_get_first_node - get the first node of the given layer
+ * @hw: pointer to the hw struct
+ * @parent: pointer the base node of the subtree
+ * @layer: layer number
+ *
+ * This function retrieves the first node of the given layer from the subtree
+ */
+static struct ice_sched_node *
+ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent,
+ u8 layer)
+{
+ u8 i;
+
+ if (layer < hw->sw_entry_point_layer)
+ return NULL;
+ for (i = 0; i < parent->num_children; i++) {
+ struct ice_sched_node *node = parent->children[i];
+
+ if (node) {
+ if (node->tx_sched_layer == layer)
+ return node;
+ /* this recursion is intentional; it cannot go deeper
+ * than 9 calls, one per scheduler layer
+ */
+ return ice_sched_get_first_node(hw, node, layer);
+ }
+ }
+ return NULL;
+}
+
+/**
+ * ice_sched_get_tc_node - get pointer to TC node
+ * @pi: port information structure
+ * @tc: TC number
+ *
+ * This function returns the TC node pointer
+ */
+struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
+{
+ u8 i;
+
+ if (!pi)
+ return NULL;
+ for (i = 0; i < pi->root->num_children; i++)
+ if (pi->root->children[i]->tc_num == tc)
+ return pi->root->children[i];
+ return NULL;
+}
+
+/**
+ * ice_free_sched_node - Free a Tx scheduler node from SW DB
+ * @pi: port information structure
+ * @node: pointer to the ice_sched_node struct
+ *
+ * This function frees up a node from SW DB as well as from HW
+ *
+ * This function needs to be called with the port_info->sched_lock held
+ */
+void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
+{
+ struct ice_sched_node *parent;
+ struct ice_hw *hw = pi->hw;
+ u8 i, j;
+
+ /* Free the children before freeing up the parent node
+ * The parent array is updated below and that shifts the nodes
+ * in the array. So always pick the first child if num children > 0
+ */
+ while (node->num_children)
+ ice_free_sched_node(pi, node->children[0]);
+
+ /* Leaf, TC and root nodes can't be deleted by SW */
+ if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
+ node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
+ node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
+ node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
+ u32 teid = le32_to_cpu(node->info.node_teid);
+ enum ice_status status;
+
+ status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
+ if (status)
+ ice_debug(hw, ICE_DBG_SCHED,
+ "remove element failed %d\n", status);
+ }
+ parent = node->parent;
+ /* root has no parent */
+ if (parent) {
+ struct ice_sched_node *p, *tc_node;
+
+ /* update the parent */
+ for (i = 0; i < parent->num_children; i++)
+ if (parent->children[i] == node) {
+ for (j = i + 1; j < parent->num_children; j++)
+ parent->children[j - 1] =
+ parent->children[j];
+ parent->num_children--;
+ break;
+ }
+
+ /* search for previous sibling that points to this node and
+ * remove the reference
+ */
+ tc_node = ice_sched_get_tc_node(pi, node->tc_num);
+ if (!tc_node) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Invalid TC number %d\n", node->tc_num);
+ goto err_exit;
+ }
+ p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);
+ while (p) {
+ if (p->sibling == node) {
+ p->sibling = node->sibling;
+ break;
+ }
+ p = p->sibling;
+ }
+ }
+err_exit:
+ /* leaf nodes have no children */
+ if (node->children)
+ devm_kfree(ice_hw_to_dev(hw), node->children);
+ devm_kfree(ice_hw_to_dev(hw), node);
+}
+
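+/* Illustrative usage (hypothetical caller): ice_free_sched_node edits
+ * the shared tree, so per its kernel-doc the scheduler lock must be
+ * held around the call:
+ *
+ * mutex_lock(&pi->sched_lock);
+ * ice_free_sched_node(pi, node);
+ * mutex_unlock(&pi->sched_lock);
+ */
+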
+/**
+ * ice_aq_get_dflt_topo - gets default scheduler topology
+ * @hw: pointer to the hw struct
+ * @lport: logical port number
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_branches: returns total number of queue to port branches
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get default scheduler topology (0x0400)
+ */
+static enum ice_status
+ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
+ struct ice_aqc_get_topo_elem *buf, u16 buf_size,
+ u8 *num_branches, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_topo *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_topo;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
+ cmd->port_num = lport;
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && num_branches)
+ *num_branches = cmd->num_branches;
+
+ return status;
+}
+
+/**
+ * ice_aq_add_sched_elems - add scheduling elements
+ * @hw: pointer to the hw struct
+ * @grps_req: the number of groups that are requested to be added
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @grps_added: returns total number of groups added
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add scheduling elements (0x0401)
+ */
+static enum ice_status
+ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
+ struct ice_aqc_add_elem *buf, u16 buf_size,
+ u16 *grps_added, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_move_delete_elem *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.add_move_delete_elem;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->num_grps_req = cpu_to_le16(grps_req);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && grps_added)
+ *grps_added = le16_to_cpu(cmd->num_grps_updated);
+
+ return status;
+}
+
+/**
+ * ice_suspend_resume_elems - suspend/resume scheduler elements
+ * @hw: pointer to the hw struct
+ * @elems_req: number of elements to suspend or resume
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements suspended or resumed
+ * @cd: pointer to command details structure or NULL
+ * @cmd_code: command code for suspend or resume
+ *
+ * suspend/resume scheduler elements
+ */
+static enum ice_status
+ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_suspend_resume_elem *buf, u16 buf_size,
+ u16 *elems_ret, struct ice_sq_cd *cd,
+ enum ice_adminq_opc cmd_code)
+{
+ struct ice_aqc_get_cfg_elem *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_update_elem;
+ ice_fill_dflt_direct_cmd_desc(&desc, cmd_code);
+ cmd->num_elem_req = cpu_to_le16(elems_req);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && elems_ret)
+ *elems_ret = le16_to_cpu(cmd->num_elem_resp);
+ return status;
+}
+
+/**
+ * ice_aq_suspend_sched_elems - suspend scheduler elements
+ * @hw: pointer to the hw struct
+ * @elems_req: number of elements to suspend
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements suspended
+ * @cd: pointer to command details structure or NULL
+ *
+ * Suspend scheduling elements (0x0409)
+ */
+static enum ice_status
+ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_suspend_resume_elem *buf,
+ u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
+{
+ return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
+ cd, ice_aqc_opc_suspend_sched_elems);
+}
+
+/**
+ * ice_aq_resume_sched_elems - resume scheduler elements
+ * @hw: pointer to the hw struct
+ * @elems_req: number of elements to resume
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements resumed
+ * @cd: pointer to command details structure or NULL
+ *
+ * resume scheduling elements (0x040A)
+ */
+static enum ice_status
+ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_suspend_resume_elem *buf,
+ u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
+{
+ return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret,
+ cd, ice_aqc_opc_resume_sched_elems);
+}
+
+/**
+ * ice_aq_query_sched_res - query scheduler resource
+ * @hw: pointer to the hw struct
+ * @buf_size: buffer size in bytes
+ * @buf: pointer to buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query scheduler resource allocation (0x0412)
+ */
+static enum ice_status
+ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
+ struct ice_aqc_query_txsched_res_resp *buf,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_sched_suspend_resume_elems - suspend or resume hw nodes
+ * @hw: pointer to the hw struct
+ * @num_nodes: number of nodes
+ * @node_teids: array of node teids to be suspended or resumed
+ * @suspend: true means suspend / false means resume
+ *
+ * This function suspends or resumes hw nodes
+ */
+static enum ice_status
+ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
+ bool suspend)
+{
+ struct ice_aqc_suspend_resume_elem *buf;
+ u16 i, buf_size, num_elem_ret = 0;
+ enum ice_status status;
+
+ buf_size = sizeof(*buf) * num_nodes;
+ buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < num_nodes; i++)
+ buf->teid[i] = cpu_to_le32(node_teids[i]);
+
+ if (suspend)
+ status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
+ buf_size, &num_elem_ret,
+ NULL);
+ else
+ status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
+ buf_size, &num_elem_ret,
+ NULL);
+ if (status || num_elem_ret != num_nodes)
+ ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
+
+ devm_kfree(ice_hw_to_dev(hw), buf);
+ return status;
+}
+
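+/* Sketch of a suspend/resume round trip for one node, assuming 'teid'
+ * holds a valid node TEID (ice_sched_cfg_vsi below is the in-tree user):
+ *
+ * status = ice_sched_suspend_resume_elems(hw, 1, &teid, true);
+ * ...
+ * status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
+ */
+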
+/**
+ * ice_sched_clear_tx_topo - clears the scheduler tree nodes
+ * @pi: port information structure
+ *
+ * This function removes all the nodes from HW as well as from SW DB.
+ */
+static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
+{
+ struct ice_sched_agg_info *agg_info;
+ struct ice_sched_vsi_info *vsi_elem;
+ struct ice_sched_agg_info *atmp;
+ struct ice_sched_vsi_info *tmp;
+ struct ice_hw *hw;
+
+ if (!pi)
+ return;
+
+ hw = pi->hw;
+
+ list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_vsi_info *vtmp;
+
+ list_for_each_entry_safe(agg_vsi_info, vtmp,
+ &agg_info->agg_vsi_list, list_entry) {
+ list_del(&agg_vsi_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
+ }
+ }
+
+ /* remove the vsi list */
+ list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list,
+ list_entry) {
+ list_del(&vsi_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), vsi_elem);
+ }
+
+ if (pi->root) {
+ ice_free_sched_node(pi, pi->root);
+ pi->root = NULL;
+ }
+}
+
+/**
+ * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
+ * @pi: port information structure
+ *
+ * Cleanup scheduling elements from SW DB
+ */
+static void ice_sched_clear_port(struct ice_port_info *pi)
+{
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return;
+
+ pi->port_state = ICE_SCHED_PORT_STATE_INIT;
+ mutex_lock(&pi->sched_lock);
+ ice_sched_clear_tx_topo(pi);
+ mutex_unlock(&pi->sched_lock);
+ mutex_destroy(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
+ * @hw: pointer to the hw struct
+ *
+ * Cleanup scheduling elements from SW DB for all the ports
+ */
+void ice_sched_cleanup_all(struct ice_hw *hw)
+{
+ if (!hw || !hw->port_info)
+ return;
+
+ if (hw->layer_info)
+ devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
+
+ ice_sched_clear_port(hw->port_info);
+
+ hw->num_tx_sched_layers = 0;
+ hw->num_tx_sched_phys_layers = 0;
+ hw->flattened_layers = 0;
+ hw->max_cgds = 0;
+}
+
+/**
+ * ice_sched_create_vsi_info_entry - create an empty new VSI entry
+ * @pi: port information structure
+ * @vsi_id: VSI Id
+ *
+ * This function creates a new VSI entry and adds it to the VSI info list
+ */
+static struct ice_sched_vsi_info *
+ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
+{
+ struct ice_sched_vsi_info *vsi_elem;
+
+ if (!pi)
+ return NULL;
+
+ vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem),
+ GFP_KERNEL);
+ if (!vsi_elem)
+ return NULL;
+
+ list_add(&vsi_elem->list_entry, &pi->vsi_info_list);
+ vsi_elem->vsi_id = vsi_id;
+ return vsi_elem;
+}
+
+/**
+ * ice_sched_add_elems - add nodes to hw and SW DB
+ * @pi: port information structure
+ * @tc_node: pointer to the branch node
+ * @parent: pointer to the parent node
+ * @layer: layer number to add nodes
+ * @num_nodes: number of nodes
+ * @num_nodes_added: pointer to num nodes added
+ * @first_node_teid: if new nodes are added then return the teid of first node
+ *
+ * This function adds nodes to hw as well as to the SW DB for a given layer
+ */
+static enum ice_status
+ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ struct ice_sched_node *parent, u8 layer, u16 num_nodes,
+ u16 *num_nodes_added, u32 *first_node_teid)
+{
+ struct ice_sched_node *prev, *new_node;
+ struct ice_aqc_add_elem *buf;
+ u16 i, num_groups_added = 0;
+ enum ice_status status = 0;
+ struct ice_hw *hw = pi->hw;
+ u16 buf_size;
+ u32 teid;
+
+ buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1);
+ buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->hdr.parent_teid = parent->info.node_teid;
+ buf->hdr.num_elems = cpu_to_le16(num_nodes);
+ for (i = 0; i < num_nodes; i++) {
+ buf->generic[i].parent_teid = parent->info.node_teid;
+ buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
+ buf->generic[i].data.valid_sections =
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+ ICE_AQC_ELEM_VALID_EIR;
+ buf->generic[i].data.generic = 0;
+ buf->generic[i].data.cir_bw.bw_profile_idx =
+ ICE_SCHED_DFLT_RL_PROF_ID;
+ buf->generic[i].data.eir_bw.bw_profile_idx =
+ ICE_SCHED_DFLT_RL_PROF_ID;
+ }
+
+ status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
+ &num_groups_added, NULL);
+ if (status || num_groups_added != 1) {
+ ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
+ devm_kfree(ice_hw_to_dev(hw), buf);
+ return ICE_ERR_CFG;
+ }
+
+ *num_nodes_added = num_nodes;
+ /* add nodes to the SW DB */
+ for (i = 0; i < num_nodes; i++) {
+ status = ice_sched_add_node(pi, layer, &buf->generic[i]);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "add nodes in SW DB failed status =%d\n",
+ status);
+ break;
+ }
+
+ teid = le32_to_cpu(buf->generic[i].node_teid);
+ new_node = ice_sched_find_node_by_teid(parent, teid);
+
+ if (!new_node) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Node is missing for teid =%d\n", teid);
+ break;
+ }
+
+ new_node->sibling = NULL;
+ new_node->tc_num = tc_node->tc_num;
+
+ /* add it to previous node sibling pointer */
+ /* Note: siblings are not linked across branches */
+ prev = ice_sched_get_first_node(hw, tc_node, layer);
+
+ if (prev && prev != new_node) {
+ while (prev->sibling)
+ prev = prev->sibling;
+ prev->sibling = new_node;
+ }
+
+ if (i == 0)
+ *first_node_teid = teid;
+ }
+
+ devm_kfree(ice_hw_to_dev(hw), buf);
+ return status;
+}
+
+/**
+ * ice_sched_add_nodes_to_layer - Add nodes to a given layer
+ * @pi: port information structure
+ * @tc_node: pointer to TC node
+ * @parent: pointer to parent node
+ * @layer: layer number to add nodes
+ * @num_nodes: number of nodes to be added
+ * @first_node_teid: pointer to the first node teid
+ * @num_nodes_added: pointer to number of nodes added
+ *
+ * This function adds nodes to a given layer.
+ */
+static enum ice_status
+ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
+ struct ice_sched_node *tc_node,
+ struct ice_sched_node *parent, u8 layer,
+ u16 num_nodes, u32 *first_node_teid,
+ u16 *num_nodes_added)
+{
+ u32 *first_teid_ptr = first_node_teid;
+ u16 new_num_nodes, max_child_nodes;
+ enum ice_status status = 0;
+ struct ice_hw *hw = pi->hw;
+ u16 num_added = 0;
+ u32 temp;
+
+ if (!num_nodes)
+ return status;
+
+ if (!parent || layer < hw->sw_entry_point_layer)
+ return ICE_ERR_PARAM;
+
+ *num_nodes_added = 0;
+
+ /* max children per node per layer */
+ max_child_nodes =
+ le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
+
+ /* current number of children + required nodes exceed max children ? */
+ if ((parent->num_children + num_nodes) > max_child_nodes) {
+ /* Fail if the parent is a TC node */
+ if (parent == tc_node)
+ return ICE_ERR_CFG;
+
+ /* utilize all the spaces if the parent is not full */
+ if (parent->num_children < max_child_nodes) {
+ new_num_nodes = max_child_nodes - parent->num_children;
+ /* this recursion is intentional, and wouldn't
+ * go more than 2 calls
+ */
+ status = ice_sched_add_nodes_to_layer(pi, tc_node,
+ parent, layer,
+ new_num_nodes,
+ first_node_teid,
+ &num_added);
+ if (status)
+ return status;
+
+ *num_nodes_added += num_added;
+ }
+ /* Don't modify the first node teid memory if the first node was
+ * added already in the above call. Instead send some temp
+ * memory for all other recursive calls.
+ */
+ if (num_added)
+ first_teid_ptr = &temp;
+
+ new_num_nodes = num_nodes - num_added;
+
+ /* This parent is full, try the next sibling */
+ parent = parent->sibling;
+
+ /* this recursion is intentional. For 1024 queues per
+ * VSI it recurses at most 16 times:
+ * 1024 / 8 = 128 layer-8 nodes,
+ * 128 / 8 = 16 calls, adding 8 nodes per call
+ */
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
+ layer, new_num_nodes,
+ first_teid_ptr,
+ &num_added);
+ *num_nodes_added += num_added;
+ return status;
+ }
+
+ status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
+ num_nodes_added, first_node_teid);
+ return status;
+}
+
+/**
+ * ice_sched_get_qgrp_layer - get the current queue group layer number
+ * @hw: pointer to the hw struct
+ *
+ * This function returns the current queue group layer number
+ */
+static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
+{
+ /* it's always total layers - 1 when counting from 1; layer
+ * numbers are 0 based, hence the offset of 2
+ */
+ return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
+}
+
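+/* For example, in the default 9-layer topology the leaf (queue) layer
+ * is index 8, so the queue group layer is 9 - ICE_QGRP_LAYER_OFFSET = 7
+ * in 0-based layer numbers.
+ */
+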
+/**
+ * ice_sched_get_vsi_layer - get the current VSI layer number
+ * @hw: pointer to the hw struct
+ *
+ * This function returns the current VSI layer number
+ */
+static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
+{
+ /* Num Layers VSI layer
+ * 9 6
+ * 7 4
+ * 5 or less sw_entry_point_layer
+ */
+ /* calculate the vsi layer based on number of layers. */
+ if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
+ u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+
+ if (layer > hw->sw_entry_point_layer)
+ return layer;
+ }
+ return hw->sw_entry_point_layer;
+}
+
+/**
+ * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer
+ * @pi: pointer to the port info struct
+ * @layer: layer number
+ *
+ * This function calculates the number of nodes present in the scheduler tree
+ * including all the branches for a given layer
+ */
+static u16
+ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer)
+{
+ struct ice_hw *hw;
+ u16 num_nodes = 0;
+ u8 i;
+
+ if (!pi)
+ return num_nodes;
+
+ hw = pi->hw;
+
+ /* Calculate the number of nodes for all TCs */
+ for (i = 0; i < pi->root->num_children; i++) {
+ struct ice_sched_node *tc_node, *node;
+
+ tc_node = pi->root->children[i];
+
+ /* Get the first node */
+ node = ice_sched_get_first_node(hw, tc_node, layer);
+ if (!node)
+ continue;
+
+ /* count the siblings */
+ while (node) {
+ num_nodes++;
+ node = node->sibling;
+ }
+ }
+
+ return num_nodes;
+}
+
+/**
+ * ice_sched_validate_for_max_nodes - check whether max number of nodes is reached
+ * @pi: port information structure
+ * @new_num_nodes_per_layer: pointer to the new number of nodes array
+ *
+ * This function checks whether the scheduler tree layers have enough space to
+ * add new nodes
+ */
+static enum ice_status
+ice_sched_validate_for_max_nodes(struct ice_port_info *pi,
+ u16 *new_num_nodes_per_layer)
+{
+ struct ice_hw *hw = pi->hw;
+ u8 i, qg_layer;
+ u16 num_nodes;
+
+ qg_layer = ice_sched_get_qgrp_layer(hw);
+
+ /* walk through all the layers from SW entry point to qgroup layer */
+ for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) {
+ num_nodes = ice_sched_get_num_nodes_per_layer(pi, i);
+ if (num_nodes + new_num_nodes_per_layer[i] >
+ le16_to_cpu(hw->layer_info[i].max_pf_nodes)) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "max nodes reached for layer = %d\n", i);
+ return ICE_ERR_CFG;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
+ * @pi: port information structure
+ *
+ * This function removes the leaf node that was created by the FW
+ * during initialization
+ */
+static void
+ice_rm_dflt_leaf_node(struct ice_port_info *pi)
+{
+ struct ice_sched_node *node;
+
+ node = pi->root;
+ while (node) {
+ if (!node->num_children)
+ break;
+ node = node->children[0];
+ }
+ if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
+ u32 teid = le32_to_cpu(node->info.node_teid);
+ enum ice_status status;
+
+ /* remove the default leaf node */
+ status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
+ if (!status)
+ ice_free_sched_node(pi, node);
+ }
+}
+
+/**
+ * ice_sched_rm_dflt_nodes - free the default nodes in the tree
+ * @pi: port information structure
+ *
+ * This function frees all the nodes except root and TC that were created by
+ * the FW during initialization
+ */
+static void
+ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
+{
+ struct ice_sched_node *node;
+
+ ice_rm_dflt_leaf_node(pi);
+
+ /* remove the default nodes except TC and root nodes */
+ node = pi->root;
+ while (node) {
+ if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
+ node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
+ node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
+ ice_free_sched_node(pi, node);
+ break;
+ }
+
+ if (!node->num_children)
+ break;
+ node = node->children[0];
+ }
+}
+
+/**
+ * ice_sched_init_port - Initialize scheduler by querying information from FW
+ * @pi: port information structure
+ *
+ * This function is the initial call to find the total number of Tx scheduler
+ * resources, default topology created by firmware and storing the information
+ * in SW DB.
+ */
+enum ice_status ice_sched_init_port(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_topo_elem *buf;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 num_branches;
+ u16 num_elems;
+ u8 i, j;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ hw = pi->hw;
+
+ /* Query the Default Topology from FW */
+ buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES,
+ sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Query default scheduling tree topology */
+ status = ice_aq_get_dflt_topo(hw, pi->lport, buf,
+ sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES,
+ &num_branches, NULL);
+ if (status)
+ goto err_init_port;
+
+ /* num_branches should be between 1-8 */
+ if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
+ ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
+ num_branches);
+ status = ICE_ERR_PARAM;
+ goto err_init_port;
+ }
+
+ /* get the number of elements on the default/first branch */
+ num_elems = le16_to_cpu(buf[0].hdr.num_elems);
+
+ /* num_elems should always be between 1-9 */
+ if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
+ ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
+ num_elems);
+ status = ICE_ERR_PARAM;
+ goto err_init_port;
+ }
+
+ /* If the last node is a leaf node then the index of the Q group
+ * layer is two less than the number of elements.
+ */
+ if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
+ ICE_AQC_ELEM_TYPE_LEAF)
+ pi->last_node_teid =
+ le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
+ else
+ pi->last_node_teid =
+ le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);
+
+ /* Insert the Tx Sched root node */
+ status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
+ if (status)
+ goto err_init_port;
+
+ /* Parse the default tree and cache the information */
+ for (i = 0; i < num_branches; i++) {
+ num_elems = le16_to_cpu(buf[i].hdr.num_elems);
+
+ /* Skip root element as already inserted */
+ for (j = 1; j < num_elems; j++) {
+ /* update the sw entry point */
+ if (buf[0].generic[j].data.elem_type ==
+ ICE_AQC_ELEM_TYPE_ENTRY_POINT)
+ hw->sw_entry_point_layer = j;
+
+ status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
+ if (status)
+ goto err_init_port;
+ }
+ }
+
+ /* Remove the default nodes. */
+ if (pi->root)
+ ice_sched_rm_dflt_nodes(pi);
+
+ /* initialize the port for handling the scheduler tree */
+ pi->port_state = ICE_SCHED_PORT_STATE_READY;
+ mutex_init(&pi->sched_lock);
+ INIT_LIST_HEAD(&pi->agg_list);
+ INIT_LIST_HEAD(&pi->vsi_info_list);
+
+err_init_port:
+ if (status && pi->root) {
+ ice_free_sched_node(pi, pi->root);
+ pi->root = NULL;
+ }
+
+ devm_kfree(ice_hw_to_dev(hw), buf);
+ return status;
+}
+
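+/* Ordering note (editorial): ice_sched_add_node dereferences
+ * hw->layer_info, so a hypothetical init flow must query scheduler
+ * resources before parsing the default topology:
+ *
+ * status = ice_sched_query_res_alloc(hw);
+ * if (!status)
+ * status = ice_sched_init_port(hw->port_info);
+ */
+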
+/**
+ * ice_sched_query_res_alloc - query the FW for num of logical sched layers
+ * @hw: pointer to the HW struct
+ *
+ * query FW for allocated scheduler resources and store in HW struct
+ */
+enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
+{
+ struct ice_aqc_query_txsched_res_resp *buf;
+ enum ice_status status = 0;
+
+ if (hw->layer_info)
+ return status;
+
+ buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
+ if (status)
+ goto sched_query_out;
+
+ hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
+ hw->num_tx_sched_phys_layers =
+ le16_to_cpu(buf->sched_props.phys_levels);
+ hw->flattened_layers = buf->sched_props.flattening_bitmap;
+ hw->max_cgds = buf->sched_props.max_pf_cgds;
+
+ hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
+ (hw->num_tx_sched_layers *
+ sizeof(*hw->layer_info)),
+ GFP_KERNEL);
+ if (!hw->layer_info) {
+ status = ICE_ERR_NO_MEMORY;
+ goto sched_query_out;
+ }
+
+sched_query_out:
+ devm_kfree(ice_hw_to_dev(hw), buf);
+ return status;
+}
+
+/**
+ * ice_sched_get_vsi_info_entry - Get the VSI entry for the given vsi_id
+ * @pi: port information structure
+ * @vsi_id: vsi id
+ *
+ * This function retrieves the VSI entry from the VSI info list for the
+ * given VSI id
+ */
+static struct ice_sched_vsi_info *
+ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id)
+{
+ struct ice_sched_vsi_info *list_elem;
+
+ if (!pi)
+ return NULL;
+
+ list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry)
+ if (list_elem->vsi_id == vsi_id)
+ return list_elem;
+ return NULL;
+}
+
+/**
+ * ice_sched_find_node_in_subtree - Find node in part of base node subtree
+ * @hw: pointer to the hw struct
+ * @base: pointer to the base node
+ * @node: pointer to the node to search
+ *
+ * This function checks whether a given node is part of the base node
+ * subtree or not
+ */
+static bool
+ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
+ struct ice_sched_node *node)
+{
+ u8 i;
+
+ for (i = 0; i < base->num_children; i++) {
+ struct ice_sched_node *child = base->children[i];
+
+ if (node == child)
+ return true;
+
+ if (child->tx_sched_layer > node->tx_sched_layer)
+ return false;
+
+ /* this recursion is intentional, and wouldn't
+ * go more than 8 calls
+ */
+ if (ice_sched_find_node_in_subtree(hw, child, node))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ice_sched_get_free_qparent - Get a free lan or rdma q group node
+ * @pi: port information structure
+ * @vsi_id: vsi id
+ * @tc: branch number
+ * @owner: lan or rdma
+ *
+ * This function retrieves a free lan or rdma q group node
+ */
+struct ice_sched_node *
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ u8 owner)
+{
+ struct ice_sched_node *vsi_node, *qgrp_node = NULL;
+ struct ice_sched_vsi_info *list_elem;
+ u16 max_children;
+ u8 qgrp_layer;
+
+ qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
+ max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children);
+
+ list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id);
+ if (!list_elem)
+ goto lan_q_exit;
+
+ vsi_node = list_elem->vsi_node[tc];
+
+ /* an invalid VSI id has no node on this TC; bail out */
+ if (!vsi_node)
+ goto lan_q_exit;
+
+ /* get the first q group node from VSI sub-tree */
+ qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
+ while (qgrp_node) {
+ /* make sure the qgroup node is part of the VSI subtree */
+ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
+ if (qgrp_node->num_children < max_children &&
+ qgrp_node->owner == owner)
+ break;
+ qgrp_node = qgrp_node->sibling;
+ }
+
+lan_q_exit:
+ return qgrp_node;
+}
+
+/**
+ * ice_sched_get_vsi_node - Get a VSI node based on VSI id
+ * @hw: pointer to the hw struct
+ * @tc_node: pointer to the TC node
+ * @vsi_id: VSI id
+ *
+ * This function retrieves a VSI node for a given VSI id from a given
+ * TC branch
+ */
+static struct ice_sched_node *
+ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
+ u16 vsi_id)
+{
+ struct ice_sched_node *node;
+ u8 vsi_layer;
+
+ vsi_layer = ice_sched_get_vsi_layer(hw);
+ node = ice_sched_get_first_node(hw, tc_node, vsi_layer);
+
+ /* walk the siblings looking for a matching VSI id */
+ while (node) {
+ if (node->vsi_id == vsi_id)
+ return node;
+ node = node->sibling;
+ }
+
+ return node;
+}
+
+/**
+ * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
+ * @hw: pointer to the hw struct
+ * @num_qs: number of queues
+ * @num_nodes: num nodes array
+ *
+ * This function calculates the number of VSI child nodes based on the
+ * number of queues.
+ */
+static void
+ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
+{
+ u16 num = num_qs;
+ u8 i, qgl, vsil;
+
+ qgl = ice_sched_get_qgrp_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+
+ /* calculate num nodes from q group to VSI layer */
+ for (i = qgl; i > vsil; i--) {
+ u16 max_children = le16_to_cpu(hw->layer_info[i].max_children);
+
+ /* round to the next integer if there is a remainder */
+ num = DIV_ROUND_UP(num, max_children);
+
+ /* need at least one node */
+ num_nodes[i] = num ? num : 1;
+ }
+}
+
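+/* Worked example (illustrative numbers): with max_children = 8 on every
+ * layer, num_qs = 1024, qgl = 7 and vsil = 5, the loop produces
+ * num_nodes[7] = DIV_ROUND_UP(1024, 8) = 128 and
+ * num_nodes[6] = DIV_ROUND_UP(128, 8) = 16.
+ */
+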
+/**
+ * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
+ * @pi: port information structure
+ * @vsi_id: VSI id
+ * @tc_node: pointer to the TC node
+ * @num_nodes: pointer to the num nodes that needs to be added per layer
+ * @owner: node owner (lan or rdma)
+ *
+ * This function adds the VSI child nodes to tree. It gets called for
+ * lan and rdma separately.
+ */
+static enum ice_status
+ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id,
+ struct ice_sched_node *tc_node, u16 *num_nodes,
+ u8 owner)
+{
+ struct ice_sched_node *parent, *node;
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+ u32 first_node_teid;
+ u16 num_added = 0;
+ u8 i, qgl, vsil;
+
+ status = ice_sched_validate_for_max_nodes(pi, num_nodes);
+ if (status)
+ return status;
+
+ qgl = ice_sched_get_qgrp_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+ parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ for (i = vsil + 1; i <= qgl; i++) {
+ if (!parent)
+ return ICE_ERR_CFG;
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
+ num_nodes[i],
+ &first_node_teid,
+ &num_added);
+ if (status || num_nodes[i] != num_added)
+ return ICE_ERR_CFG;
+
+ /* The newly added node can be a new parent for the next
+ * layer nodes
+ */
+ if (num_added) {
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_node_teid);
+ node = parent;
+ while (node) {
+ node->owner = owner;
+ node = node->sibling;
+ }
+ } else {
+ parent = parent->children[0];
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
+ * @pi: port information structure
+ * @vsi_node: pointer to the VSI node
+ * @num_nodes: pointer to the num nodes that needs to be removed per layer
+ * @owner: node owner (lan or rdma)
+ *
+ * This function removes the VSI child nodes from the tree. It gets called for
+ * lan and rdma separately.
+ */
+static void
+ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node, u16 *num_nodes,
+ u8 owner)
+{
+ struct ice_sched_node *node, *next;
+ u8 i, qgl, vsil;
+ u16 num;
+
+ qgl = ice_sched_get_qgrp_layer(pi->hw);
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+
+ for (i = qgl; i > vsil; i--) {
+ num = num_nodes[i];
+ node = ice_sched_get_first_node(pi->hw, vsi_node, i);
+ while (node && num) {
+ next = node->sibling;
+ if (node->owner == owner && !node->num_children) {
+ ice_free_sched_node(pi, node);
+ num--;
+ }
+ node = next;
+ }
+ }
+}
+
+/**
+ * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
+ * @hw: pointer to the hw struct
+ * @tc_node: pointer to TC node
+ * @num_nodes: pointer to num nodes array
+ *
+ * This function calculates the number of support nodes needed to add this
+ * VSI into the Tx tree, including the VSI node itself and the parent and
+ * intermediate nodes in the layers between the SW entry point and the
+ * VSI layer
+ */
+static void
+ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
+ struct ice_sched_node *tc_node, u16 *num_nodes)
+{
+ struct ice_sched_node *node;
+ u16 max_child;
+ u8 i, vsil;
+
+ vsil = ice_sched_get_vsi_layer(hw);
+ for (i = vsil; i >= hw->sw_entry_point_layer; i--)
+ /* Add intermediate nodes if TC has no children and
+ * need at least one node for VSI
+ */
+ if (!tc_node->num_children || i == vsil) {
+ num_nodes[i]++;
+ } else {
+ /* If intermediate nodes are reached max children
+ * then add a new one.
+ */
+ node = ice_sched_get_first_node(hw, tc_node, i);
+ max_child = le16_to_cpu(hw->layer_info[i].max_children);
+
+ /* scan all the siblings */
+ while (node) {
+ if (node->num_children < max_child)
+ break;
+ node = node->sibling;
+ }
+
+ /* all the nodes are full, allocate a new one */
+ if (!node)
+ num_nodes[i]++;
+ }
+}
+
+/**
+ * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
+ * @pi: port information structure
+ * @vsi_id: VSI Id
+ * @tc_node: pointer to TC node
+ * @num_nodes: pointer to num nodes array
+ *
+ * This function adds the VSI support nodes into the Tx tree, including
+ * the VSI, its parent and the intermediate nodes in the layers between
+ * the SW entry point and the VSI layer
+ */
+static enum ice_status
+ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id,
+ struct ice_sched_node *tc_node, u16 *num_nodes)
+{
+ struct ice_sched_node *parent = tc_node;
+ enum ice_status status;
+ u32 first_node_teid;
+ u16 num_added = 0;
+ u8 i, vsil;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ status = ice_sched_validate_for_max_nodes(pi, num_nodes);
+ if (status)
+ return status;
+
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+ for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
+ i, num_nodes[i],
+ &first_node_teid,
+ &num_added);
+ if (status || num_nodes[i] != num_added)
+ return ICE_ERR_CFG;
+
+ /* The newly added node can be a new parent for the next
+ * layer nodes
+ */
+ if (num_added)
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_node_teid);
+ else
+ parent = parent->children[0];
+
+ if (!parent)
+ return ICE_ERR_CFG;
+
+ if (i == vsil)
+ parent->vsi_id = vsi_id;
+ }
+ return 0;
+}
+
+/**
+ * ice_sched_add_vsi_to_topo - add a new VSI into tree
+ * @pi: port information structure
+ * @vsi_id: VSI Id
+ * @tc: TC number
+ *
+ * This function adds a new VSI into scheduler tree
+ */
+static enum ice_status
+ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc)
+{
+ u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ struct ice_sched_node *tc_node;
+ struct ice_hw *hw = pi->hw;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_PARAM;
+
+ /* calculate number of supported nodes needed for this VSI */
+ ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
+
+ /* add vsi supported nodes to tc subtree */
+ return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes);
+}
+
+/**
+ * ice_sched_update_vsi_child_nodes - update VSI child nodes
+ * @pi: port information structure
+ * @vsi_id: VSI Id
+ * @tc: TC number
+ * @new_numqs: new number of max queues
+ * @owner: owner of this subtree
+ *
+ * This function updates the VSI child nodes based on the number of queues
+ */
+static enum ice_status
+ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ u16 new_numqs, u8 owner)
+{
+ u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ struct ice_sched_node *vsi_node;
+ struct ice_sched_node *tc_node;
+ struct ice_sched_vsi_info *vsi;
+ enum ice_status status = 0;
+ struct ice_hw *hw = pi->hw;
+ u16 prev_numqs;
+ u8 i;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ if (!vsi_node)
+ return ICE_ERR_CFG;
+
+ vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
+ if (!vsi)
+ return ICE_ERR_CFG;
+
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ prev_numqs = vsi->max_lanq[tc];
+ else
+ return ICE_ERR_PARAM;
+
+ /* return early if the number of queues is unchanged */
+ if (prev_numqs == new_numqs)
+ return status;
+
+ /* calculate number of nodes based on prev/new number of qs */
+ if (prev_numqs)
+ ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);
+
+ if (new_numqs)
+ ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
+
+ if (prev_numqs > new_numqs) {
+ for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
+ new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];
+
+ ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
+ owner);
+ } else {
+ for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
+ new_num_nodes[i] -= prev_num_nodes[i];
+
+ status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node,
+ new_num_nodes, owner);
+ if (status)
+ return status;
+ }
+
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ vsi->max_lanq[tc] = new_numqs;
+
+ return status;
+}
+
+/**
+ * ice_sched_cfg_vsi - configure the new/existing VSI
+ * @pi: port information structure
+ * @vsi_id: VSI Id
+ * @tc: TC number
+ * @maxqs: max number of queues
+ * @owner: lan or rdma
+ * @enable: TC enabled or disabled
+ *
+ * This function adds/updates VSI nodes based on the number of queues. If TC is
+ * enabled and VSI is in suspended state then resume the VSI back. If TC is
+ * disabled then suspend the VSI if it is not already.
+ */
+enum ice_status
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
+ u8 owner, bool enable)
+{
+ struct ice_sched_node *vsi_node, *tc_node;
+ struct ice_sched_vsi_info *vsi;
+ enum ice_status status = 0;
+ struct ice_hw *hw = pi->hw;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_PARAM;
+
+ vsi = ice_sched_get_vsi_info_entry(pi, vsi_id);
+ if (!vsi)
+ vsi = ice_sched_create_vsi_info_entry(pi, vsi_id);
+ if (!vsi)
+ return ICE_ERR_NO_MEMORY;
+
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+
+ /* suspend the VSI if tc is not enabled */
+ if (!enable) {
+ if (vsi_node && vsi_node->in_use) {
+ u32 teid = le32_to_cpu(vsi_node->info.node_teid);
+
+ status = ice_sched_suspend_resume_elems(hw, 1, &teid,
+ true);
+ if (!status)
+ vsi_node->in_use = false;
+ }
+ return status;
+ }
+
+ /* TC is enabled, if it is a new VSI then add it to the tree */
+ if (!vsi_node) {
+ status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc);
+ if (status)
+ return status;
+ vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id);
+ if (!vsi_node)
+ return ICE_ERR_CFG;
+ vsi->vsi_node[tc] = vsi_node;
+ vsi_node->in_use = true;
+ }
+
+ /* update the VSI child nodes */
+ status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner);
+ if (status)
+ return status;
+
+ /* TC is enabled, resume the VSI if it is in the suspend state */
+ if (!vsi_node->in_use) {
+ u32 teid = le32_to_cpu(vsi_node->info.node_teid);
+
+ status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
+ if (!status)
+ vsi_node->in_use = true;
+ }
+
+ return status;
+}
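+
+/* Illustrative caller sketch (hypothetical values): enabling TC 0 with
+ * 16 LAN queues for a VSI would look roughly like
+ *
+ * status = ice_sched_cfg_vsi(pi, vsi_id, 0, 16,
+ * ICE_SCHED_NODE_OWNER_LAN, true);
+ *
+ * while calling it again with enable == false suspends the VSI node
+ * instead of tearing down the subtree.
+ */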
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
new file mode 100644
index 000000000000..badadcc120d3
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_SCHED_H_
+#define _ICE_SCHED_H_
+
+#include "ice_common.h"
+
+#define ICE_QGRP_LAYER_OFFSET 2
+#define ICE_VSI_LAYER_OFFSET 4
+
+struct ice_sched_agg_vsi_info {
+ struct list_head list_entry;
+ DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ u16 vsi_id;
+};
+
+struct ice_sched_agg_info {
+ struct list_head agg_vsi_list;
+ struct list_head list_entry;
+ DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ u32 agg_id;
+ enum ice_agg_type agg_type;
+};
+
+/* FW AQ command calls */
+enum ice_status ice_sched_init_port(struct ice_port_info *pi);
+enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_cleanup_all(struct ice_hw *hw);
+struct ice_sched_node *
+ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
+enum ice_status
+ice_sched_add_node(struct ice_port_info *pi, u8 layer,
+ struct ice_aqc_txsched_elem_data *info);
+void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
+struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
+struct ice_sched_node *
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc,
+ u8 owner);
+enum ice_status
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs,
+ u8 owner, bool enable);
+#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
new file mode 100644
index 000000000000..9a95c4ffd7d7
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_STATUS_H_
+#define _ICE_STATUS_H_
+
+/* Error Codes */
+enum ice_status {
+ ICE_ERR_PARAM = -1,
+ ICE_ERR_NOT_IMPL = -2,
+ ICE_ERR_NOT_READY = -3,
+ ICE_ERR_BAD_PTR = -5,
+ ICE_ERR_INVAL_SIZE = -6,
+ ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
+ ICE_ERR_RESET_FAILED = -9,
+ ICE_ERR_FW_API_VER = -10,
+ ICE_ERR_NO_MEMORY = -11,
+ ICE_ERR_CFG = -12,
+ ICE_ERR_OUT_OF_RANGE = -13,
+ ICE_ERR_ALREADY_EXISTS = -14,
+ ICE_ERR_DOES_NOT_EXIST = -15,
+ ICE_ERR_MAX_LIMIT = -17,
+ ICE_ERR_BUF_TOO_SHORT = -52,
+ ICE_ERR_NVM_BLANK_MODE = -53,
+ ICE_ERR_AQ_ERROR = -100,
+ ICE_ERR_AQ_TIMEOUT = -101,
+ ICE_ERR_AQ_FULL = -102,
+ ICE_ERR_AQ_NO_WORK = -103,
+ ICE_ERR_AQ_EMPTY = -104,
+};
+
+#endif /* _ICE_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
new file mode 100644
index 000000000000..723d15f1e90b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -0,0 +1,1883 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_switch.h"
+
+#define ICE_ETH_DA_OFFSET 0
+#define ICE_ETH_ETHTYPE_OFFSET 12
+#define ICE_ETH_VLAN_TCI_OFFSET 14
+#define ICE_MAX_VLAN_ID 0xFFF
+
+/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
+ * struct to configure any switch filter rules.
+ * {DA (6 bytes), SA(6 bytes),
+ * Ether type (2 bytes for header without VLAN tag) OR
+ * VLAN tag (4 bytes for header with VLAN tag) }
+ *
+ * Word on Hardcoded values
+ * byte 0 = 0x2: to identify it as locally administered DA MAC
+ * byte 6 = 0x2: to identify it as locally administered SA MAC
+ * byte 12 = 0x81 & byte 13 = 0x00:
+ * In case of VLAN filter first two bytes defines ether type (0x8100)
+ * and remaining two bytes are placeholder for programming a given VLAN id
+ * In case of Ether type filter it is treated as header without VLAN tag
+ * and byte 12 and 13 is used to program a given Ether type instead
+ */
+#define DUMMY_ETH_HDR_LEN 16
+static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
+ 0x2, 0, 0, 0, 0, 0,
+ 0x81, 0, 0, 0};
+
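+/* For instance, a VLAN filter would program the VLAN id into bytes
+ * 14-15 of this template (ICE_ETH_VLAN_TCI_OFFSET), while an Ethertype
+ * filter reprograms bytes 12-13 (ICE_ETH_ETHTYPE_OFFSET); see
+ * ice_fill_sw_rule below.
+ */
+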
+#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
+ (sizeof(struct ice_aqc_sw_rules_elem) - \
+ sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
+ sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
+ (sizeof(struct ice_aqc_sw_rules_elem) - \
+ sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
+ sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
+#define ICE_SW_RULE_LG_ACT_SIZE(n) \
+ (sizeof(struct ice_aqc_sw_rules_elem) - \
+ sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
+ sizeof(struct ice_sw_rule_lg_act) - \
+ sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
+ ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
+#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
+ (sizeof(struct ice_aqc_sw_rules_elem) - \
+ sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
+ sizeof(struct ice_sw_rule_vsi_list) - \
+ sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
+ ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
+
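+/* Sizing note (editorial): each *_SIZE macro starts from the descriptor
+ * without its pdata union, then adds the rule-specific payload; the
+ * (n)-parameterized variants also swap the single built-in entry for n
+ * of them, so ICE_SW_RULE_LG_ACT_SIZE(1) is the base large-action size.
+ */
+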
+/**
+ * ice_aq_alloc_free_res - command to allocate/free resources
+ * @hw: pointer to the hw struct
+ * @num_entries: number of resource entries in buffer
+ * @buf: Indirect buffer to hold data parameters and response
+ * @buf_size: size of buffer for indirect commands
+ * @opc: pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Helper function to allocate/free resources using the admin queue commands
+ */
+static enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_alloc_free_res_cmd *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.sw_res_ctrl;
+
+ if (!buf)
+ return ICE_ERR_PARAM;
+
+ if (buf_size < (num_entries * sizeof(buf->elem[0])))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->num_entries = cpu_to_le16(num_entries);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_get_sw_cfg - get switch configuration
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of the buffer available for response
+ * @req_desc: pointer to requested descriptor
+ * @num_elems: pointer to number of elements
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get switch configuration (0x0200) to be placed in 'buf'.
+ * This admin command returns information such as initial VSI/port number
+ * and the switch ID it belongs to.
+ *
+ * NOTE: *req_desc is both an input/output parameter.
+ * The caller first calls this function with *req_desc set to 0. If the
+ * response from f/w has *req_desc set to 0, all the switch configuration
+ * information has been returned; if it is non-zero (meaning not all the
+ * information was returned), the caller should call this function again
+ * with *req_desc set to the previous value returned by f/w to get the
+ * next block of switch configuration information.
+ *
+ * *num_elems is an output-only parameter that reflects the number of
+ * elements in the response buffer. The caller should use *num_elems
+ * while parsing the response buffer.
+ */
+static enum ice_status
+ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
+ u16 buf_size, u16 *req_desc, u16 *num_elems,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_sw_cfg *cmd;
+ enum ice_status status;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
+ cmd = &desc.params.get_sw_conf;
+ cmd->element = cpu_to_le16(*req_desc);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status) {
+ *req_desc = le16_to_cpu(cmd->element);
+ *num_elems = le16_to_cpu(cmd->num_elems);
+ }
+
+ return status;
+}
+
+/**
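+/* Minimal paging sketch for 0x0200 (the in-tree loop is in
+ * ice_get_initial_sw_cfg below):
+ *
+ * u16 req_desc = 0, num_elems;
+ *
+ * do {
+ * status = ice_aq_get_sw_cfg(hw, rbuf, buf_size, &req_desc,
+ * &num_elems, NULL);
+ * } while (!status && req_desc);
+ */
+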
+ * ice_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware (0x0210)
+ */
+enum ice_status
+ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_update_free_vsi_resp *res;
+ struct ice_aqc_add_get_update_free_vsi *cmd;
+ enum ice_status status;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.vsi_cmd;
+ res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
+
+ if (!vsi_ctx->alloc_from_pool)
+ cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
+ ICE_AQ_VSI_IS_VALID);
+
+ cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cd);
+
+ if (!status) {
+ vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
+ vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_update_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update VSI context in the hardware (0x0211)
+ */
+enum ice_status
+ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_update_free_vsi_resp *resp;
+ struct ice_aqc_add_get_update_free_vsi *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.vsi_cmd;
+ resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
+
+ cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cd);
+
+ if (!status) {
+ vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_free_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a VSI context struct
+ * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get VSI context info from hardware (0x0213)
+ */
+enum ice_status
+ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_update_free_vsi_resp *resp;
+ struct ice_aqc_add_get_update_free_vsi *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.vsi_cmd;
+ resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
+
+ cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
+ if (keep_vsi_alloc)
+ cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status) {
+ vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_alloc_free_vsi_list
+ * @hw: pointer to the hw struct
+ * @vsi_list_id: VSI list id returned or used for lookup
+ * @lkup_type: switch rule filter lookup type
+ * @opc: switch rules population command type - pass in the command opcode
+ *
+ * allocates or free a VSI list resource
+ */
+static enum ice_status
+ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
+ enum ice_sw_lkup_type lkup_type,
+ enum ice_adminq_opc opc)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf;
+ struct ice_aqc_res_elem *vsi_ele;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = sizeof(*sw_buf);
+ sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+ sw_buf->num_elems = cpu_to_le16(1);
+
+ if (lkup_type == ICE_SW_LKUP_MAC ||
+ lkup_type == ICE_SW_LKUP_MAC_VLAN ||
+ lkup_type == ICE_SW_LKUP_ETHERTYPE ||
+ lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ lkup_type == ICE_SW_LKUP_PROMISC ||
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
+ sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
+ } else if (lkup_type == ICE_SW_LKUP_VLAN) {
+ sw_buf->res_type =
+ cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
+ } else {
+ status = ICE_ERR_PARAM;
+ goto ice_aq_alloc_free_vsi_list_exit;
+ }
+
+ if (opc == ice_aqc_opc_free_res)
+ sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
+
+ status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
+ if (status)
+ goto ice_aq_alloc_free_vsi_list_exit;
+
+ if (opc == ice_aqc_opc_alloc_res) {
+ vsi_ele = &sw_buf->elem[0];
+ *vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
+ }
+
+ice_aq_alloc_free_vsi_list_exit:
+ devm_kfree(ice_hw_to_dev(hw), sw_buf);
+ return status;
+}
+
+/**
+ * ice_aq_sw_rules - add/update/remove switch rules
+ * @hw: pointer to the hw struct
+ * @rule_list: pointer to switch rule population list
+ * @rule_list_sz: total size of the rule list in bytes
+ * @num_rules: number of switch rules in the rule_list
+ * @opc: switch rules population command type - pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
+ */
+static enum ice_status
+ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
+ u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ if (opc != ice_aqc_opc_add_sw_rules &&
+ opc != ice_aqc_opc_update_sw_rules &&
+ opc != ice_aqc_opc_remove_sw_rules)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.params.sw_rules.num_rules_fltr_entry_index =
+ cpu_to_le16(num_rules);
+ return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+}
+
+/**
+ * ice_init_port_info - Initialize port_info with switch configuration data
+ * @pi: pointer to port_info
+ * @vsi_port_num: VSI number or port number
+ * @type: Type of switch element (port or VSI)
+ * @swid: switch ID of the switch the element is attached to
+ * @pf_vf_num: PF or VF number
+ * @is_vf: true if the element is a VF, false otherwise
+ */
+static void
+ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
+ u16 swid, u16 pf_vf_num, bool is_vf)
+{
+ switch (type) {
+ case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
+ pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
+ pi->sw_id = swid;
+ pi->pf_vf_num = pf_vf_num;
+ pi->is_vf = is_vf;
+ pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
+ pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
+ break;
+ default:
+ ice_debug(pi->hw, ICE_DBG_SW,
+ "incorrect VSI/port type received\n");
+ break;
+ }
+}
+
+/**
+ * ice_get_initial_sw_cfg - Get initial port and default VSI data
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
+{
+ struct ice_aqc_get_sw_cfg_resp *rbuf;
+ enum ice_status status;
+ u16 req_desc = 0;
+ u16 num_elems;
+ u16 i;
+
+ rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
+ GFP_KERNEL);
+
+ if (!rbuf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Multiple calls to ice_aq_get_sw_cfg may be required
+ * to get all the switch configuration information. The need
+ * for additional calls is indicated by ice_aq_get_sw_cfg
+ * writing a non-zero value in req_desc
+ */
+ do {
+ status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
+ &req_desc, &num_elems, NULL);
+
+ if (status)
+ break;
+
+ for (i = 0; i < num_elems; i++) {
+ struct ice_aqc_get_sw_cfg_resp_elem *ele;
+ u16 pf_vf_num, swid, vsi_port_num;
+ bool is_vf = false;
+ u8 type;
+
+ ele = rbuf[i].elements;
+ vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
+ ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
+
+ pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
+ ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
+
+ swid = le16_to_cpu(ele->swid);
+
+ if (le16_to_cpu(ele->pf_vf_num) &
+ ICE_AQC_GET_SW_CONF_RESP_IS_VF)
+ is_vf = true;
+
+ type = le16_to_cpu(ele->vsi_port_num) >>
+ ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
+
+ if (type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
+ /* FW VSI is not needed. Just continue. */
+ continue;
+ }
+
+ ice_init_port_info(hw->port_info, vsi_port_num,
+ type, swid, pf_vf_num, is_vf);
+ }
+ } while (req_desc && !status);
+
+ devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
+ return status;
+}
+
+/**
+ * ice_fill_sw_info - Helper function to populate lb_en and lan_en
+ * @hw: pointer to the hardware structure
+ * @f_info: filter info structure to fill/update
+ *
+ * This helper function populates the lb_en and lan_en elements of the provided
+ * ice_fltr_info struct using the switch's type and characteristics of the
+ * switch rule being configured.
+ */
+static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
+{
+ f_info->lb_en = false;
+ f_info->lan_en = false;
+ if ((f_info->flag & ICE_FLTR_TX) &&
+ (f_info->fltr_act == ICE_FWD_TO_VSI ||
+ f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
+ f_info->fltr_act == ICE_FWD_TO_Q ||
+ f_info->fltr_act == ICE_FWD_TO_QGRP)) {
+ f_info->lb_en = true;
+ if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
+ is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
+ f_info->lan_en = true;
+ }
+}
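
In plain terms, the logic above means any Tx-direction forwarding filter enables internal loopback (lb_en), and the frame additionally reaches the uplink (lan_en) unless the device operates as a VEB and the rule matches a unicast MAC, in which case delivery can stay entirely inside the embedded switch.
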
+
+/**
+ * ice_fill_sw_rule - Helper function to fill switch rule structure
+ * @hw: pointer to the hardware structure
+ * @f_info: entry containing packet forwarding information
+ * @s_rule: switch rule structure to be filled in based on mac_entry
+ * @opc: switch rules population command type - pass in the command opcode
+ */
+static void
+ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
+ struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
+{
+ u16 vlan_id = ICE_MAX_VLAN_ID + 1;
+ u8 eth_hdr[DUMMY_ETH_HDR_LEN];
+ void *daddr = NULL;
+ u32 act = 0;
+ __be16 *off;
+
+ if (opc == ice_aqc_opc_remove_sw_rules) {
+ s_rule->pdata.lkup_tx_rx.act = 0;
+ s_rule->pdata.lkup_tx_rx.index =
+ cpu_to_le16(f_info->fltr_rule_id);
+ s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ return;
+ }
+
+ /* initialize the ether header with a dummy header */
+ memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header));
+ ice_fill_sw_info(hw, f_info);
+
+ switch (f_info->fltr_act) {
+ case ICE_FWD_TO_VSI:
+ act |= (f_info->fwd_id.vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
+ ICE_SINGLE_ACT_VSI_ID_M;
+ if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ case ICE_FWD_TO_VSI_LIST:
+ act |= ICE_SINGLE_ACT_VSI_LIST;
+ act |= (f_info->fwd_id.vsi_list_id <<
+ ICE_SINGLE_ACT_VSI_LIST_ID_S) &
+ ICE_SINGLE_ACT_VSI_LIST_ID_M;
+ if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ case ICE_FWD_TO_Q:
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ break;
+ case ICE_FWD_TO_QGRP:
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
+ ICE_SINGLE_ACT_Q_REGION_M;
+ break;
+ case ICE_DROP_PACKET:
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
+ break;
+ default:
+ return;
+ }
+
+ if (f_info->lb_en)
+ act |= ICE_SINGLE_ACT_LB_ENABLE;
+ if (f_info->lan_en)
+ act |= ICE_SINGLE_ACT_LAN_ENABLE;
+
+ switch (f_info->lkup_type) {
+ case ICE_SW_LKUP_MAC:
+ daddr = f_info->l_data.mac.mac_addr;
+ break;
+ case ICE_SW_LKUP_VLAN:
+ vlan_id = f_info->l_data.vlan.vlan_id;
+ if (f_info->fltr_act == ICE_FWD_TO_VSI ||
+ f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
+ act |= ICE_SINGLE_ACT_PRUNE;
+ act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
+ }
+ break;
+ case ICE_SW_LKUP_ETHERTYPE_MAC:
+ daddr = f_info->l_data.ethertype_mac.mac_addr;
+ /* fall-through */
+ case ICE_SW_LKUP_ETHERTYPE:
+ off = (__be16 *)&eth_hdr[ICE_ETH_ETHTYPE_OFFSET];
+ *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
+ break;
+ case ICE_SW_LKUP_MAC_VLAN:
+ daddr = f_info->l_data.mac_vlan.mac_addr;
+ vlan_id = f_info->l_data.mac_vlan.vlan_id;
+ break;
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ vlan_id = f_info->l_data.mac_vlan.vlan_id;
+ /* fall-through */
+ case ICE_SW_LKUP_PROMISC:
+ daddr = f_info->l_data.mac_vlan.mac_addr;
+ break;
+ default:
+ break;
+ }
+
+ s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
+ cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
+ cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+
+ /* Recipe set depending on lookup type */
+ s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
+ s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
+ s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+
+ if (daddr)
+ ether_addr_copy(&eth_hdr[ICE_ETH_DA_OFFSET], daddr);
+
+	if (vlan_id <= ICE_MAX_VLAN_ID) {
+ off = (__be16 *)&eth_hdr[ICE_ETH_VLAN_TCI_OFFSET];
+ *off = cpu_to_be16(vlan_id);
+ }
+
+ /* Create the switch rule with the final dummy Ethernet header */
+ if (opc != ice_aqc_opc_update_sw_rules)
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr));
+
+ memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr));
+}
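
A minimal standalone sketch of the header patching performed above, assuming the byte offsets implied by the ICE_ETH_*_OFFSET usage (destination MAC at 0, EtherType at 12, VLAN TCI at 14); the helper name and literal offsets are illustrative assumptions, not code from this patch:

	#include <linux/etherdevice.h>

	/* Sketch only: patch a destination MAC, EtherType, and 12-bit VLAN ID
	 * into a pre-built dummy Ethernet header, mirroring ice_fill_sw_rule().
	 */
	static void sketch_patch_dummy_eth_hdr(u8 *eth_hdr, const u8 *daddr,
					       u16 ethertype, u16 vlan_id)
	{
		__be16 *off;

		ether_addr_copy(&eth_hdr[0], daddr);	/* assumed DA offset */

		off = (__be16 *)&eth_hdr[12];		/* assumed EtherType offset */
		*off = cpu_to_be16(ethertype);

		if (vlan_id <= 4095) {			/* VLAN IDs are 12 bits */
			off = (__be16 *)&eth_hdr[14];	/* assumed VLAN TCI offset */
			*off = cpu_to_be16(vlan_id);
		}
	}
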
+
+/**
+ * ice_add_marker_act
+ * @hw: pointer to the hardware structure
+ * @m_ent: the management entry for which sw marker needs to be added
+ * @sw_marker: sw marker to tag the Rx descriptor with
+ * @l_id: large action resource id
+ *
+ * Create a large action to hold software marker and update the switch rule
+ * entry pointed by m_ent with newly created large action
+ */
+static enum ice_status
+ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
+ u16 sw_marker, u16 l_id)
+{
+ struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
+ /* For software marker we need 3 large actions
+ * 1. FWD action: FWD TO VSI or VSI LIST
+ * 2. GENERIC VALUE action to hold the profile id
+ * 3. GENERIC VALUE action to hold the software marker id
+ */
+ const u16 num_lg_acts = 3;
+ enum ice_status status;
+ u16 lg_act_size;
+ u16 rules_size;
+ u16 vsi_info;
+ u32 act;
+
+ if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
+ return ICE_ERR_PARAM;
+
+ /* Create two back-to-back switch rules and submit them to the HW using
+ * one memory buffer:
+ * 1. Large Action
+ * 2. Look up tx rx
+ */
+ lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
+ rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+ lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
+ if (!lg_act)
+ return ICE_ERR_NO_MEMORY;
+
+ rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
+
+ /* Fill in the first switch rule i.e. large action */
+ lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
+ lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
+ lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
+
+ /* First action VSI forwarding or VSI list forwarding depending on how
+ * many VSIs
+ */
+ vsi_info = (m_ent->vsi_count > 1) ?
+ m_ent->fltr_info.fwd_id.vsi_list_id :
+ m_ent->fltr_info.fwd_id.vsi_id;
+
+ act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
+ act |= (vsi_info << ICE_LG_ACT_VSI_LIST_ID_S) &
+ ICE_LG_ACT_VSI_LIST_ID_M;
+ if (m_ent->vsi_count > 1)
+ act |= ICE_LG_ACT_VSI_LIST;
+ lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
+
+ /* Second action descriptor type */
+ act = ICE_LG_ACT_GENERIC;
+
+ act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+ lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
+
+	act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
+
+ /* Third action Marker value */
+ act |= ICE_LG_ACT_GENERIC;
+ act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
+ ICE_LG_ACT_GENERIC_VALUE_M;
+
+	act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
+ lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
+
+ /* call the fill switch rule to fill the lookup tx rx structure */
+ ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
+ ice_aqc_opc_update_sw_rules);
+
+ /* Update the action to point to the large action id */
+ rx_tx->pdata.lkup_tx_rx.act =
+ cpu_to_le32(ICE_SINGLE_ACT_PTR |
+ ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
+ ICE_SINGLE_ACT_PTR_VAL_M));
+
+	/* Use the filter rule id of the previously created rule with a
+	 * single action. Once the update happens, hardware will treat this
+	 * as a large action
+ */
+ rx_tx->pdata.lkup_tx_rx.index =
+ cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
+
+ status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
+ ice_aqc_opc_update_sw_rules, NULL);
+ if (!status) {
+ m_ent->lg_act_idx = l_id;
+ m_ent->sw_marker_id = sw_marker;
+ }
+
+ devm_kfree(ice_hw_to_dev(hw), lg_act);
+ return status;
+}
+
+/**
+ * ice_create_vsi_list_map
+ * @hw: pointer to the hardware structure
+ * @vsi_array: array of VSIs to form a VSI list
+ * @num_vsi: num VSI in the array
+ * @vsi_list_id: VSI list id generated as part of allocate resource
+ *
+ * Helper function to create a new entry of VSI list id to VSI mapping
+ * using the given VSI list id
+ */
+static struct ice_vsi_list_map_info *
+ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ u16 vsi_list_id)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_vsi_list_map_info *v_map;
+ int i;
+
+	v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
+ if (!v_map)
+ return NULL;
+
+ v_map->vsi_list_id = vsi_list_id;
+
+ for (i = 0; i < num_vsi; i++)
+ set_bit(vsi_array[i], v_map->vsi_map);
+
+ list_add(&v_map->list_entry, &sw->vsi_list_map_head);
+ return v_map;
+}
+
+/**
+ * ice_update_vsi_list_rule
+ * @hw: pointer to the hardware structure
+ * @vsi_array: array of VSIs to form a VSI list
+ * @num_vsi: num VSI in the array
+ * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @remove: Boolean value to indicate if this is a remove action
+ * @opc: switch rules population command type - pass in the command opcode
+ * @lkup_type: lookup type of the filter
+ *
+ * Call AQ command to add a new switch rule or update existing switch rule
+ * using the given VSI list id
+ */
+static enum ice_status
+ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
+ enum ice_sw_lkup_type lkup_type)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status status;
+ u16 s_rule_size;
+ u16 type;
+ int i;
+
+ if (!num_vsi)
+ return ICE_ERR_PARAM;
+
+ if (lkup_type == ICE_SW_LKUP_MAC ||
+ lkup_type == ICE_SW_LKUP_MAC_VLAN ||
+ lkup_type == ICE_SW_LKUP_ETHERTYPE ||
+ lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ lkup_type == ICE_SW_LKUP_PROMISC ||
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
+ type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_VSI_LIST_SET;
+ else if (lkup_type == ICE_SW_LKUP_VLAN)
+ type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
+ else
+ return ICE_ERR_PARAM;
+
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < num_vsi; i++)
+ s_rule->pdata.vsi_list.vsi[i] = cpu_to_le16(vsi_array[i]);
+
+ s_rule->type = cpu_to_le16(type);
+ s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
+ s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+
+ status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
+
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ return status;
+}
+
+/**
+ * ice_create_vsi_list_rule - Creates and populates a VSI list rule
+ * @hw: pointer to the hw struct
+ * @vsi_array: array of VSIs to form a VSI list
+ * @num_vsi: number of VSIs in the array
+ * @vsi_list_id: stores the ID of the VSI list to be created
+ * @lkup_type: switch rule filter's lookup type
+ */
+static enum ice_status
+ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_array, u16 num_vsi,
+ u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
+{
+ enum ice_status status;
+ int i;
+
+ for (i = 0; i < num_vsi; i++)
+ if (vsi_array[i] >= ICE_MAX_VSI)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
+ ice_aqc_opc_alloc_res);
+ if (status)
+ return status;
+
+ /* Update the newly created VSI list to include the specified VSIs */
+ return ice_update_vsi_list_rule(hw, vsi_array, num_vsi, *vsi_list_id,
+ false, ice_aqc_opc_add_sw_rules,
+ lkup_type);
+}
+
+/**
+ * ice_create_pkt_fwd_rule
+ * @hw: pointer to the hardware structure
+ * @f_entry: entry containing packet forwarding information
+ *
+ * Create switch rule with given filter information and add an entry
+ * to the corresponding filter management list to track this switch rule
+ * and VSI mapping
+ */
+static enum ice_status
+ice_create_pkt_fwd_rule(struct ice_hw *hw,
+ struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_sw_lkup_type l_type;
+ enum ice_status status;
+
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw),
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
+ GFP_KERNEL);
+ if (!fm_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_create_pkt_fwd_rule_exit;
+ }
+
+ fm_entry->fltr_info = f_entry->fltr_info;
+
+ /* Initialize all the fields for the management entry */
+ fm_entry->vsi_count = 1;
+ fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
+ fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
+ fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
+
+ ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
+ ice_aqc_opc_add_sw_rules);
+
+ status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ ice_aqc_opc_add_sw_rules, NULL);
+ if (status) {
+ devm_kfree(ice_hw_to_dev(hw), fm_entry);
+ goto ice_create_pkt_fwd_rule_exit;
+ }
+
+ f_entry->fltr_info.fltr_rule_id =
+ le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ fm_entry->fltr_info.fltr_rule_id =
+ le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+
+	/* The bookkeeping entries will get removed when the base driver
+	 * calls the remove filter AQ command
+	 */
+ l_type = fm_entry->fltr_info.lkup_type;
+ if (l_type == ICE_SW_LKUP_MAC) {
+ mutex_lock(&sw->mac_list_lock);
+ list_add(&fm_entry->list_entry, &sw->mac_list_head);
+ mutex_unlock(&sw->mac_list_lock);
+ } else if (l_type == ICE_SW_LKUP_VLAN) {
+ mutex_lock(&sw->vlan_list_lock);
+ list_add(&fm_entry->list_entry, &sw->vlan_list_head);
+ mutex_unlock(&sw->vlan_list_lock);
+ } else if (l_type == ICE_SW_LKUP_ETHERTYPE ||
+ l_type == ICE_SW_LKUP_ETHERTYPE_MAC) {
+ mutex_lock(&sw->eth_m_list_lock);
+ list_add(&fm_entry->list_entry, &sw->eth_m_list_head);
+ mutex_unlock(&sw->eth_m_list_lock);
+ } else if (l_type == ICE_SW_LKUP_PROMISC ||
+ l_type == ICE_SW_LKUP_PROMISC_VLAN) {
+ mutex_lock(&sw->promisc_list_lock);
+ list_add(&fm_entry->list_entry, &sw->promisc_list_head);
+ mutex_unlock(&sw->promisc_list_lock);
+ } else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) {
+ mutex_lock(&sw->mac_vlan_list_lock);
+ list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head);
+ mutex_unlock(&sw->mac_vlan_list_lock);
+ } else {
+ status = ICE_ERR_NOT_IMPL;
+ }
+ice_create_pkt_fwd_rule_exit:
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ return status;
+}
+
+/**
+ * ice_update_pkt_fwd_rule
+ * @hw: pointer to the hardware structure
+ * @rule_id: rule of previously created switch rule to update
+ * @vsi_list_id: VSI list id to be updated with
+ * @f_info: ice_fltr_info to pull other information for switch rule
+ *
+ * Call AQ command to update a previously created switch rule with a
+ * VSI list id
+ */
+static enum ice_status
+ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id,
+ struct ice_fltr_info f_info)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_fltr_info tmp_fltr;
+ enum ice_status status;
+
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw),
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ tmp_fltr = f_info;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+
+ ice_fill_sw_rule(hw, &tmp_fltr, s_rule,
+ ice_aqc_opc_update_sw_rules);
+
+ s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
+
+ /* Update switch rule with new rule set to forward VSI list */
+ status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ ice_aqc_opc_update_sw_rules, NULL);
+
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ return status;
+}
+
+/**
+ * ice_handle_vsi_list_mgmt
+ * @hw: pointer to the hardware structure
+ * @m_entry: pointer to current filter management list entry
+ * @cur_fltr: filter information from the bookkeeping entry
+ * @new_fltr: filter information with the new VSI to be added
+ *
+ * Call AQ command to add or update previously created VSI list with new VSI.
+ *
+ * Helper function to do the bookkeeping associated with adding filter
+ * information. The bookkeeping algorithm is described below:
+ * When a VSI needs to subscribe to a given filter (MAC/VLAN/EtherType etc.)
+ *	if only one VSI has been added so far
+ * Allocate a new VSI list and add two VSIs
+ * to this list using switch rule command
+ * Update the previously created switch rule with the
+ * newly created VSI list id
+ * if a VSI list was previously created
+ * Add the new VSI to the previously created VSI list set
+ * using the update switch rule command
+ */
+static enum ice_status
+ice_handle_vsi_list_mgmt(struct ice_hw *hw,
+ struct ice_fltr_mgmt_list_entry *m_entry,
+ struct ice_fltr_info *cur_fltr,
+ struct ice_fltr_info *new_fltr)
+{
+ enum ice_status status = 0;
+ u16 vsi_list_id = 0;
+
+ if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
+ cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
+ return ICE_ERR_NOT_IMPL;
+
+ if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
+ new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
+ (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
+ cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
+ return ICE_ERR_NOT_IMPL;
+
+ if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
+ /* Only one entry existed in the mapping and it was not already
+ * a part of a VSI list. So, create a VSI list with the old and
+ * new VSIs.
+ */
+ u16 vsi_id_arr[2];
+ u16 fltr_rule;
+
+ /* A rule already exists with the new VSI being added */
+ if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id)
+ return ICE_ERR_ALREADY_EXISTS;
+
+ vsi_id_arr[0] = cur_fltr->fwd_id.vsi_id;
+ vsi_id_arr[1] = new_fltr->fwd_id.vsi_id;
+ status = ice_create_vsi_list_rule(hw, &vsi_id_arr[0], 2,
+ &vsi_list_id,
+ new_fltr->lkup_type);
+ if (status)
+ return status;
+
+ fltr_rule = cur_fltr->fltr_rule_id;
+ /* Update the previous switch rule of "MAC forward to VSI" to
+ * "MAC fwd to VSI list"
+ */
+ status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id,
+ *new_fltr);
+ if (status)
+ return status;
+
+ cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
+ cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
+ m_entry->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_id_arr[0], 2,
+ vsi_list_id);
+
+ /* If this entry was large action then the large action needs
+ * to be updated to point to FWD to VSI list
+ */
+ if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
+ status =
+ ice_add_marker_act(hw, m_entry,
+ m_entry->sw_marker_id,
+ m_entry->lg_act_idx);
+ } else {
+ u16 vsi_id = new_fltr->fwd_id.vsi_id;
+ enum ice_adminq_opc opcode;
+
+ /* A rule already exists with the new VSI being added */
+ if (test_bit(vsi_id, m_entry->vsi_list_info->vsi_map))
+ return 0;
+
+ /* Update the previously created VSI list set with
+ * the new VSI id passed in
+ */
+ vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
+ opcode = ice_aqc_opc_update_sw_rules;
+
+ status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
+ false, opcode,
+ new_fltr->lkup_type);
+ /* update VSI list mapping info with new VSI id */
+ if (!status)
+ set_bit(vsi_id, m_entry->vsi_list_info->vsi_map);
+ }
+ if (!status)
+ m_entry->vsi_count++;
+ return status;
+}
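
To make the bookkeeping above concrete, a walk-through with hypothetical numbers: suppose VSI 3 holds a plain "MAC forward to VSI" rule for some address (vsi_count is 1 and there is no vsi_list_info). When VSI 7 subscribes to the same address, the first branch runs: ice_create_vsi_list_rule() allocates a list holding {3, 7}, ice_update_pkt_fwd_rule() rewrites the existing rule to forward to that list, and ice_create_vsi_list_map() records the list-id-to-VSI mapping. If a third VSI, say 9, subscribes later, the else branch adds it to the existing list with a single ice_update_vsi_list_rule() call and sets its bit in the map.
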
+
+/**
+ * ice_find_mac_entry
+ * @hw: pointer to the hardware structure
+ * @mac_addr: MAC address to search for
+ *
+ * Helper function to search for a MAC entry using a given MAC address
+ * Returns pointer to the entry if found.
+ */
+static struct ice_fltr_mgmt_list_entry *
+ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr)
+{
+ struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL;
+ struct ice_switch_info *sw = hw->switch_info;
+
+ mutex_lock(&sw->mac_list_lock);
+ list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) {
+ u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
+
+ if (ether_addr_equal(buf, mac_addr)) {
+ mac_ret = m_list_itr;
+ break;
+ }
+ }
+ mutex_unlock(&sw->mac_list_lock);
+ return mac_ret;
+}
+
+/**
+ * ice_add_shared_mac - Add one MAC shared filter rule
+ * @hw: pointer to the hardware structure
+ * @f_entry: structure containing MAC forwarding information
+ *
+ * Adds or updates the bookkeeping list for the MAC addresses
+ */
+static enum ice_status
+ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_fltr_info *new_fltr, *cur_fltr;
+ struct ice_fltr_mgmt_list_entry *m_entry;
+
+ new_fltr = &f_entry->fltr_info;
+
+ m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]);
+ if (!m_entry)
+ return ice_create_pkt_fwd_rule(hw, f_entry);
+
+ cur_fltr = &m_entry->fltr_info;
+
+ return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr);
+}
+
+/**
+ * ice_add_mac - Add a MAC address based filter rule
+ * @hw: pointer to the hardware structure
+ * @m_list: list of MAC addresses and forwarding information
+ *
+ * IMPORTANT: When the ucast_shared flag is set to false and m_list has
+ * multiple unicast addresses, the function assumes that all the
+ * addresses are unique in a given add_mac call. It doesn't
+ * check for duplicates in this case; removing duplicates from a given
+ * list should be taken care of by the caller of this function.
+ */
+enum ice_status
+ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
+{
+ struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
+ struct ice_fltr_list_entry *m_list_itr;
+ u16 elem_sent, total_elem_left;
+ enum ice_status status = 0;
+ u16 num_unicast = 0;
+ u16 s_rule_size;
+
+ if (!m_list || !hw)
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry(m_list_itr, m_list, list_entry) {
+ u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
+
+ if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
+ return ICE_ERR_PARAM;
+ if (is_zero_ether_addr(add))
+ return ICE_ERR_PARAM;
+ if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
+ /* Don't overwrite the unicast address */
+ if (ice_find_mac_entry(hw, add))
+ return ICE_ERR_ALREADY_EXISTS;
+ num_unicast++;
+ } else if (is_multicast_ether_addr(add) ||
+ (is_unicast_ether_addr(add) && hw->ucast_shared)) {
+ status = ice_add_shared_mac(hw, m_list_itr);
+ if (status) {
+ m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
+ return status;
+ }
+ m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ }
+ }
+
+ /* Exit if no suitable entries were found for adding bulk switch rule */
+ if (!num_unicast)
+ return 0;
+
+ /* Allocate switch rule buffer for the bulk update for unicast */
+ s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+ s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
+ GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ r_iter = s_rule;
+ list_for_each_entry(m_list_itr, m_list, list_entry) {
+ struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
+ u8 *addr = &f_info->l_data.mac.mac_addr[0];
+
+ if (is_unicast_ether_addr(addr)) {
+ ice_fill_sw_rule(hw, &m_list_itr->fltr_info,
+ r_iter, ice_aqc_opc_add_sw_rules);
+ r_iter = (struct ice_aqc_sw_rules_elem *)
+ ((u8 *)r_iter + s_rule_size);
+ }
+ }
+
+ /* Call AQ bulk switch rule update for all unicast addresses */
+ r_iter = s_rule;
+	/* Call AQ switch rule in chunks of at most ICE_AQ_MAX_BUF_LEN */
+ for (total_elem_left = num_unicast; total_elem_left > 0;
+ total_elem_left -= elem_sent) {
+ struct ice_aqc_sw_rules_elem *entry = r_iter;
+
+ elem_sent = min(total_elem_left,
+ (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
+ status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
+ elem_sent, ice_aqc_opc_add_sw_rules,
+ NULL);
+ if (status)
+ goto ice_add_mac_exit;
+ r_iter = (struct ice_aqc_sw_rules_elem *)
+ ((u8 *)r_iter + (elem_sent * s_rule_size));
+ }
+
+ /* Fill up rule id based on the value returned from FW */
+ r_iter = s_rule;
+ list_for_each_entry(m_list_itr, m_list, list_entry) {
+ struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
+ u8 *addr = &f_info->l_data.mac.mac_addr[0];
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+
+ if (is_unicast_ether_addr(addr)) {
+ f_info->fltr_rule_id =
+ le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
+ f_info->fltr_act = ICE_FWD_TO_VSI;
+ /* Create an entry to track this MAC address */
+ fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*fm_entry), GFP_KERNEL);
+ if (!fm_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_add_mac_exit;
+ }
+ fm_entry->fltr_info = *f_info;
+ fm_entry->vsi_count = 1;
+			/* The bookkeeping entries will get removed when the
+			 * base driver calls the remove filter AQ command
+			 */
+ mutex_lock(&sw->mac_list_lock);
+ list_add(&fm_entry->list_entry, &sw->mac_list_head);
+ mutex_unlock(&sw->mac_list_lock);
+
+ r_iter = (struct ice_aqc_sw_rules_elem *)
+ ((u8 *)r_iter + s_rule_size);
+ }
+ }
+
+ice_add_mac_exit:
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ return status;
+}
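
A hedged usage sketch for ice_add_mac(): the wrapper below and its choice of flag/src values are illustrative assumptions, not code from this patch. The entry may live on the caller's stack because ice_add_mac() copies the filter info into its own bookkeeping entry.

	#include <linux/list.h>
	#include <linux/etherdevice.h>

	/* Sketch only: forward one MAC address to a VSI via ice_add_mac() */
	static enum ice_status sketch_add_one_mac(struct ice_hw *hw, u16 vsi_id,
						  const u8 *mac)
	{
		struct ice_fltr_list_entry entry = {};
		LIST_HEAD(m_list);

		entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
		entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		entry.fltr_info.fwd_id.vsi_id = vsi_id;
		entry.fltr_info.flag = ICE_FLTR_TX;	/* assumed direction */
		entry.fltr_info.src = vsi_id;		/* source VSI for a Tx lookup */
		ether_addr_copy(entry.fltr_info.l_data.mac.mac_addr, mac);

		list_add(&entry.list_entry, &m_list);
		return ice_add_mac(hw, &m_list);
	}
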
+
+/**
+ * ice_find_vlan_entry
+ * @hw: pointer to the hardware structure
+ * @vlan_id: VLAN id to search for
+ *
+ * Helper function to search for a VLAN entry using a given VLAN id
+ * Returns pointer to the entry if found.
+ */
+static struct ice_fltr_mgmt_list_entry *
+ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id)
+{
+ struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL;
+ struct ice_switch_info *sw = hw->switch_info;
+
+ mutex_lock(&sw->vlan_list_lock);
+ list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry)
+ if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) {
+ vlan_ret = vlan_list_itr;
+ break;
+ }
+
+ mutex_unlock(&sw->vlan_list_lock);
+ return vlan_ret;
+}
+
+/**
+ * ice_add_vlan_internal - Add one VLAN based filter rule
+ * @hw: pointer to the hardware structure
+ * @f_entry: filter entry containing one VLAN information
+ */
+static enum ice_status
+ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_fltr_info *new_fltr, *cur_fltr;
+ struct ice_fltr_mgmt_list_entry *v_list_itr;
+ u16 vlan_id;
+
+ new_fltr = &f_entry->fltr_info;
+ /* VLAN id should only be 12 bits */
+ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
+ return ICE_ERR_PARAM;
+
+ vlan_id = new_fltr->l_data.vlan.vlan_id;
+ v_list_itr = ice_find_vlan_entry(hw, vlan_id);
+ if (!v_list_itr) {
+ u16 vsi_id = ICE_VSI_INVAL_ID;
+ enum ice_status status;
+ u16 vsi_list_id = 0;
+
+ if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
+ enum ice_sw_lkup_type lkup_type = new_fltr->lkup_type;
+
+ /* All VLAN pruning rules use a VSI list.
+ * Convert the action to forwarding to a VSI list.
+ */
+ vsi_id = new_fltr->fwd_id.vsi_id;
+ status = ice_create_vsi_list_rule(hw, &vsi_id, 1,
+ &vsi_list_id,
+ lkup_type);
+ if (status)
+ return status;
+ new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
+ new_fltr->fwd_id.vsi_list_id = vsi_list_id;
+ }
+
+ status = ice_create_pkt_fwd_rule(hw, f_entry);
+ if (!status && vsi_id != ICE_VSI_INVAL_ID) {
+ v_list_itr = ice_find_vlan_entry(hw, vlan_id);
+ if (!v_list_itr)
+ return ICE_ERR_DOES_NOT_EXIST;
+ v_list_itr->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_id, 1,
+ vsi_list_id);
+ }
+
+ return status;
+ }
+
+ cur_fltr = &v_list_itr->fltr_info;
+ return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr);
+}
+
+/**
+ * ice_add_vlan - Add VLAN based filter rule
+ * @hw: pointer to the hardware structure
+ * @v_list: list of VLAN entries and forwarding information
+ */
+enum ice_status
+ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
+{
+ struct ice_fltr_list_entry *v_list_itr;
+
+ if (!v_list || !hw)
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry(v_list_itr, v_list, list_entry) {
+ enum ice_status status;
+
+ if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
+ return ICE_ERR_PARAM;
+
+ status = ice_add_vlan_internal(hw, v_list_itr);
+ if (status) {
+ v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
+ return status;
+ }
+ v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ }
+ return 0;
+}
+
+/**
+ * ice_remove_vsi_list_rule
+ * @hw: pointer to the hardware structure
+ * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @lkup_type: switch rule filter lookup type
+ */
+static enum ice_status
+ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
+ enum ice_sw_lkup_type lkup_type)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status status;
+ u16 s_rule_size;
+
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
+ s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+ /* FW expects number of VSIs in vsi_list resource to be 0 for clear
+	 * command. Since memory is zeroed out during initialization, it's not
+ * necessary to explicitly initialize the variable to 0.
+ */
+
+ status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1,
+ ice_aqc_opc_remove_sw_rules, NULL);
+ if (!status)
+ /* Free the vsi_list resource that we allocated */
+ status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
+ ice_aqc_opc_free_res);
+
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ return status;
+}
+
+/**
+ * ice_handle_rem_vsi_list_mgmt
+ * @hw: pointer to the hardware structure
+ * @vsi_id: ID of the VSI to remove
+ * @fm_list_itr: filter management entry for which the VSI list management
+ * needs to be done
+ */
+static enum ice_status
+ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id,
+ struct ice_fltr_mgmt_list_entry *fm_list_itr)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ enum ice_status status = 0;
+ enum ice_sw_lkup_type lkup_type;
+ bool is_last_elem = true;
+ bool conv_list = false;
+ bool del_list = false;
+ u16 vsi_list_id;
+
+ lkup_type = fm_list_itr->fltr_info.lkup_type;
+ vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id;
+
+ if (fm_list_itr->vsi_count > 1) {
+ status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id,
+ true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+ fm_list_itr->vsi_count--;
+ is_last_elem = false;
+ clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map);
+ }
+
+ /* For non-VLAN rules that forward packets to a VSI list, convert them
+ * to forwarding packets to a VSI if there is only one VSI left in the
+ * list. Unused lists are then removed.
+ * VLAN rules need to use VSI lists even with only one VSI.
+ */
+ if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) {
+ if (lkup_type == ICE_SW_LKUP_VLAN) {
+ del_list = is_last_elem;
+ } else if (fm_list_itr->vsi_count == 1) {
+ conv_list = true;
+ del_list = true;
+ }
+ }
+
+ if (del_list) {
+ /* Remove the VSI list since it is no longer used */
+ struct ice_vsi_list_map_info *vsi_list_info =
+ fm_list_itr->vsi_list_info;
+
+ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
+ if (status)
+ return status;
+
+ if (conv_list) {
+ u16 rem_vsi_id;
+
+ rem_vsi_id = find_first_bit(vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+
+ /* Error out when the expected last element is not in
+ * the VSI list map
+ */
+ if (rem_vsi_id == ICE_MAX_VSI)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* Change the list entry action from VSI_LIST to VSI */
+ fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id;
+ }
+
+ list_del(&vsi_list_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
+ fm_list_itr->vsi_list_info = NULL;
+ }
+
+ if (conv_list) {
+ /* Convert the rule's forward action to forwarding packets to
+ * a VSI
+ */
+ struct ice_aqc_sw_rules_elem *s_rule;
+
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw),
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE,
+ GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
+ ice_aqc_opc_update_sw_rules);
+
+ s_rule->pdata.lkup_tx_rx.index =
+ cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id);
+
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ ice_aqc_opc_update_sw_rules, NULL);
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ if (status)
+ return status;
+ }
+
+ if (is_last_elem) {
+ /* Remove the lookup rule */
+ struct ice_aqc_sw_rules_elem *s_rule;
+
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw),
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
+ GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule,
+ ice_aqc_opc_remove_sw_rules);
+
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
+ ice_aqc_opc_remove_sw_rules, NULL);
+ if (status)
+ return status;
+
+		/* Remove a bookkeeping entry from the MAC address list */
+ mutex_lock(&sw->mac_list_lock);
+ list_del(&fm_list_itr->list_entry);
+ mutex_unlock(&sw->mac_list_lock);
+ devm_kfree(ice_hw_to_dev(hw), fm_list_itr);
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ }
+ return status;
+}
+
+/**
+ * ice_remove_mac_entry
+ * @hw: pointer to the hardware structure
+ * @f_entry: structure containing MAC forwarding information
+ */
+static enum ice_status
+ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_fltr_mgmt_list_entry *m_entry;
+ u16 vsi_id;
+ u8 *add;
+
+ add = &f_entry->fltr_info.l_data.mac.mac_addr[0];
+
+ m_entry = ice_find_mac_entry(hw, add);
+ if (!m_entry)
+ return ICE_ERR_PARAM;
+
+ vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
+ return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry);
+}
+
+/**
+ * ice_remove_mac - remove a MAC address based filter rule
+ * @hw: pointer to the hardware structure
+ * @m_list: list of MAC addresses and forwarding information
+ *
+ * This function removes either a MAC filter rule or a specific VSI from a
+ * VSI list for a multicast MAC address.
+ *
+ * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
+ * ice_add_mac. Caller should be aware that this call will only work if all
+ * the entries passed into m_list were added previously. It will not attempt to
+ * do a partial remove of entries that were found.
+ */
+enum ice_status
+ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
+{
+ struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
+ u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *m_entry;
+ struct ice_fltr_list_entry *m_list_itr;
+ u16 elem_sent, total_elem_left;
+ enum ice_status status = 0;
+ u16 num_unicast = 0;
+
+ if (!m_list)
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry(m_list_itr, m_list, list_entry) {
+ u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
+
+ if (is_unicast_ether_addr(addr) && !hw->ucast_shared)
+ num_unicast++;
+ else if (is_multicast_ether_addr(addr) ||
+ (is_unicast_ether_addr(addr) && hw->ucast_shared))
+ ice_remove_mac_entry(hw, m_list_itr);
+ }
+
+ /* Exit if no unicast addresses found. Multicast switch rules
+ * were added individually
+ */
+ if (!num_unicast)
+ return 0;
+
+ /* Allocate switch rule buffer for the bulk update for unicast */
+ s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
+ GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ r_iter = s_rule;
+ list_for_each_entry(m_list_itr, m_list, list_entry) {
+ u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
+
+ if (is_unicast_ether_addr(addr)) {
+ m_entry = ice_find_mac_entry(hw, addr);
+ if (!m_entry) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto ice_remove_mac_exit;
+ }
+
+ ice_fill_sw_rule(hw, &m_entry->fltr_info,
+ r_iter, ice_aqc_opc_remove_sw_rules);
+ r_iter = (struct ice_aqc_sw_rules_elem *)
+ ((u8 *)r_iter + s_rule_size);
+ }
+ }
+
+ /* Call AQ bulk switch rule update for all unicast addresses */
+ r_iter = s_rule;
+	/* Call AQ switch rule in chunks of at most ICE_AQ_MAX_BUF_LEN */
+ for (total_elem_left = num_unicast; total_elem_left > 0;
+ total_elem_left -= elem_sent) {
+ struct ice_aqc_sw_rules_elem *entry = r_iter;
+
+ elem_sent = min(total_elem_left,
+ (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
+ status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
+ elem_sent, ice_aqc_opc_remove_sw_rules,
+ NULL);
+ if (status)
+ break;
+ r_iter = (struct ice_aqc_sw_rules_elem *)
+ ((u8 *)r_iter + s_rule_size);
+ }
+
+ list_for_each_entry(m_list_itr, m_list, list_entry) {
+ u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr;
+
+ if (is_unicast_ether_addr(addr)) {
+			m_entry = ice_find_mac_entry(hw, addr);
+			if (!m_entry) {
+				status = ICE_ERR_OUT_OF_RANGE;
+				goto ice_remove_mac_exit;
+			}
+ mutex_lock(&sw->mac_list_lock);
+ list_del(&m_entry->list_entry);
+ mutex_unlock(&sw->mac_list_lock);
+ devm_kfree(ice_hw_to_dev(hw), m_entry);
+ }
+ }
+
+ice_remove_mac_exit:
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ return status;
+}
+
+/**
+ * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default
+ * VSI for the switch (represented by swid)
+ * @hw: pointer to the hardware structure
+ * @vsi_id: number of VSI to set as default
+ * @set: true to add the above mentioned switch rule, false to remove it
+ * @direction: ICE_FLTR_RX or ICE_FLTR_TX
+ */
+enum ice_status
+ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_fltr_info f_info;
+ enum ice_adminq_opc opcode;
+ enum ice_status status;
+ u16 s_rule_size;
+
+ s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ memset(&f_info, 0, sizeof(f_info));
+
+ f_info.lkup_type = ICE_SW_LKUP_DFLT;
+ f_info.flag = direction;
+ f_info.fltr_act = ICE_FWD_TO_VSI;
+ f_info.fwd_id.vsi_id = vsi_id;
+
+ if (f_info.flag & ICE_FLTR_RX) {
+ f_info.src = hw->port_info->lport;
+ if (!set)
+ f_info.fltr_rule_id =
+ hw->port_info->dflt_rx_vsi_rule_id;
+ } else if (f_info.flag & ICE_FLTR_TX) {
+ f_info.src = vsi_id;
+ if (!set)
+ f_info.fltr_rule_id =
+ hw->port_info->dflt_tx_vsi_rule_id;
+ }
+
+ if (set)
+ opcode = ice_aqc_opc_add_sw_rules;
+ else
+ opcode = ice_aqc_opc_remove_sw_rules;
+
+ ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
+
+ status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
+ if (status || !(f_info.flag & ICE_FLTR_TX_RX))
+ goto out;
+ if (set) {
+ u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+
+ if (f_info.flag & ICE_FLTR_TX) {
+ hw->port_info->dflt_tx_vsi_num = vsi_id;
+ hw->port_info->dflt_tx_vsi_rule_id = index;
+ } else if (f_info.flag & ICE_FLTR_RX) {
+ hw->port_info->dflt_rx_vsi_num = vsi_id;
+ hw->port_info->dflt_rx_vsi_rule_id = index;
+ }
+ } else {
+ if (f_info.flag & ICE_FLTR_TX) {
+ hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
+ hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
+ } else if (f_info.flag & ICE_FLTR_RX) {
+ hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
+ hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
+ }
+ }
+
+out:
+ devm_kfree(ice_hw_to_dev(hw), s_rule);
+ return status;
+}
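
For example, a caller that wants all otherwise-unmatched receive traffic delivered to one VSI could do the following (a sketch; the wrapper name and VSI number are hypothetical):

	/* Sketch only: install, then remove, a default Rx VSI rule */
	static enum ice_status sketch_toggle_dflt_rx_vsi(struct ice_hw *hw)
	{
		enum ice_status status;

		status = ice_cfg_dflt_vsi(hw, 5, true, ICE_FLTR_RX);
		if (status)
			return status;

		/* unmatched Rx traffic now lands on VSI 5 */

		return ice_cfg_dflt_vsi(hw, 5, false, ICE_FLTR_RX);
	}
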
+
+/**
+ * ice_remove_vlan_internal - Remove one VLAN based filter rule
+ * @hw: pointer to the hardware structure
+ * @f_entry: filter entry containing one VLAN information
+ */
+static enum ice_status
+ice_remove_vlan_internal(struct ice_hw *hw,
+ struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_fltr_info *new_fltr;
+ struct ice_fltr_mgmt_list_entry *v_list_elem;
+ u16 vsi_id;
+
+ new_fltr = &f_entry->fltr_info;
+
+ v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id);
+ if (!v_list_elem)
+ return ICE_ERR_PARAM;
+
+ vsi_id = f_entry->fltr_info.fwd_id.vsi_id;
+ return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem);
+}
+
+/**
+ * ice_remove_vlan - Remove VLAN based filter rule
+ * @hw: pointer to the hardware structure
+ * @v_list: list of VLAN entries and forwarding information
+ */
+enum ice_status
+ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
+{
+ struct ice_fltr_list_entry *v_list_itr;
+ enum ice_status status = 0;
+
+ if (!v_list || !hw)
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry(v_list_itr, v_list, list_entry) {
+ status = ice_remove_vlan_internal(hw, v_list_itr);
+ if (status) {
+ v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL;
+ return status;
+ }
+ v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS;
+ }
+ return status;
+}
+
+/**
+ * ice_add_to_vsi_fltr_list - Add VSI filters to the list
+ * @hw: pointer to the hardware structure
+ * @vsi_id: ID of VSI to remove filters from
+ * @lkup_list_head: pointer to the list that has certain lookup type filters
+ * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id
+ */
+static enum ice_status
+ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id,
+ struct list_head *lkup_list_head,
+ struct list_head *vsi_list_head)
+{
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+
+ /* check to make sure VSI id is valid and within boundary */
+ if (vsi_id >=
+ (sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1))
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
+ struct ice_fltr_info *fi;
+
+ fi = &fm_entry->fltr_info;
+ if ((fi->fltr_act == ICE_FWD_TO_VSI &&
+ fi->fwd_id.vsi_id == vsi_id) ||
+ (fi->fltr_act == ICE_FWD_TO_VSI_LIST &&
+ (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) {
+ struct ice_fltr_list_entry *tmp;
+
+ /* this memory is freed up in the caller function
+ * ice_remove_vsi_lkup_fltr() once filters for
+ * this VSI are removed
+ */
+ tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp),
+ GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ memcpy(&tmp->fltr_info, fi, sizeof(*fi));
+
+			/* Set these fields to ICE_FWD_TO_VSI and the
+			 * particular VSI id, since we are only removing this
+			 * one VSI
+ */
+ if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) {
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.fwd_id.vsi_id = vsi_id;
+ }
+
+ list_add(&tmp->list_entry, vsi_list_head);
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_id: ID of VSI to remove filters from
+ * @lkup: switch rule filter lookup type
+ */
+static void
+ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id,
+ enum ice_sw_lkup_type lkup)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *fm_entry;
+ struct list_head remove_list_head;
+ struct ice_fltr_list_entry *tmp;
+ enum ice_status status;
+
+ INIT_LIST_HEAD(&remove_list_head);
+ switch (lkup) {
+ case ICE_SW_LKUP_MAC:
+ mutex_lock(&sw->mac_list_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_id,
+ &sw->mac_list_head,
+ &remove_list_head);
+ mutex_unlock(&sw->mac_list_lock);
+ if (!status) {
+ ice_remove_mac(hw, &remove_list_head);
+ goto free_fltr_list;
+ }
+ break;
+ case ICE_SW_LKUP_VLAN:
+ mutex_lock(&sw->vlan_list_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_id,
+ &sw->vlan_list_head,
+ &remove_list_head);
+ mutex_unlock(&sw->vlan_list_lock);
+ if (!status) {
+ ice_remove_vlan(hw, &remove_list_head);
+ goto free_fltr_list;
+ }
+ break;
+ case ICE_SW_LKUP_MAC_VLAN:
+ case ICE_SW_LKUP_ETHERTYPE:
+ case ICE_SW_LKUP_ETHERTYPE_MAC:
+ case ICE_SW_LKUP_PROMISC:
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ case ICE_SW_LKUP_DFLT:
+ ice_debug(hw, ICE_DBG_SW,
+			  "Removing filters for this lookup type hasn't been implemented yet\n");
+ break;
+ }
+
+ return;
+free_fltr_list:
+ list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
+ list_del(&fm_entry->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fm_entry);
+ }
+}
+
+/**
+ * ice_remove_vsi_fltr - Remove all filters for a VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_id: ID of VSI to remove filters from
+ */
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id)
+{
+ ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_MAC_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_DFLT);
+ ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE);
+ ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
new file mode 100644
index 000000000000..6f4a0d159dbf
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_SWITCH_H_
+#define _ICE_SWITCH_H_
+
+#include "ice_common.h"
+
+#define ICE_SW_CFG_MAX_BUF_LEN 2048
+#define ICE_DFLT_VSI_INVAL 0xff
+#define ICE_VSI_INVAL_ID 0xffff
+
+/* VSI context structure for add/get/update/free operations */
+struct ice_vsi_ctx {
+ u16 vsi_num;
+ u16 vsis_allocd;
+ u16 vsis_unallocated;
+ u16 flags;
+ struct ice_aqc_vsi_props info;
+ bool alloc_from_pool;
+};
+
+enum ice_sw_fwd_act_type {
+ ICE_FWD_TO_VSI = 0,
+ ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
+ ICE_FWD_TO_Q,
+ ICE_FWD_TO_QGRP,
+ ICE_DROP_PACKET,
+ ICE_INVAL_ACT
+};
+
+/* Switch recipe ID enum values are specific to hardware */
+enum ice_sw_lkup_type {
+ ICE_SW_LKUP_ETHERTYPE = 0,
+ ICE_SW_LKUP_MAC = 1,
+ ICE_SW_LKUP_MAC_VLAN = 2,
+ ICE_SW_LKUP_PROMISC = 3,
+ ICE_SW_LKUP_VLAN = 4,
+ ICE_SW_LKUP_DFLT = 5,
+ ICE_SW_LKUP_ETHERTYPE_MAC = 8,
+ ICE_SW_LKUP_PROMISC_VLAN = 9,
+};
+
+struct ice_fltr_info {
+ /* Look up information: how to look up packet */
+ enum ice_sw_lkup_type lkup_type;
+ /* Forward action: filter action to do after lookup */
+ enum ice_sw_fwd_act_type fltr_act;
+ /* rule ID returned by firmware once filter rule is created */
+ u16 fltr_rule_id;
+ u16 flag;
+#define ICE_FLTR_RX BIT(0)
+#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
+
+ /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
+ u16 src;
+
+ union {
+ struct {
+ u8 mac_addr[ETH_ALEN];
+ } mac;
+ struct {
+ u8 mac_addr[ETH_ALEN];
+ u16 vlan_id;
+ } mac_vlan;
+ struct {
+ u16 vlan_id;
+ } vlan;
+ /* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
+ * if just using ethertype as filter. Set lkup_type as
+ * ICE_SW_LKUP_ETHERTYPE_MAC if MAC also needs to be
+ * passed in as filter.
+ */
+ struct {
+ u16 ethertype;
+ u8 mac_addr[ETH_ALEN]; /* optional */
+ } ethertype_mac;
+ } l_data;
+
+ /* Depending on filter action */
+ union {
+ /* queue id in case of ICE_FWD_TO_Q and starting
+ * queue id in case of ICE_FWD_TO_QGRP.
+ */
+ u16 q_id:11;
+ u16 vsi_id:10;
+ u16 vsi_list_id:10;
+ } fwd_id;
+
+ /* Set to num_queues if action is ICE_FWD_TO_QGRP. This field
+ * determines the range of queues the packet needs to be forwarded to
+ */
+ u8 qgrp_size;
+
+	/* Rule creation populates these indicators based on the switch type */
+ bool lb_en; /* Indicate if packet can be looped back */
+ bool lan_en; /* Indicate if packet can be forwarded to the uplink */
+};
+
+/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
+struct ice_vsi_list_map_info {
+ struct list_head list_entry;
+ DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
+ u16 vsi_list_id;
+};
+
+enum ice_sw_fltr_status {
+ ICE_FLTR_STATUS_NEW = 0,
+ ICE_FLTR_STATUS_FW_SUCCESS,
+ ICE_FLTR_STATUS_FW_FAIL,
+};
+
+struct ice_fltr_list_entry {
+ struct list_head list_entry;
+ enum ice_sw_fltr_status status;
+ struct ice_fltr_info fltr_info;
+};
+
+/* This defines an entry in the list that maintains MAC or VLAN membership
+ * to HW list mapping, since multiple VSIs can subscribe to the same MAC or
+ * VLAN. As an optimization, the VSI list should be created only when a
+ * second VSI becomes a subscriber to the same MAC or VLAN address.
+ */
+struct ice_fltr_mgmt_list_entry {
+ /* back pointer to VSI list id to VSI list mapping */
+ struct ice_vsi_list_map_info *vsi_list_info;
+ u16 vsi_count;
+#define ICE_INVAL_LG_ACT_INDEX 0xffff
+ u16 lg_act_idx;
+#define ICE_INVAL_SW_MARKER_ID 0xffff
+ u16 sw_marker_id;
+ struct list_head list_entry;
+ struct ice_fltr_info fltr_info;
+#define ICE_INVAL_COUNTER_ID 0xff
+ u8 counter_index;
+};
+
+/* VSI related commands */
+enum ice_status
+ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd);
+
+enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
+
+/* Switch/bridge related commands */
+enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
+enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id);
+enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
+enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
+enum ice_status
+ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction);
+
+#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
new file mode 100644
index 000000000000..6481e3d86374
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -0,0 +1,1782 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+/* The driver transmit and receive code */
+
+#include <linux/prefetch.h>
+#include <linux/mm.h>
+#include "ice.h"
+
+#define ICE_RX_HDR_SIZE 256
+
+/**
+ * ice_unmap_and_free_tx_buf - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buf: the buffer to free
+ */
+static void
+ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
+{
+ if (tx_buf->skb) {
+ dev_kfree_skb_any(tx_buf->skb);
+ if (dma_unmap_len(tx_buf, len))
+ dma_unmap_single(ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ }
+
+ tx_buf->next_to_watch = NULL;
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+ /* tx_buf must be completely set up in the transmit path */
+}
+
+static struct netdev_queue *txring_txq(const struct ice_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->q_index);
+}
+
+/**
+ * ice_clean_tx_ring - Free any empty Tx buffers
+ * @tx_ring: ring to be cleaned
+ */
+void ice_clean_tx_ring(struct ice_ring *tx_ring)
+{
+ unsigned long size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_buf)
+ return;
+
+	/* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++)
+ ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
+
+ size = sizeof(struct ice_tx_buf) * tx_ring->count;
+ memset(tx_ring->tx_buf, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+}
+
+/**
+ * ice_free_tx_ring - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ */
+void ice_free_tx_ring(struct ice_ring *tx_ring)
+{
+ ice_clean_tx_ring(tx_ring);
+ devm_kfree(tx_ring->dev, tx_ring->tx_buf);
+ tx_ring->tx_buf = NULL;
+
+ if (tx_ring->desc) {
+ dmam_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+}
+
+/**
+ * ice_clean_tx_irq - Reclaim resources after transmit completes
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ */
+static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
+ int napi_budget)
+{
+ unsigned int total_bytes = 0, total_pkts = 0;
+ unsigned int budget = vsi->work_lmt;
+ s16 i = tx_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+
+ tx_buf = &tx_ring->tx_buf[i];
+ tx_desc = ICE_TX_DESC(tx_ring, i);
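+	/* the index is kept biased by -count so the wrap checks below
+	 * reduce to a test against zero; adding count back restores the
+	 * true ring position
+	 */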
+ i -= tx_ring->count;
+
+ do {
+ struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ smp_rmb(); /* prevent any other reads prior to eop_desc */
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_pkts += tx_buf->gso_segs;
+
+ /* free the skb */
+ napi_consume_skb(tx_buf->skb, napi_budget);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buf data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_buf;
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ }
+
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.pkts += total_pkts;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_pkts += total_pkts;
+
+ netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
+ total_bytes);
+
+#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
+ if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
+ (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->q_index) &&
+ !test_bit(__ICE_DOWN, vsi->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->q_index);
+ ++tx_ring->tx_stats.restart_q;
+ }
+ }
+
+ return !!budget;
+}
+
+/**
+ * ice_setup_tx_ring - Allocate the Tx descriptors
+ * @tx_ring: the Tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ */
+int ice_setup_tx_ring(struct ice_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ /* warn if we are about to overwrite the pointer */
+ WARN_ON(tx_ring->tx_buf);
+ bi_size = sizeof(struct ice_tx_buf) * tx_ring->count;
+ tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
+ if (!tx_ring->tx_buf)
+ return -ENOMEM;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
+ GFP_KERNEL);
+ if (!tx_ring->desc) {
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_ring->size);
+ goto err;
+ }
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ devm_kfree(dev, tx_ring->tx_buf);
+ tx_ring->tx_buf = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * ice_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ */
+void ice_clean_rx_ring(struct ice_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ unsigned long size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_buf)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+
+ if (rx_buf->skb) {
+ dev_kfree_skb(rx_buf->skb);
+ rx_buf->skb = NULL;
+ }
+ if (!rx_buf->page)
+ continue;
+
+ dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_pages(rx_buf->page, 0);
+
+ rx_buf->page = NULL;
+ rx_buf->page_offset = 0;
+ }
+
+ size = sizeof(struct ice_rx_buf) * rx_ring->count;
+ memset(rx_ring->rx_buf, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * ice_free_rx_ring - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ */
+void ice_free_rx_ring(struct ice_ring *rx_ring)
+{
+ ice_clean_rx_ring(rx_ring);
+ devm_kfree(rx_ring->dev, rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+
+ if (rx_ring->desc) {
+ dmam_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+}
+
+/**
+ * ice_setup_rx_ring - Allocate the Rx descriptors
+ * @rx_ring: the rx ring to set up
+ *
+ * Return 0 on success, negative on error
+ */
+int ice_setup_rx_ring(struct ice_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ /* warn if we are about to overwrite the pointer */
+ WARN_ON(rx_ring->rx_buf);
+ bi_size = sizeof(struct ice_rx_buf) * rx_ring->count;
+ rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
+ if (!rx_ring->rx_buf)
+ return -ENOMEM;
+
+ /* round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
+ GFP_KERNEL);
+ if (!rx_ring->desc) {
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rx_ring->size);
+ goto err;
+ }
+
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ devm_kfree(dev, rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * ice_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new head index
+ */
+static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * ice_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buf struct to modify
+ *
+ * Returns true if the page was successfully allocated or
+ * reused.
+ */
+static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
+ struct ice_rx_buf *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page)) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
+
+ /* alloc new page for storage */
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* if mapping failed, free memory back to the system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_pages(page, 0);
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+
+ return true;
+}
+
+/**
+ * ice_alloc_rx_bufs - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ *
+ * Returns false if all allocations were successful, true if any fail
+ */
+bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
+{
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ struct ice_rx_buf *bi;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return false;
+
+ /* get the RX descriptor and buffer based on next_to_use */
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+ bi = &rx_ring->rx_buf[ntu];
+
+ do {
+ if (!ice_alloc_mapped_page(rx_ring, bi))
+ goto no_bufs;
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+
+ rx_desc++;
+ bi++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buf;
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.status_error0 = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return false;
+
+no_bufs:
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+
+ /* make sure to come back via polling to try again after
+ * allocation failure
+ */
+ return true;
+}
+
+/**
+ * ice_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static bool ice_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_buf: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buf to place the data into
+ *
+ * This function will add the data contained in rx_buf->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ */
+static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb)
+{
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ICE_RXBUF_2048;
+#else
+ unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
+ unsigned int truesize;
+#endif /* PAGE_SIZE < 8192 */
+
+ struct page *page;
+ unsigned int size;
+
+ size = le16_to_cpu(rx_desc->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+
+ page = rx_buf->page;
+
+#if (PAGE_SIZE >= 8192)
+ truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif /* PAGE_SIZE >= 8192 */
+
+ /* will the data fit in the skb we allocated? if so, just
+ * copy it as it is pretty small anyway
+ */
+ if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buf->page_offset;
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!ice_page_is_reserved(page)))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ __free_pages(page, 0);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buf->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(ice_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buf->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buf->page_offset += truesize;
+
+ if (rx_buf->page_offset > last_offset)
+ return false;
+#endif /* PAGE_SIZE < 8192 */
+
+ /* Even if we own the page, we are not allowed to use atomic_set().
+ * That would break get_page_unless_zero() users.
+ */
+ get_page(rx_buf->page);
+
+ return true;
+}
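+
+/* A sketch of the reuse scheme above, assuming 4K pages and 2K buffers:
+ * the page is split into two 2K halves and page_offset ^= truesize simply
+ * toggles between them:
+ *
+ *   page_offset = 0     half A is given to hardware, half B is free
+ *   ^= 2048  ->  2048   half B is given to hardware, half A is in an skb
+ *   ^= 2048  ->  0      back to half A
+ *
+ * The get_page() above keeps the page alive while one half sits in an skb
+ * and the other half goes back on the ring.
+ */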
+
+/**
+ * ice_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buf: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ */
+static void ice_reuse_rx_page(struct ice_ring *rx_ring,
+ struct ice_rx_buf *old_buf)
+{
+ u16 nta = rx_ring->next_to_alloc;
+ struct ice_rx_buf *new_buf;
+
+ new_buf = &rx_ring->rx_buf[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ *new_buf = *old_buf;
+}
+
+/**
+ * ice_fetch_rx_buf - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
+ *
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ */
+static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ struct ice_rx_buf *rx_buf;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+ page = rx_buf->page;
+ prefetchw(page);
+
+ skb = rx_buf->skb;
+
+ if (likely(!skb)) {
+ u8 *page_addr = page_address(page) + rx_buf->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch((void *)(page_addr + L1_CACHE_BYTES));
+#endif /* L1_CACHE_BYTES */
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ ICE_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_buf_failed++;
+ return NULL;
+ }
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+
+ skb_record_rx_queue(skb, rx_ring->q_index);
+ } else {
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
+ rx_buf->page_offset,
+ ICE_RXBUF_2048,
+ DMA_FROM_DEVICE);
+
+ rx_buf->skb = NULL;
+ }
+
+ /* pull page into skb */
+ if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ ice_reuse_rx_page(rx_ring, rx_buf);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buf->page = NULL;
+
+ return skb;
+}
+
+/**
+ * ice_pull_tail - ice specific version of skb_pull_tail
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an ice specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void ice_pull_tail(struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int pull_len;
+ unsigned char *va;
+
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 (so that skb_pad works).
+ */
+ pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * ice_cleanup_headers - Correct empty headers
+ * @skb: pointer to current skb being fixed
+ *
+ * Addresses the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ */
+static bool ice_cleanup_headers(struct sk_buff *skb)
+{
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ ice_pull_tail(skb);
+
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
+ const u16 stat_err_bits)
+{
+ return !!(rx_desc->wb.status_error0 &
+ cpu_to_le16(stat_err_bits));
+}
+
+/**
+ * ice_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ */
+static bool ice_is_non_eop(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(ICE_RX_DESC(rx_ring, ntc));
+
+ /* if we are the last buffer then there is nothing else to do */
+#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
+ if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
+ return false;
+
+ /* place skb in next buffer to be received */
+ rx_ring->rx_buf[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
+/**
+ * ice_ptype_to_htype - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ */
+static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
+{
+ return PKT_HASH_TYPE_NONE;
+}
+
+/**
+ * ice_rx_hash - set the hash value in the skb
+ * @rx_ring: descriptor ring
+ * @rx_desc: specific descriptor
+ * @skb: pointer to current skb
+ * @rx_ptype: the ptype value from the descriptor
+ */
+static void
+ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 rx_ptype)
+{
+ struct ice_32b_rx_flex_desc_nic *nic_mdid;
+ u32 hash;
+
+ if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+ return;
+
+ if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
+ return;
+
+ nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ hash = le32_to_cpu(nic_mdid->rss_hash);
+ skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+}
+
+/**
+ * ice_rx_csum - Indicate in skb if checksum is good
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: the packet type decoded by hardware
+ *
+ * skb->protocol must be set before this function is called
+ */
+static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
+ union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+{
+ struct ice_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+
+ rx_status = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_error = rx_status;
+
+ decoded = ice_decode_rx_desc_ptype(ptype);
+
+ /* Start with CHECKSUM_NONE and by default csum_level = 0 */
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
+ /* check if Rx checksum is enabled */
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* check if HW has decoded the packet and checksum */
+ if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
+ return;
+
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
+ ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+
+ if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ goto checksum_fail;
+ else if (ipv6 && (rx_status &
+ (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
+ goto checksum_fail;
+
+ /* check for L4 errors and handle packets that were not able to be
+ * checksummed due to arrival speed
+ */
+ if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
+ goto checksum_fail;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ case ICE_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ default:
+ break;
+ }
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
+}
+
+/**
+ * ice_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @ptype: the packet type decoded by hardware
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ */
+static void ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype)
+{
+ ice_rx_hash(rx_ring, rx_desc, skb, ptype);
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
+}
+
+/**
+ * ice_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ *
+ * This function sends the completed packet (via skb) up the stack using
+ * GRO receive functions (with/without VLAN tag)
+ */
+static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
+ u16 vlan_tag)
+{
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK)) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ }
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
+}
+
+/**
+ * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
+ */
+static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ bool failure = false;
+
+ /* start the loop to process RX packets bounded by 'budget' */
+ while (likely(total_rx_pkts < (unsigned int)budget)) {
+ union ice_32b_rx_flex_desc *rx_desc;
+ struct sk_buff *skb;
+ u16 stat_err_bits;
+ u16 vlan_tag = 0;
+ u8 rx_ptype;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= ICE_RX_BUF_WRITE) {
+ failure = failure ||
+ ice_alloc_rx_bufs(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* get the RX desc from RX ring based on 'next_to_clean' */
+ rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+ /* status_error_len will always be zero for unused descriptors
+ * because it's cleared in cleanup, and overlaps with hdr_addr
+ * which is always zero because packet split isn't used. If the
+ * hardware wrote DD then it will be non-zero.
+ */
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc, stat_err_bits))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * DD bit is set.
+ */
+ dma_rmb();
+
+ /* allocate (if needed) and populate skb */
+ skb = ice_fetch_rx_buf(rx_ring, rx_desc);
+ if (!skb)
+ break;
+
+ cleaned_count++;
+
+ /* skip if it is a non-EOP desc */
+ if (ice_is_non_eop(rx_ring, rx_desc, skb))
+ continue;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
+ if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
+ ICE_RX_FLEX_DESC_PTYPE_M;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
+ if (ice_test_staterr(rx_desc, stat_err_bits))
+ vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+
+ /* correct empty headers and pad skb if needed (to make a valid
+ * Ethernet frame)
+ */
+ if (ice_cleanup_headers(skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+
+ /* populate checksum, VLAN, and protocol */
+ ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+
+ /* send completed skb up the stack */
+ ice_receive_skb(rx_ring, skb, vlan_tag);
+
+ /* update budget accounting */
+ total_rx_pkts++;
+ }
+
+ /* update queue and vector specific stats */
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.pkts += total_rx_pkts;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ /* guarantee a trip back through this routine if there was a failure */
+ return failure ? budget : (int)total_rx_pkts;
+}
+
+/**
+ * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ */
+int ice_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ice_q_vector *q_vector =
+ container_of(napi, struct ice_q_vector, napi);
+ struct ice_vsi *vsi = q_vector->vsi;
+ struct ice_pf *pf = vsi->back;
+ bool clean_complete = true;
+ int budget_per_ring = 0;
+ struct ice_ring *ring;
+ int work_done = 0;
+
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ ice_for_each_ring(ring, q_vector->tx)
+ if (!ice_clean_tx_irq(vsi, ring, budget))
+ clean_complete = false;
+
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0)
+ return budget;
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ */
+ if (q_vector->num_ring_rx)
+ budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
+
+ ice_for_each_ring(ring, q_vector->rx) {
+ int cleaned;
+
+ cleaned = ice_clean_rx_irq(ring, budget_per_ring);
+ work_done += cleaned;
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
+ }
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ /* Work is done so exit the polling mode and re-enable the interrupt */
+ napi_complete_done(napi, work_done);
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
+ return 0;
+}
+
+/* helper function for building cmd/type/offset */
+static __le64
+build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
+{
+ return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
+ (td_cmd << ICE_TXD_QW1_CMD_S) |
+ (td_offset << ICE_TXD_QW1_OFFSET_S) |
+ ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
+ (td_tag << ICE_TXD_QW1_L2TAG1_S));
+}
+
+/**
+ * __ice_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ */
+static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+ /* Check again in a case another CPU has just made room available. */
+ if (likely(ICE_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_subqueue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
+ ++tx_ring->tx_stats.restart_q;
+ return 0;
+}
+
+/**
+ * ice_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ */
+static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+{
+ if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ice_maybe_stop_tx(tx_ring, size);
+}
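+
+/* The stop/wake pairing above relies on two barriers; a hypothetical
+ * interleaving it guards against:
+ *
+ *   xmit path                          clean path (ice_clean_tx_irq)
+ *   --------------------------------   --------------------------------
+ *   netif_stop_subqueue()
+ *   smp_mb()                           update next_to_clean
+ *   re-check ICE_DESC_UNUSED()         smp_mb()
+ *                                      see stopped queue, wake it
+ *
+ * Without the barriers, the xmit path could miss the freshly cleaned
+ * descriptors and the clean path could miss the stopped state, leaving
+ * the queue stopped with no one left to wake it.
+ */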
+
+/**
+ * ice_tx_map - Build the Tx descriptor
+ * @tx_ring: ring to send buffer on
+ * @first: first buffer info buffer to use
+ * @off: pointer to struct that holds offload parameters
+ *
+ * This function loops over the skb data pointed to by *first
+ * and gets a physical address for each memory location and programs
+ * it and the length into the transmit descriptor.
+ */
+static void
+ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
+ struct ice_tx_offload_params *off)
+{
+ u64 td_offset, td_tag, td_cmd;
+ u16 i = tx_ring->next_to_use;
+ struct skb_frag_struct *frag;
+ unsigned int data_len, size;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+
+ td_tag = off->td_l2tag1;
+ td_cmd = off->td_cmd;
+ td_offset = off->td_offset;
+ skb = first->skb;
+
+ data_len = skb->data_len;
+ size = skb_headlen(skb);
+
+ tx_desc = ICE_TX_DESC(tx_ring, i);
+
+ if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
+ td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
+ td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
+ ICE_TX_FLAGS_VLAN_S;
+ }
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_buf = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
+
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ /* align size to end of page */
+ max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+
+ /* account for data chunks larger than the hardware
+ * can handle
+ */
+ while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, max_data, td_tag);
+
+ tx_desc++;
+ i++;
+
+ if (i == tx_ring->count) {
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ dma += max_data;
+ size -= max_data;
+
+ max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
+
+ tx_desc++;
+ i++;
+
+ if (i == tx_ring->count) {
+ tx_desc = ICE_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_buf = &tx_ring->tx_buf[i];
+ }
+
+ /* record bytecount for BQL */
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
+ /* record SW timestamp if HW timestamp is not available */
+ skb_tx_timestamp(first->skb);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ /* write last descriptor with RS and EOP bits */
+ td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ *
+ * We also use this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ tx_ring->next_to_use = i;
+
+ ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ /* notify HW of packet */
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ writel(i, tx_ring->tail);
+
+ /* we need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
+ }
+
+ return;
+
+dma_error:
+ /* clear dma mappings for failed tx_buf map */
+ for (;;) {
+ tx_buf = &tx_ring->tx_buf[i];
+ ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
+ if (tx_buf == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ tx_ring->next_to_use = i;
+}
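+
+/* Worked example for the max_data alignment above, assuming a mapping
+ * that is not 4K aligned:
+ *
+ *   dma = 0x...1800 (2K into a 4K page)
+ *   -dma & (ICE_MAX_READ_REQ_SIZE - 1) = 0x800
+ *   max_data = 12K + 2K = 14K (still under the 16K - 1 descriptor limit)
+ *
+ * so the first chunk of an oversized buffer ends exactly on a 4K boundary
+ * and every following 12K chunk stays 4K aligned for the device's read
+ * requests.
+ */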
+
+/**
+ * ice_tx_csum - Enable Tx checksum offloads
+ * @first: pointer to the first descriptor
+ * @off: pointer to struct that holds offload parameters
+ *
+ * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
+ */
+static
+int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
+{
+ u32 l4_len = 0, l3_len = 0, l2_len = 0;
+ struct sk_buff *skb = first->skb;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ __be16 frag_off, protocol;
+ unsigned char *exthdr;
+ u32 offset, cmd = 0;
+ u8 l4_proto = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* compute outer L2 header size */
+ l2_len = ip.hdr - skb->data;
+ offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
+
+ if (skb->encapsulation)
+ return -1;
+
+ /* Enable IP checksum offloads */
+ protocol = vlan_get_protocol(skb);
+ if (protocol == htons(ETH_P_IP)) {
+ l4_proto = ip.v4->protocol;
+ /* the stack computes the IP header already; the only time we
+ * need the hardware to recompute it is in the case of TSO.
+ */
+ if (first->tx_flags & ICE_TX_FLAGS_TSO)
+ cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ else
+ cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
+ &frag_off);
+ } else {
+ return -1;
+ }
+
+ /* compute inner L3 header size */
+ l3_len = l4.hdr - ip.hdr;
+ offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+ l4_len = l4.tcp->doff;
+ offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+ l4_len = (sizeof(struct udphdr) >> 2);
+ offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ case IPPROTO_SCTP:
+ default:
+ if (first->tx_flags & ICE_TX_FLAGS_TSO)
+ return -1;
+ skb_checksum_help(skb);
+ return 0;
+ }
+
+ off->td_cmd |= cmd;
+ off->td_offset |= offset;
+ return 1;
+}
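+
+/* Worked example of the offset packing above for a plain TCP/IPv4 frame
+ * with no VLAN or options (values in the units each descriptor field
+ * expects):
+ *
+ *   MACLEN = 14 / 2 = 7   (2-byte words)
+ *   IPLEN  = 20 / 4 = 5   (4-byte words)
+ *   L4LEN  = doff   = 5   (4-byte words)
+ *
+ * each shifted into place with the corresponding ICE_TX_DESC_LEN_* shift.
+ */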
+
+/**
+ * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @tx_ring: ring to send buffer on
+ * @first: pointer to struct ice_tx_buf
+ *
+ * Checks the skb and sets up the corresponding generic transmit flags
+ * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns an error code to indicate the frame should be dropped upon error,
+ * otherwise returns 0 to indicate the flags have been set properly.
+ */
+static int
+ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
+{
+ struct sk_buff *skb = first->skb;
+ __be16 protocol = skb->protocol;
+
+ if (protocol == htons(ETH_P_8021Q) &&
+ !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* when HW VLAN acceleration is turned off by the user the
+ * stack sets the protocol to 8021q so that the driver
+ * can take any steps required to support the SW only
+ * VLAN handling. In our case the driver doesn't need
+ * to take any further steps so just set the protocol
+ * to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ goto out;
+ }
+
+ /* if we have a HW VLAN tag being added, default to the HW one */
+ if (skb_vlan_tag_present(skb)) {
+ first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ } else if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+
+ /* for SW VLAN, check the next protocol and store the tag */
+ vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
+ sizeof(_vhdr),
+ &_vhdr);
+ if (!vhdr)
+ return -EINVAL;
+
+ first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
+ ICE_TX_FLAGS_VLAN_S;
+ first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
+ }
+
+out:
+ return 0;
+}
+
+/**
+ * ice_tso - computes mss and TSO length to prepare for TSO
+ * @first: pointer to struct ice_tx_buf
+ * @off: pointer to struct that holds offload parameters
+ *
+ * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
+ */
+static
+int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
+{
+ struct sk_buff *skb = first->skb;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u64 cd_mss, cd_tso_len;
+ u32 paylen, l4_start;
+ int err;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
+
+ /* determine offset of transport header */
+ l4_start = l4.hdr - skb->data;
+
+ /* remove payload length from checksum */
+ paylen = skb->len - l4_start;
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+ /* compute length of segmentation header */
+ off->header_len = (l4.tcp->doff * 4) + l4_start;
+
+ /* update gso_segs and bytecount */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * off->header_len;
+
+ cd_tso_len = skb->len - off->header_len;
+ cd_mss = skb_shinfo(skb)->gso_size;
+
+ /* record cdesc_qw1 with TSO parameters */
+ off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
+ (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
+ (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
+ first->tx_flags |= ICE_TX_FLAGS_TSO;
+ return 1;
+}
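+
+/* Example of the TSO accounting above, assuming 14 byte Ethernet, 20 byte
+ * IPv4 and 20 byte TCP headers with an MSS of 1448 and 46 segments:
+ *
+ *   l4_start   = 34, header_len = 20 + 34 = 54
+ *   skb->len   = 54 + 46 * 1448 = 66662
+ *   cd_tso_len = 66662 - 54 = 66608
+ *   bytecount grows by (46 - 1) * 54 = 2430 bytes of replicated headers
+ */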
+
+/**
+ * ice_txd_use_count - estimate the number of descriptors needed for Tx
+ * @size: transmit request size in bytes
+ *
+ * Due to hardware alignment restrictions (4K alignment), we need to
+ * assume that we can have no more than 12K of data per descriptor, even
+ * though each descriptor can take up to 16K - 1 bytes of aligned memory.
+ * Thus, we need to divide by 12K. But division is slow! Instead,
+ * we decompose the operation into shifts and one relatively cheap
+ * multiply operation.
+ *
+ * To divide by 12K, we first divide by 4K, then divide by 3:
+ * To divide by 4K, shift right by 12 bits
+ * To divide by 3, multiply by 85, then divide by 256
+ * (Divide by 256 is done by shifting right by 8 bits)
+ * Finally, we add one to round up. Because 256 isn't an exact multiple of
+ * 3, we'll underestimate near each multiple of 12K. This is actually more
+ * accurate as we have 4K - 1 of wiggle room that we can fit into the last
+ * segment. For our purposes this is accurate out to 1M which is orders of
+ * magnitude greater than our largest possible GSO size.
+ *
+ * This would then be implemented as:
+ * return (((size >> 12) * 85) >> 8) + 1;
+ *
+ * Since multiplication and division are commutative, we can reorder
+ * operations into:
+ * return ((size * 85) >> 20) + 1;
+ */
+static unsigned int ice_txd_use_count(unsigned int size)
+{
+ return ((size * 85) >> 20) + 1;
+}
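+
+/* Spot-checking the approximation above:
+ *
+ *   size = 60000: (60000 * 85) >> 20 = 4, + 1 = 5 descriptors,
+ *                 matching DIV_ROUND_UP(60000, 12288) = 5
+ *   size = 12289: (12289 * 85) >> 20 = 0, + 1 = 1, although
+ *                 DIV_ROUND_UP(12289, 12288) = 2; the underestimate is
+ *                 harmless because 12289 bytes still fit in one
+ *                 16K - 1 byte descriptor, as described above.
+ */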
+
+/**
+ * ice_xmit_desc_count - calculate number of tx descriptors needed
+ * @skb: send buffer
+ *
+ * Returns number of data descriptors needed for this skb.
+ */
+static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
+{
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int count = 0, size = skb_headlen(skb);
+
+ for (;;) {
+ count += ice_txd_use_count(size);
+
+ if (!nr_frags--)
+ break;
+
+ size = skb_frag_size(frag++);
+ }
+
+ return count;
+}
+
+/**
+ * __ice_chk_linearize - Check if there are more than 8 buffers per packet
+ * @skb: send buffer
+ *
+ * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
+ */
+static bool __ice_chk_linearize(struct sk_buff *skb)
+{
+ const struct skb_frag_struct *frag, *stale;
+ int nr_frags, sum;
+
+ /* no need to check if number of frags is less than 7 */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (nr_frags < (ICE_MAX_BUF_TXD - 1))
+ return false;
+
+ /* We need to walk through the list and validate that each group
+ * of 6 fragments totals at least gso_size.
+ */
+ nr_frags -= ICE_MAX_BUF_TXD - 2;
+ frag = &skb_shinfo(skb)->frags[0];
+
+ /* Initialize size to the negative value of gso_size minus 1. We
+ * use this as the worst case scenario in which the frag ahead
+ * of us only provides one byte, which is why we are limited to 6
+ * descriptors for a single transmit as the header and previous
+ * fragment are already consuming 2 descriptors.
+ */
+ sum = 1 - skb_shinfo(skb)->gso_size;
+
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+ stale = &skb_shinfo(skb)->frags[0];
+ for (;;) {
+ sum += skb_frag_size(frag++);
+
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+
+ if (!nr_frags--)
+ break;
+
+ sum -= skb_frag_size(stale++);
+ }
+
+ return false;
+}
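+
+/* A hypothetical walk of the window above: gso_size = 4096 and nine
+ * 512 byte frags. sum starts at 1 - 4096 = -4095; frags 0 through 4
+ * bring it to -4095 + 5 * 512 = -1535, and adding frag 5 only reaches
+ * -1023, so the first test already fails and the skb must be linearized:
+ * six consecutive frags cannot cover one 4K segment. With 2048 byte
+ * frags instead, sum goes positive on the first test and stays there,
+ * so the skb is sent as-is.
+ */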
+
+/**
+ * ice_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @count: number of buffers used
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ */
+static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
+{
+ /* Both TSO and single send will work if count is less than 8 */
+ if (likely(count < ICE_MAX_BUF_TXD))
+ return false;
+
+ if (skb_is_gso(skb))
+ return __ice_chk_linearize(skb);
+
+ /* we can support up to 8 data buffers for a single send */
+ return count != ICE_MAX_BUF_TXD;
+}
+
+/**
+ * ice_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+static netdev_tx_t
+ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+{
+ struct ice_tx_offload_params offload = { 0 };
+ struct ice_tx_buf *first;
+ unsigned int count;
+ int tso, csum;
+
+ count = ice_xmit_desc_count(skb);
+ if (ice_chk_linearize(skb, count)) {
+ if (__skb_linearize(skb))
+ goto out_drop;
+ count = ice_txd_use_count(skb->len);
+ tx_ring->tx_stats.tx_linearize++;
+ }
+
+ /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
+ * + 4 desc gap to avoid the cache line where head is,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ tx_ring->tx_stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ offload.tx_ring = tx_ring;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buf[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->gso_segs = 1;
+ first->tx_flags = 0;
+
+ /* prepare the VLAN tagging flags for Tx */
+ if (ice_tx_prepare_vlan_flags(tx_ring, first))
+ goto out_drop;
+
+ /* set up TSO offload */
+ tso = ice_tso(first, &offload);
+ if (tso < 0)
+ goto out_drop;
+
+ /* always set up Tx checksum offload */
+ csum = ice_tx_csum(first, &offload);
+ if (csum < 0)
+ goto out_drop;
+
+ if (tso || offload.cd_tunnel_params) {
+ struct ice_tx_ctx_desc *cdesc;
+ int i = tx_ring->next_to_use;
+
+ /* grab the next descriptor */
+ cdesc = ICE_TX_CTX_DESC(tx_ring, i);
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* setup context descriptor */
+ cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
+ cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
+ cdesc->rsvd = cpu_to_le16(0);
+ cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
+ }
+
+ ice_tx_map(tx_ring, first, &offload);
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_ring *tx_ring;
+
+ tx_ring = vsi->tx_rings[skb->queue_mapping];
+
+ /* hardware can't handle really short frames; hardware padding works
+ * beyond this point
+ */
+ if (skb_put_padto(skb, ICE_MIN_TX_LEN))
+ return NETDEV_TX_OK;
+
+ return ice_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
new file mode 100644
index 000000000000..567067b650c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_TXRX_H_
+#define _ICE_TXRX_H_
+
+#define ICE_DFLT_IRQ_WORK 256
+#define ICE_RXBUF_2048 2048
+#define ICE_MAX_CHAINED_RX_BUFS 5
+#define ICE_MAX_BUF_TXD 8
+#define ICE_MIN_TX_LEN 17
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define ICE_MAX_READ_REQ_SIZE 4096
+#define ICE_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define ICE_MAX_DATA_PER_TXD_ALIGNED \
+ (~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
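+/* e.g. ~(4096 - 1) & (16384 - 1) = 12288, so at most 12K of data is
+ * placed in a single descriptor once the 4K alignment is applied
+ */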
+
+#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
+#define ICE_MAX_TXQ_PER_TXQG 128
+
+/* Tx Descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#define ICE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
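+/* e.g. count = 512, next_to_clean = 10, next_to_use = 500 gives
+ * 512 + 10 - 500 - 1 = 21 unused descriptors; the "- 1" keeps one slot
+ * empty so a full ring can be told apart from an empty one
+ */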
+
+#define ICE_TX_FLAGS_TSO BIT(0)
+#define ICE_TX_FLAGS_HW_VLAN BIT(1)
+#define ICE_TX_FLAGS_SW_VLAN BIT(2)
+#define ICE_TX_FLAGS_VLAN_M 0xffff0000
+#define ICE_TX_FLAGS_VLAN_S 16
+
+struct ice_tx_buf {
+ struct ice_tx_desc *next_to_watch;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ u32 tx_flags;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct ice_tx_offload_params {
+ u8 header_len;
+ u32 td_cmd;
+ u32 td_offset;
+ u32 td_l2tag1;
+ u16 cd_l2tag2;
+ u32 cd_tunnel_params;
+ u64 cd_qw1;
+ struct ice_ring *tx_ring;
+};
+
+struct ice_rx_buf {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
+};
+
+struct ice_q_stats {
+ u64 pkts;
+ u64 bytes;
+};
+
+struct ice_txq_stats {
+ u64 restart_q;
+ u64 tx_busy;
+ u64 tx_linearize;
+};
+
+struct ice_rxq_stats {
+ u64 non_eop_descs;
+ u64 alloc_page_failed;
+ u64 alloc_buf_failed;
+ u64 page_reuse_count;
+};
+
+/* this enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere in the manual
+ * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register, but instead is a special value meaning "don't update" ITR0/1/2.
+ */
+enum ice_dyn_idx_t {
+ ICE_IDX_ITR0 = 0,
+ ICE_IDX_ITR1 = 1,
+ ICE_IDX_ITR2 = 2,
+ ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* Header split modes defined by DTYPE field of Rx RLAN context */
+enum ice_rx_dtype {
+ ICE_RX_DTYPE_NO_SPLIT = 0,
+ ICE_RX_DTYPE_HEADER_SPLIT = 1,
+ ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
+};
+
+/* indices into GLINT_ITR registers */
+#define ICE_RX_ITR ICE_IDX_ITR0
+#define ICE_TX_ITR ICE_IDX_ITR1
+#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define ICE_ITR_8K 0x003E
+
+/* apply ITR HW granularity translation to program the HW registers */
+#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
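+/* e.g., assuming an itr_gran shift of 1 (2 us register units): a stored
+ * user setting of (ICE_ITR_DYNAMIC | 50) programs (50 >> 1) = 25 register
+ * units, i.e. 50 us
+ */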
+
+/* Legacy or Advanced Mode Queue */
+#define ICE_TX_ADVANCED 0
+#define ICE_TX_LEGACY 1
+
+/* descriptor ring, associated with a VSI */
+struct ice_ring {
+ struct ice_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ struct net_device *netdev; /* netdev ring maps to */
+ struct ice_vsi *vsi; /* Backreference to associated VSI */
+ struct ice_q_vector *q_vector; /* Backreference to associated vector */
+ u8 __iomem *tail;
+ union {
+ struct ice_tx_buf *tx_buf;
+ struct ice_rx_buf *rx_buf;
+ };
+ u16 q_index; /* Queue number of ring */
+ u32 txq_teid; /* Added Tx queue TEID */
+
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware supports 2us/1us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+
+ /* used in interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ bool ring_active; /* is ring online or not */
+
+ /* stats structs */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct ice_txq_stats tx_stats;
+ struct ice_rxq_stats rx_stats;
+ };
+
+ unsigned int size; /* length of descriptor ring in bytes */
+ dma_addr_t dma; /* physical address of ring */
+ struct rcu_head rcu; /* to avoid race on free */
+ u16 next_to_alloc;
+} ____cacheline_internodealigned_in_smp;
+
+enum ice_latency_range {
+ ICE_LOWEST_LATENCY = 0,
+ ICE_LOW_LATENCY = 1,
+ ICE_BULK_LATENCY = 2,
+ ICE_ULTRA_LATENCY = 3,
+};
+
+struct ice_ring_container {
+ /* array of pointers to rings */
+ struct ice_ring *ring;
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_pkts; /* total packets processed this int */
+ enum ice_latency_range latency_range;
+ u16 itr;
+};
+
+/* iterator for handling rings in ring container */
+#define ice_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos; pos = pos->next)
+
+bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
+netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+void ice_clean_tx_ring(struct ice_ring *tx_ring);
+void ice_clean_rx_ring(struct ice_ring *rx_ring);
+int ice_setup_tx_ring(struct ice_ring *tx_ring);
+int ice_setup_rx_ring(struct ice_ring *rx_ring);
+void ice_free_tx_ring(struct ice_ring *tx_ring);
+void ice_free_rx_ring(struct ice_ring *rx_ring);
+int ice_napi_poll(struct napi_struct *napi, int budget);
+
+#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
new file mode 100644
index 000000000000..99c8a9a71b5e
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_TYPE_H_
+#define _ICE_TYPE_H_
+
+#include "ice_status.h"
+#include "ice_hw_autogen.h"
+#include "ice_osdep.h"
+#include "ice_controlq.h"
+#include "ice_lan_tx_rx.h"
+
+#define ICE_BYTES_PER_WORD 2
+#define ICE_BYTES_PER_DWORD 4
+
+static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
+{
+ return test_bit(tc, (unsigned long *)&bitmap);
+}
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+#define ICE_DBG_INIT BIT_ULL(1)
+#define ICE_DBG_LINK BIT_ULL(4)
+#define ICE_DBG_QCTX BIT_ULL(6)
+#define ICE_DBG_NVM BIT_ULL(7)
+#define ICE_DBG_LAN BIT_ULL(8)
+#define ICE_DBG_SW BIT_ULL(13)
+#define ICE_DBG_SCHED BIT_ULL(14)
+#define ICE_DBG_RES BIT_ULL(17)
+#define ICE_DBG_AQ_MSG BIT_ULL(24)
+#define ICE_DBG_AQ_CMD BIT_ULL(27)
+#define ICE_DBG_USER BIT_ULL(31)
+
+enum ice_aq_res_ids {
+ ICE_NVM_RES_ID = 1,
+ ICE_SPD_RES_ID,
+ ICE_GLOBAL_CFG_LOCK_RES_ID,
+ ICE_CHANGE_LOCK_RES_ID
+};
+
+enum ice_aq_res_access_type {
+ ICE_RES_READ = 1,
+ ICE_RES_WRITE
+};
+
+enum ice_fc_mode {
+ ICE_FC_NONE = 0,
+ ICE_FC_RX_PAUSE,
+ ICE_FC_TX_PAUSE,
+ ICE_FC_FULL,
+ ICE_FC_PFC,
+ ICE_FC_DFLT
+};
+
+enum ice_set_fc_aq_failures {
+ ICE_SET_FC_AQ_FAIL_NONE = 0,
+ ICE_SET_FC_AQ_FAIL_GET,
+ ICE_SET_FC_AQ_FAIL_SET,
+ ICE_SET_FC_AQ_FAIL_UPDATE
+};
+
+/* Various MAC types */
+enum ice_mac_type {
+ ICE_MAC_UNKNOWN = 0,
+ ICE_MAC_GENERIC,
+};
+
+/* Media Types */
+enum ice_media_type {
+ ICE_MEDIA_UNKNOWN = 0,
+ ICE_MEDIA_FIBER,
+ ICE_MEDIA_BASET,
+ ICE_MEDIA_BACKPLANE,
+ ICE_MEDIA_DA,
+};
+
+enum ice_vsi_type {
+ ICE_VSI_PF = 0,
+};
+
+struct ice_link_status {
+ /* Refer to ice_aq_phy_type for bit definitions */
+ u64 phy_type_low;
+ u16 max_frame_size;
+ u16 link_speed;
+ bool lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 pacing;
+ u8 req_speeds;
+ /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
+ * ice_aqc_get_phy_caps structure
+ */
+ u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* PHY info such as phy_type, etc... */
+struct ice_phy_info {
+ struct ice_link_status link_info;
+ struct ice_link_status link_info_old;
+ u64 phy_type_low;
+ enum ice_media_type media_type;
+ bool get_link_info;
+};
+
+/* Common HW capabilities for SW use */
+struct ice_hw_common_caps {
+ /* TX/RX queues */
+ u16 num_rxq; /* Number/Total RX queues */
+ u16 rxq_first_id; /* First queue ID for RX queues */
+ u16 num_txq; /* Number/Total TX queues */
+ u16 txq_first_id; /* First queue ID for TX queues */
+
+ /* MSI-X vectors */
+ u16 num_msix_vectors;
+ u16 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u16 max_mtu;
+
+ /* RSS related capabilities */
+ u16 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u8 rss_table_entry_width; /* RSS Entry width in bits */
+};
+
+/* Function specific capabilities */
+struct ice_hw_func_caps {
+ struct ice_hw_common_caps common_cap;
+ u32 guaranteed_num_vsi;
+};
+
+/* Device wide capabilities */
+struct ice_hw_dev_caps {
+ struct ice_hw_common_caps common_cap;
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+};
+
+/* MAC info */
+struct ice_mac_info {
+ u8 lan_addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+};
+
+/* Various reset requests; these are not tied to HW reset types */
+enum ice_reset_req {
+ ICE_RESET_PFR = 0,
+ ICE_RESET_CORER = 1,
+ ICE_RESET_GLOBR = 2,
+};
+
+/* Bus parameters */
+struct ice_bus_info {
+ u16 device;
+ u8 func;
+};
+
+/* Flow control (FC) parameters */
+struct ice_fc_info {
+ enum ice_fc_mode current_mode; /* FC mode in effect */
+ enum ice_fc_mode req_mode; /* FC mode requested by caller */
+};
+
+/* NVM Information */
+struct ice_nvm_info {
+ u32 eetrack; /* NVM data version */
+ u32 oem_ver; /* OEM version info */
+ u16 sr_words; /* Shadow RAM size in words */
+ u16 ver; /* NVM package version */
+ bool blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+/* Max number of port to queue branches w.r.t. topology */
+#define ICE_MAX_TRAFFIC_CLASS 8
+#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
+
+struct ice_sched_node {
+ struct ice_sched_node *parent;
+ struct ice_sched_node *sibling; /* next sibling in the same layer */
+ struct ice_sched_node **children;
+ struct ice_aqc_txsched_elem_data info;
+ u32 agg_id; /* aggregator group id */
+ u16 vsi_id;
+ bool in_use; /* suspended or in use */
+ u8 tx_sched_layer; /* Logical Layer (1-9) */
+ u8 num_children;
+ u8 tc_num;
+ u8 owner;
+#define ICE_SCHED_NODE_OWNER_LAN 0
+};
+
+/* Access Macros for Tx Sched Elements data */
+#define ICE_TXSCHED_GET_NODE_TEID(x) le32_to_cpu((x)->info.node_teid)
+
+/* The aggregator type determines if identifier is for a VSI group,
+ * aggregator group, aggregator of queues, or queue group.
+ */
+enum ice_agg_type {
+ ICE_AGG_TYPE_UNKNOWN = 0,
+ ICE_AGG_TYPE_VSI,
+ ICE_AGG_TYPE_AGG, /* aggregator */
+ ICE_AGG_TYPE_Q,
+ ICE_AGG_TYPE_QG
+};
+
+#define ICE_SCHED_DFLT_RL_PROF_ID 0
+
+/* vsi type list entry to locate corresponding vsi/ag nodes */
+struct ice_sched_vsi_info {
+ struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
+ struct list_head list_entry;
+ u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
+ u16 vsi_id;
+};
+
+/* driver defines the policy */
+struct ice_sched_tx_policy {
+ u16 max_num_vsis;
+ u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
+ bool rdma_ena;
+};
+
+struct ice_port_info {
+ struct ice_sched_node *root; /* Root Node per Port */
+ struct ice_hw *hw; /* back pointer to hw instance */
+ u32 last_node_teid; /* scheduler last node info */
+ u16 sw_id; /* Initial switch ID belongs to port */
+ u16 pf_vf_num;
+ u8 port_state;
+#define ICE_SCHED_PORT_STATE_INIT 0x0
+#define ICE_SCHED_PORT_STATE_READY 0x1
+ u16 dflt_tx_vsi_rule_id;
+ u16 dflt_tx_vsi_num;
+ u16 dflt_rx_vsi_rule_id;
+ u16 dflt_rx_vsi_num;
+ struct ice_fc_info fc;
+ struct ice_mac_info mac;
+ struct ice_phy_info phy;
+ struct mutex sched_lock; /* protect access to TXSched tree */
+ struct ice_sched_tx_policy sched_policy;
+ struct list_head vsi_info_list;
+ struct list_head agg_list; /* lists all aggregators */
+ u8 lport;
+#define ICE_LPORT_MASK 0xff
+ bool is_vf;
+};
+
+struct ice_switch_info {
+ /* Switch VSI lists to MAC/VLAN translation */
+ struct mutex mac_list_lock; /* protect MAC list */
+ struct list_head mac_list_head;
+ struct mutex vlan_list_lock; /* protect VLAN list */
+ struct list_head vlan_list_head;
+ struct mutex eth_m_list_lock; /* protect ethtype list */
+ struct list_head eth_m_list_head;
+ struct mutex promisc_list_lock; /* protect promisc mode list */
+ struct list_head promisc_list_head;
+ struct mutex mac_vlan_list_lock; /* protect MAC-VLAN list */
+ struct list_head mac_vlan_list_head;
+
+ struct list_head vsi_list_map_head;
+};
+
+/* Port hardware description */
+struct ice_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+ struct ice_aqc_layer_props *layer_info;
+ struct ice_port_info *port_info;
+ u64 debug_mask; /* bitmap for debug mask */
+ enum ice_mac_type mac_type;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+
+ u8 pf_id; /* device profile info */
+
+ /* TX Scheduler values */
+ u16 num_tx_sched_layers;
+ u16 num_tx_sched_phys_layers;
+ u8 flattened_layers;
+ u8 max_cgds;
+ u8 sw_entry_point_layer;
+
+ bool evb_veb; /* true for VEB, false for VEPA */
+ struct ice_bus_info bus;
+ struct ice_nvm_info nvm;
+ struct ice_hw_dev_caps dev_caps; /* device capabilities */
+ struct ice_hw_func_caps func_caps; /* function capabilities */
+
+ struct ice_switch_info *switch_info; /* switch filter lists */
+
+ /* Control Queue info */
+ struct ice_ctl_q_info adminq;
+
+ u8 api_branch; /* API branch version */
+ u8 api_maj_ver; /* API major version */
+ u8 api_min_ver; /* API minor version */
+ u8 api_patch; /* API patch version */
+ u8 fw_branch; /* firmware branch version */
+ u8 fw_maj_ver; /* firmware major version */
+ u8 fw_min_ver; /* firmware minor version */
+ u8 fw_patch; /* firmware patch version */
+ u32 fw_build; /* firmware build number */
+
+ /* minimum allowed value for different speeds */
+#define ICE_ITR_GRAN_MIN_200 1
+#define ICE_ITR_GRAN_MIN_100 1
+#define ICE_ITR_GRAN_MIN_50 2
+#define ICE_ITR_GRAN_MIN_25 4
+ /* ITR granularity in 1 us */
+ u8 itr_gran_200;
+ u8 itr_gran_100;
+ u8 itr_gran_50;
+ u8 itr_gran_25;
+ bool ucast_shared; /* true if VSIs can share unicast addr */
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct ice_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct ice_hw_port_stats {
+ /* eth stats collected by the port */
+ struct ice_eth_stats eth;
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_len_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define ICE_SR_NVM_DEV_STARTER_VER 0x18
+#define ICE_SR_NVM_EETRACK_LO 0x2D
+#define ICE_SR_NVM_EETRACK_HI 0x2E
+#define ICE_NVM_VER_LO_SHIFT 0
+#define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT)
+#define ICE_NVM_VER_HI_SHIFT 12
+#define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT)
+#define ICE_OEM_VER_PATCH_SHIFT 0
+#define ICE_OEM_VER_PATCH_MASK (0xff << ICE_OEM_VER_PATCH_SHIFT)
+#define ICE_OEM_VER_BUILD_SHIFT 8
+#define ICE_OEM_VER_BUILD_MASK (0xffff << ICE_OEM_VER_BUILD_SHIFT)
+#define ICE_OEM_VER_SHIFT 24
+#define ICE_OEM_VER_MASK (0xff << ICE_OEM_VER_SHIFT)
+#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define ICE_SR_WORDS_IN_1KB 512
+
+#endif /* _ICE_TYPE_H_ */
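
The ICE_NVM_VER_* and ICE_OEM_VER_* masks above pack several version fields into single NVM words. A minimal sketch of how they might be decoded (the word parameters and the helper name are illustrative, not part of this patch):

/* Sketch: decode the packed NVM/OEM version words with the masks above. */
static void ice_sketch_show_versions(u16 nvm_word, u32 oem_word)
{
	u8 nvm_hi = (nvm_word & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	u8 nvm_lo = (nvm_word & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
	u8 oem_ver = (oem_word & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT;
	u16 oem_build = (oem_word & ICE_OEM_VER_BUILD_MASK) >>
			ICE_OEM_VER_BUILD_SHIFT;
	u8 oem_patch = (oem_word & ICE_OEM_VER_PATCH_MASK) >>
		       ICE_OEM_VER_PATCH_SHIFT;

	pr_info("NVM %x.%02x, OEM %d.%d.%d\n",
		nvm_hi, nvm_lo, oem_ver, oem_build, oem_patch);
}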
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 41b306fb90f8..bebe43b3a836 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -148,7 +148,7 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
&adapter->hw.mac.thermal_sensor_data.sensor[offset];
igb_attr->hw = &adapter->hw;
igb_attr->dev_attr.store = NULL;
- igb_attr->dev_attr.attr.mode = S_IRUGO;
+ igb_attr->dev_attr.attr.mode = 0444;
igb_attr->dev_attr.attr.name = igb_attr->name;
sysfs_attr_init(&igb_attr->dev_attr.attr);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index ef6df3d6437e..24766e125592 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -146,7 +146,7 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
&adapter->hw.mac.thermal_sensor_data.sensor[offset];
ixgbe_attr->hw = &adapter->hw;
ixgbe_attr->dev_attr.store = NULL;
- ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
+ ixgbe_attr->dev_attr.attr.mode = 0444;
ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
sysfs_attr_init(&ixgbe_attr->dev_attr.attr);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 25e9a551cc8c..eaa4bb80f1c9 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4655,8 +4655,8 @@ MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <[email protected]>, Thomas Petazzoni <[email protected]>");
MODULE_LICENSE("GPL");
-module_param(rxq_number, int, S_IRUGO);
-module_param(txq_number, int, S_IRUGO);
+module_param(rxq_number, int, 0444);
+module_param(txq_number, int, 0444);
-module_param(rxq_def, int, S_IRUGO);
-module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
+module_param(rxq_def, int, 0444);
+module_param(rx_copybreak, int, 0644);
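
These conversions are mechanical: checkpatch prefers plain octal over the symbolic S_I* macros for permission bits. For reference, the equivalences relied on throughout this series:

/* Octal equivalents of the symbolic mode macros:
 *   S_IRUGO           == 0444   (r--r--r--, world-readable)
 *   S_IRUGO | S_IWUSR == 0644   (rw-r--r--, owner-writable)
 */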
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index f8bc3d4a39ff..7fc1bbf51c44 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1359,6 +1359,10 @@ static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
return readl(priv->swth_base[0] + offset);
}
+static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
+{
+ return readl_relaxed(priv->swth_base[0] + offset);
+}
/* These accessors should be used to access:
*
* - per-CPU registers, where each CPU has its own copy of the
@@ -1407,6 +1411,18 @@ static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
return readl(priv->swth_base[cpu] + offset);
}
+static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
+ u32 offset, u32 data)
+{
+ writel_relaxed(data, priv->swth_base[cpu] + offset);
+}
+
+static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
+ u32 offset)
+{
+ return readl_relaxed(priv->swth_base[cpu] + offset);
+}
+
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
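
The new *_relaxed() accessors above wrap readl_relaxed()/writel_relaxed(), which omit the memory barriers that readl()/writel() imply on weakly ordered architectures. That is safe for these registers because, as used in the hot paths later in this patch, the accesses need not be ordered against DMA memory. A brief sketch of the distinction (illustrative, not from the patch):

/* readl() is roughly readl_relaxed() plus a read barrier on arm/arm64.
 * Dropping the barrier is only valid when the value read is not used
 * to order subsequent accesses to DMA-coherent memory.
 */
static u32 sketch_hot_path_read(void __iomem *reg)
{
	return readl_relaxed(reg);	/* no barrier; pure register read */
}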
@@ -1582,14 +1598,18 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
return 0;
}
-/* Read tcam entry from hw */
-static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+/* Initialize tcam entry from hw */
+static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
+ struct mvpp2_prs_entry *pe, int tid)
{
int i;
if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
return -EINVAL;
+ memset(pe, 0, sizeof(*pe));
+ pe->index = tid;
+
/* Write tcam index - indirect access */
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
@@ -1913,16 +1933,11 @@ static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
}
/* Find parser flow entry */
-static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
+static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
- struct mvpp2_prs_entry *pe;
+ struct mvpp2_prs_entry pe;
int tid;
- pe = kzalloc(sizeof(*pe), GFP_KERNEL);
- if (!pe)
- return NULL;
- mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
-
/* Go through all entries with MVPP2_PRS_LU_FLOWS */
for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
u8 bits;
@@ -1931,17 +1946,15 @@ static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
continue;
- pe->index = tid;
- mvpp2_prs_hw_read(priv, pe);
- bits = mvpp2_prs_sram_ai_get(pe);
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ bits = mvpp2_prs_sram_ai_get(&pe);
/* SRAM stores the classification lookup ID in AI bits [5:0] */
if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
- return pe;
+ return tid;
}
- kfree(pe);
- return NULL;
+ return -ENOENT;
}
/* Return first free tcam index, seeking from start to end */
@@ -1971,8 +1984,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
/* Entry exists - update port only */
- pe.index = MVPP2_PE_DROP_ALL;
- mvpp2_prs_hw_read(priv, &pe);
+ mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -2020,8 +2032,7 @@ static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
/* Promiscuous mode - accept unknown unicast or multicast packets */
if (priv->prs_shadow[tid].valid) {
- pe.index = tid;
- mvpp2_prs_hw_read(priv, &pe);
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
memset(&pe, 0, sizeof(pe));
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
@@ -2071,8 +2082,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
if (priv->prs_shadow[tid].valid) {
/* Entry exists - update port only */
- pe.index = tid;
- mvpp2_prs_hw_read(priv, &pe);
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -2140,8 +2150,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
if (priv->prs_shadow[tid].valid) {
/* Entry exists - update port only */
- pe.index = tid;
- mvpp2_prs_hw_read(priv, &pe);
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
} else {
/* Entry doesn't exist - create new */
memset(&pe, 0, sizeof(pe));
@@ -2189,17 +2198,11 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
}
/* Search for existing single/triple vlan entry */
-static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
- unsigned short tpid, int ai)
+static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
- struct mvpp2_prs_entry *pe;
+ struct mvpp2_prs_entry pe;
int tid;
- pe = kzalloc(sizeof(*pe), GFP_KERNEL);
- if (!pe)
- return NULL;
- mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
-
/* Go through all entries with MVPP2_PRS_LU_VLAN */
for (tid = MVPP2_PE_FIRST_FREE_TID;
tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
@@ -2210,19 +2213,17 @@ static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
continue;
- pe->index = tid;
-
- mvpp2_prs_hw_read(priv, pe);
- match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
if (!match)
continue;
/* Get vlan type */
- ri_bits = mvpp2_prs_sram_ri_get(pe);
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
/* Get current ai value from tcam */
- ai_bits = mvpp2_prs_tcam_ai_get(pe);
+ ai_bits = mvpp2_prs_tcam_ai_get(&pe);
/* Clear double vlan bit */
ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
@@ -2231,34 +2232,31 @@ static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
- return pe;
+ return tid;
}
- kfree(pe);
- return NULL;
+ return -ENOENT;
}
/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
unsigned int port_map)
{
- struct mvpp2_prs_entry *pe;
+ struct mvpp2_prs_entry pe;
int tid_aux, tid;
int ret = 0;
- pe = mvpp2_prs_vlan_find(priv, tpid, ai);
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_vlan_find(priv, tpid, ai);
- if (!pe) {
+ if (tid < 0) {
/* Create new tcam entry */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
MVPP2_PE_FIRST_FREE_TID);
if (tid < 0)
return tid;
- pe = kzalloc(sizeof(*pe), GFP_KERNEL);
- if (!pe)
- return -ENOMEM;
-
/* Get last double vlan tid */
for (tid_aux = MVPP2_PE_LAST_FREE_TID;
tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
@@ -2268,49 +2266,46 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
continue;
- pe->index = tid_aux;
- mvpp2_prs_hw_read(priv, pe);
- ri_bits = mvpp2_prs_sram_ri_get(pe);
+ mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
MVPP2_PRS_RI_VLAN_DOUBLE)
break;
}
- if (tid <= tid_aux) {
- ret = -EINVAL;
- goto free_pe;
- }
+ if (tid <= tid_aux)
+ return -EINVAL;
- memset(pe, 0, sizeof(*pe));
- mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
- pe->index = tid;
+ memset(&pe, 0, sizeof(pe));
+ pe.index = tid;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
- mvpp2_prs_match_etype(pe, 0, tpid);
+ mvpp2_prs_match_etype(&pe, 0, tpid);
/* VLAN tag detected, proceed with VID filtering */
- mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VID);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
/* Clear all ai bits for next iteration */
- mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
- mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
MVPP2_PRS_RI_VLAN_MASK);
} else {
ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
- mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
MVPP2_PRS_RI_VLAN_MASK);
}
- mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
- mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Update ports' mask */
- mvpp2_prs_tcam_port_map_set(pe, port_map);
+ mvpp2_prs_tcam_port_map_set(&pe, port_map);
- mvpp2_prs_hw_write(priv, pe);
-free_pe:
- kfree(pe);
+ mvpp2_prs_hw_write(priv, &pe);
return ret;
}
@@ -2329,18 +2324,12 @@ static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
}
/* Search for existing double vlan entry */
-static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
- unsigned short tpid1,
- unsigned short tpid2)
+static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
+ unsigned short tpid2)
{
- struct mvpp2_prs_entry *pe;
+ struct mvpp2_prs_entry pe;
int tid;
- pe = kzalloc(sizeof(*pe), GFP_KERNEL);
- if (!pe)
- return NULL;
- mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
-
/* Go through all entries with MVPP2_PRS_LU_VLAN */
for (tid = MVPP2_PE_FIRST_FREE_TID;
tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
@@ -2351,22 +2340,20 @@ static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
continue;
- pe->index = tid;
- mvpp2_prs_hw_read(priv, pe);
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
- match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
- && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
+ mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
if (!match)
continue;
- ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
+ ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
- return pe;
+ return tid;
}
- kfree(pe);
- return NULL;
+ return -ENOENT;
}
/* Add or update double vlan entry */
@@ -2374,28 +2361,24 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
unsigned short tpid2,
unsigned int port_map)
{
- struct mvpp2_prs_entry *pe;
int tid_aux, tid, ai, ret = 0;
+ struct mvpp2_prs_entry pe;
- pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
+ memset(&pe, 0, sizeof(pe));
- if (!pe) {
+ tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
+
+ if (tid < 0) {
/* Create new tcam entry */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
MVPP2_PE_LAST_FREE_TID);
if (tid < 0)
return tid;
- pe = kzalloc(sizeof(*pe), GFP_KERNEL);
- if (!pe)
- return -ENOMEM;
-
/* Set ai value for new double vlan entry */
ai = mvpp2_prs_double_vlan_ai_free_get(priv);
- if (ai < 0) {
- ret = ai;
- goto free_pe;
- }
+ if (ai < 0)
+ return ai;
/* Get first single/triple vlan tid */
for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
@@ -2406,46 +2389,44 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
continue;
- pe->index = tid_aux;
- mvpp2_prs_hw_read(priv, pe);
- ri_bits = mvpp2_prs_sram_ri_get(pe);
+ mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
break;
}
- if (tid >= tid_aux) {
- ret = -ERANGE;
- goto free_pe;
- }
+ if (tid >= tid_aux)
+ return -ERANGE;
- memset(pe, 0, sizeof(*pe));
- mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
- pe->index = tid;
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ pe.index = tid;
priv->prs_double_vlans[ai] = true;
- mvpp2_prs_match_etype(pe, 0, tpid1);
- mvpp2_prs_match_etype(pe, 4, tpid2);
+ mvpp2_prs_match_etype(&pe, 0, tpid1);
+ mvpp2_prs_match_etype(&pe, 4, tpid2);
- mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
/* Shift 4 bytes - skip outer vlan tag */
- mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
MVPP2_PRS_RI_VLAN_MASK);
- mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
+ mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
MVPP2_PRS_SRAM_AI_MASK);
- mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Update ports' mask */
- mvpp2_prs_tcam_port_map_set(pe, port_map);
- mvpp2_prs_hw_write(priv, pe);
-free_pe:
- kfree(pe);
+ mvpp2_prs_tcam_port_map_set(&pe, port_map);
+ mvpp2_prs_hw_write(priv, &pe);
+
return ret;
}
@@ -3513,9 +3494,8 @@ static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
continue;
- pe.index = tid;
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
- mvpp2_prs_hw_read(priv, &pe);
mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
@@ -3528,7 +3508,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
return tid;
}
- return 0;
+ return -ENOENT;
}
/* Write parser entry for VID filtering */
@@ -3541,6 +3521,8 @@ static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
struct mvpp2_prs_entry pe;
int tid;
+ memset(&pe, 0, sizeof(pe));
+
/* Scan TCAM and see if an entry with this <vid,port> already exists */
tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
@@ -3551,8 +3533,7 @@ static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
shift = MVPP2_VLAN_TAG_LEN;
/* No such entry */
- if (!tid) {
- memset(&pe, 0, sizeof(pe));
+ if (tid < 0) {
/* Go through all entries from first to last in vlan range */
tid = mvpp2_prs_tcam_first_free(priv, vid_start,
@@ -3569,7 +3550,7 @@ static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
/* Mask all ports */
mvpp2_prs_tcam_port_map_set(&pe, 0);
} else {
- mvpp2_prs_hw_read(priv, &pe);
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
}
/* Enable the current port */
@@ -3604,7 +3585,7 @@ static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
/* No such entry */
- if (tid)
+ if (tid < 0)
return;
mvpp2_prs_hw_inv(priv, tid);
@@ -3771,18 +3752,13 @@ static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
}
/* Find tcam entry with matched pair <MAC DA, port> */
-static struct mvpp2_prs_entry *
+static int
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
unsigned char *mask, int udf_type)
{
- struct mvpp2_prs_entry *pe;
+ struct mvpp2_prs_entry pe;
int tid;
- pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
- if (!pe)
- return NULL;
- mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
-
/* Go through all entries with MVPP2_PRS_LU_MAC */
for (tid = MVPP2_PE_MAC_RANGE_START;
tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
@@ -3793,17 +3769,15 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
(priv->prs_shadow[tid].udf != udf_type))
continue;
- pe->index = tid;
- mvpp2_prs_hw_read(priv, pe);
- entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
- if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
+ if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
entry_pmap == pmap)
- return pe;
+ return tid;
}
- kfree(pe);
- return NULL;
+ return -ENOENT;
}
/* Update parser's mac da entry */
@@ -3813,15 +3787,17 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da,
unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
struct mvpp2 *priv = port->priv;
unsigned int pmap, len, ri;
- struct mvpp2_prs_entry *pe;
+ struct mvpp2_prs_entry pe;
int tid;
+ memset(&pe, 0, sizeof(pe));
+
/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
- pe = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
- MVPP2_PRS_UDF_MAC_DEF);
+ tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
+ MVPP2_PRS_UDF_MAC_DEF);
/* No such entry */
- if (!pe) {
+ if (tid < 0) {
if (!add)
return 0;
@@ -3833,39 +3809,37 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da,
if (tid < 0)
return tid;
- pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
- if (!pe)
- return -ENOMEM;
- mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
- pe->index = tid;
+ pe.index = tid;
/* Mask all ports */
- mvpp2_prs_tcam_port_map_set(pe, 0);
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
}
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
/* Update port mask */
- mvpp2_prs_tcam_port_set(pe, port->id, add);
+ mvpp2_prs_tcam_port_set(&pe, port->id, add);
/* Invalidate the entry if no ports are left enabled */
- pmap = mvpp2_prs_tcam_port_map_get(pe);
+ pmap = mvpp2_prs_tcam_port_map_get(&pe);
if (pmap == 0) {
- if (add) {
- kfree(pe);
+ if (add)
return -EINVAL;
- }
- mvpp2_prs_hw_inv(priv, pe->index);
- priv->prs_shadow[pe->index].valid = false;
- kfree(pe);
+
+ mvpp2_prs_hw_inv(priv, pe.index);
+ priv->prs_shadow[pe.index].valid = false;
return 0;
}
/* Continue - set next lookup */
- mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
/* Set match on DA */
len = ETH_ALEN;
while (len--)
- mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
/* Set result info bits */
if (is_broadcast_ether_addr(da)) {
@@ -3879,21 +3853,19 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da,
ri |= MVPP2_PRS_RI_MAC_ME_MASK;
}
- mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+ mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
MVPP2_PRS_RI_MAC_ME_MASK);
- mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+ mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
MVPP2_PRS_RI_MAC_ME_MASK);
/* Shift to ethertype */
- mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
+ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Update shadow table and hw entry */
- priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
- mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
- mvpp2_prs_hw_write(priv, pe);
-
- kfree(pe);
+ priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
return 0;
}
@@ -3935,8 +3907,7 @@ static void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
(priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
continue;
- pe.index = tid;
- mvpp2_prs_hw_read(priv, &pe);
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
pmap = mvpp2_prs_tcam_port_map_get(&pe);
@@ -4014,13 +3985,15 @@ static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
- struct mvpp2_prs_entry *pe;
+ struct mvpp2_prs_entry pe;
int tid;
- pe = mvpp2_prs_flow_find(port->priv, port->id);
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_flow_find(port->priv, port->id);
/* No such entry exists */
- if (!pe) {
+ if (tid < 0) {
/* Go through all entries from last to first */
tid = mvpp2_prs_tcam_first_free(port->priv,
MVPP2_PE_LAST_FREE_TID,
@@ -4028,24 +4001,21 @@ static int mvpp2_prs_def_flow(struct mvpp2_port *port)
if (tid < 0)
return tid;
- pe = kzalloc(sizeof(*pe), GFP_KERNEL);
- if (!pe)
- return -ENOMEM;
-
- mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
- pe->index = tid;
+ pe.index = tid;
/* Set flow ID*/
- mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
- mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+ mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
/* Update shadow table */
- mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
+ } else {
+ mvpp2_prs_init_from_hw(port->priv, &pe, tid);
}
- mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
- mvpp2_prs_hw_write(port->priv, pe);
- kfree(pe);
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
+ mvpp2_prs_hw_write(port->priv, &pe);
return 0;
}
@@ -4488,8 +4458,8 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
- mvpp2_percpu_write(port->priv, cpu,
- MVPP22_BM_ADDR_HIGH_RLS_REG, val);
+ mvpp2_percpu_write_relaxed(port->priv, cpu,
+ MVPP22_BM_ADDR_HIGH_RLS_REG, val);
}
/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
@@ -4497,10 +4467,10 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
* descriptor. Instead of storing the virtual address, we
* store the physical address
*/
- mvpp2_percpu_write(port->priv, cpu,
- MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
- mvpp2_percpu_write(port->priv, cpu,
- MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+ mvpp2_percpu_write_relaxed(port->priv, cpu,
+ MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
+ mvpp2_percpu_write_relaxed(port->priv, cpu,
+ MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
put_cpu();
}
@@ -5592,7 +5562,8 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
/* Update number of occupied aggregated Tx descriptors */
int cpu = smp_processor_id();
- u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+ u32 val = mvpp2_read_relaxed(priv,
+ MVPP2_AGGR_TXQ_STATUS_REG(cpu));
aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
}
@@ -5616,9 +5587,9 @@ static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
int cpu = smp_processor_id();
val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
- mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
+ mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
- val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
+ val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
@@ -5723,8 +5694,8 @@ static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
u32 val;
/* Reading status reg resets transmitted descriptor counter */
- val = mvpp2_percpu_read(port->priv, smp_processor_id(),
- MVPP2_TXQ_SENT_REG(txq->id));
+ val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
+ MVPP2_TXQ_SENT_REG(txq->id));
return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
MVPP2_TRANSMITTED_COUNT_OFFSET;
@@ -7090,8 +7061,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
*
* Each CPU has its own Rx/Tx cause register
*/
- cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+ cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
+ MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
if (cause_misc) {
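
Taken together, the parser changes above follow a single pattern: the *_find() helpers no longer return a kzalloc()'d mvpp2_prs_entry that every caller must kfree(); they return a TCAM index (or -ENOENT), and callers work on an on-stack entry filled by mvpp2_prs_init_from_hw(). A condensed sketch of the new calling convention, using names from the patch (the wrapper itself is illustrative):

/* Condensed sketch of the lookup pattern introduced above. */
static int sketch_flow_update(struct mvpp2 *priv, int flow,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;	/* on stack, no kzalloc()/kfree() */
	int tid;

	tid = mvpp2_prs_flow_find(priv, flow);
	if (tid < 0)
		return tid;	/* -ENOENT: caller may create a new entry */

	/* Zeroes pe, sets pe.index = tid, then reads the entry back */
	mvpp2_prs_init_from_hw(priv, &pe, tid);
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);
	return 0;
}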
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 31efc47c847e..9c08c3650c02 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3783,7 +3783,7 @@ static int skge_device_event(struct notifier_block *unused,
break;
case NETDEV_UP:
- d = debugfs_create_file(dev->name, S_IRUGO,
+ d = debugfs_create_file(dev->name, 0444,
skge_debug, dev,
&skge_debug_fops);
if (!d || IS_ERR(d))
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 9fe85300e7b6..9b77db7c13d0 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4667,7 +4667,7 @@ static int sky2_device_event(struct notifier_block *unused,
break;
case NETDEV_UP:
- sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO,
+ sky2->debugfs = debugfs_create_file(dev->name, 0444,
sky2_debug, dev,
&sky2_debug_fops);
if (IS_ERR(sky2->debugfs))
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 4d84cab77105..5a26851b4ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2993,10 +2993,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
- if (mlx4_is_mfunc(dev))
- info->port_attr.attr.mode = S_IRUGO;
- else {
- info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (mlx4_is_mfunc(dev)) {
+ info->port_attr.attr.mode = 0444;
+ } else {
+ info->port_attr.attr.mode = 0644;
info->port_attr.store = set_port_type;
}
info->port_attr.show = show_port_type;
@@ -3011,10 +3011,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
info->port_mtu_attr.attr.name = info->dev_mtu_name;
- if (mlx4_is_mfunc(dev))
- info->port_mtu_attr.attr.mode = S_IRUGO;
- else {
- info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (mlx4_is_mfunc(dev)) {
+ info->port_mtu_attr.attr.mode = 0444;
+ } else {
+ info->port_mtu_attr.attr.mode = 0644;
info->port_mtu_attr.store = set_port_ib_mtu;
}
info->port_mtu_attr.show = show_port_ib_mtu;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e9a1fbcc4adf..21cd1703a862 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -359,6 +359,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+ case MLX5_CMD_OP_QUERY_VNIC_ENV:
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
@@ -501,6 +502,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
@@ -1802,7 +1804,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd->checksum_disabled = 1;
cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
- cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
+ cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
if (cmd->cmdif_rev > CMD_IF_REV) {
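
The 1UL fix above matters once log_sz allows max_reg_cmds to reach 31: with a plain int literal the shift lands in the sign bit of a 32-bit int, which is undefined behaviour, and the mask computed for cmd->bitmask can end up wrong. Promoting the literal keeps the shift well defined:

/* Why 1UL: with max_reg_cmds == 31,
 *   (1  << 31) - 1   shifts into the sign bit of int (undefined behaviour)
 *   (1UL << 31) - 1  == 0x7fffffff, well defined on 32- and 64-bit
 */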
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index a6ba57fbb414..09f178a3fcab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -136,6 +136,8 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\
{MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\
{MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\
+ {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\
+ {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP, "VLAN_POP"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
TRACE_EVENT(mlx5_fs_set_fte,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 4c9360b25532..353ac6daa3dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -93,8 +93,6 @@
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
-#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
- MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_MTTS(wqes) \
@@ -124,6 +122,7 @@
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
#define MLX5E_ICOSQ_MAX_WQEBBS \
(DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
@@ -207,12 +206,14 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
"rx_cqe_moder",
"tx_cqe_moder",
"rx_cqe_compress",
+ "rx_striding_rq",
};
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
+ MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
@@ -232,9 +233,6 @@ enum mlx5e_priv_flag {
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
- u16 rq_headroom;
- u8 mpwqe_log_stride_sz;
- u8 mpwqe_log_num_strides;
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
@@ -243,7 +241,6 @@ struct mlx5e_params {
struct net_dim_cq_moder tx_cq_moderation;
bool lro_en;
u32 lro_wqe_sz;
- u16 tx_max_inline;
u8 tx_min_inline_mode;
u8 rss_hfunc;
u8 toeplitz_hash_key[40];
@@ -336,6 +333,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_ENABLED,
+ MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
};
@@ -369,7 +367,6 @@ struct mlx5e_txqsq {
void __iomem *uar_map;
struct netdev_queue *txq;
u32 sqn;
- u16 max_inline;
u8 min_inline_mode;
u16 edge;
struct device *pdev;
@@ -383,6 +380,10 @@ struct mlx5e_txqsq {
struct mlx5e_channel *channel;
int txq_ix;
u32 rate_limit;
+ struct mlx5e_txqsq_recover {
+ struct work_struct recover_work;
+ u64 last_recover;
+ } recover;
} ____cacheline_aligned_in_smp;
struct mlx5e_xdpsq {
@@ -781,7 +782,8 @@ struct mlx5e_priv {
struct net_device *netdev;
struct mlx5e_stats stats;
struct hwtstamp_config tstamp;
- u16 q_counter;
+ u16 q_counter;
+ u16 drop_rq_q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
#endif
@@ -831,6 +833,10 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -841,6 +847,11 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
+u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+
void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
@@ -916,9 +927,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
+void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- u8 rq_type);
+ struct mlx5e_params *params);
static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
@@ -1010,7 +1021,6 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
#endif
-u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
@@ -1061,7 +1071,6 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
-u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
int mlx5e_bits_invert(unsigned long a, int size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index cc8048f68f11..c57c929d7973 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -203,9 +203,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
{
int i, idx = 0;
- if (!data)
- return;
-
mutex_lock(&priv->state_lock);
mlx5e_update_stats(priv);
mutex_unlock(&priv->state_lock);
@@ -234,8 +231,8 @@ static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return num_wqe;
- stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
- num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
+ stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params);
+ num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params);
wqe_size = stride_size * num_strides;
packets_per_wqe = wqe_size /
@@ -255,8 +252,8 @@ static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return num_packets;
- stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
- num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
+ stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params);
+ num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params);
wqe_size = stride_size * num_strides;
num_packets = (1 << order_base_2(num_packets));
@@ -1066,16 +1063,66 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
return err;
}
+#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
+#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
+#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
+#define MLX5E_PFC_PREVEN_TOUT_MIN_MSEC 80
+#define MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout) \
+ max_t(u16, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC, \
+ (critical_tout * MLX5E_PFC_PREVEN_MINOR_PRECENT) / 100)
+
+static int mlx5e_get_pfc_prevention_tout(struct net_device *netdev,
+ u16 *pfc_prevention_tout)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) ||
+ !MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
+ return -EOPNOTSUPP;
+
+ return mlx5_query_port_stall_watermark(mdev, pfc_prevention_tout, NULL);
+}
+
+static int mlx5e_set_pfc_prevention_tout(struct net_device *netdev,
+ u16 pfc_preven)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 critical_tout;
+ u16 minor;
+
+ if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) ||
+ !MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
+ return -EOPNOTSUPP;
+
+ critical_tout = (pfc_preven == PFC_STORM_PREVENTION_AUTO) ?
+ MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC :
+ pfc_preven;
+
+ if (critical_tout != PFC_STORM_PREVENTION_DISABLE &&
+ (critical_tout > MLX5E_PFC_PREVEN_TOUT_MAX_MSEC ||
+ critical_tout < MLX5E_PFC_PREVEN_TOUT_MIN_MSEC)) {
+ netdev_info(netdev, "%s: pfc prevention tout not in range (%d-%d)\n",
+ __func__, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC,
+ MLX5E_PFC_PREVEN_TOUT_MAX_MSEC);
+ return -EINVAL;
+ }
+
+ minor = MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout);
+ return mlx5_set_port_stall_watermark(mdev, critical_tout,
+ minor);
+}
+
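+
With the constants above, the minor (early-warning) watermark tracks 85% of the critical stall timeout but never drops below 80 ms. Worked through for two cases:

/* MLX5E_DEVICE_STALL_MINOR_WATERMARK() examples:
 *   auto: critical = 100 ms -> minor = max(80, 100 * 85 / 100) = 85 ms
 *   user: critical =  90 ms -> minor = max(80,  90 * 85 / 100) = 80 ms
 */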
static int mlx5e_get_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
void *data)
{
- const struct mlx5e_priv *priv = netdev_priv(dev);
- int err = 0;
+ int err;
switch (tuna->id) {
- case ETHTOOL_TX_COPYBREAK:
- *(u32 *)data = priv->channels.params.tx_max_inline;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ err = mlx5e_get_pfc_prevention_tout(dev, data);
break;
default:
err = -EINVAL;
@@ -1090,34 +1137,13 @@ static int mlx5e_set_tunable(struct net_device *dev,
const void *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
- int err = 0;
- u32 val;
+ int err;
mutex_lock(&priv->state_lock);
switch (tuna->id) {
- case ETHTOOL_TX_COPYBREAK:
- val = *(u32 *)data;
- if (val > mlx5e_get_max_inline_cap(mdev)) {
- err = -EINVAL;
- break;
- }
-
- new_channels.params = priv->channels.params;
- new_channels.params.tx_max_inline = val;
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- break;
- }
-
- err = mlx5e_open_channels(priv, &new_channels);
- if (err)
- break;
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
-
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ err = mlx5e_set_pfc_prevention_tout(dev, *(u16 *)data);
break;
default:
err = -EINVAL;
@@ -1507,11 +1533,6 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
new_channels.params = priv->channels.params;
MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
- new_channels.params.mpwqe_log_stride_sz =
- MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
- new_channels.params.mpwqe_log_num_strides =
- MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
-
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
return 0;
@@ -1549,6 +1570,38 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
return 0;
}
+static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channels new_channels = {};
+ int err;
+
+ if (enable) {
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+ return -EOPNOTSUPP;
+ if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
+ return -EINVAL;
+ }
+
+ new_channels.params = priv->channels.params;
+
+ MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
+ mlx5e_set_rq_type(mdev, &new_channels.params);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ return 0;
+ }
+
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ return err;
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ return 0;
+}
+
static int mlx5e_handle_pflag(struct net_device *netdev,
u32 wanted_flags,
enum mlx5e_priv_flag flag,
@@ -1594,6 +1647,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_RX_CQE_COMPRESS,
set_pflag_rx_cqe_compress);
+ if (err)
+ goto out;
+
+ err = mlx5e_handle_pflag(netdev, pflags,
+ MLX5E_PFLAG_RX_STRIDING_RQ,
+ set_pflag_rx_striding_rq);
out:
mutex_unlock(&priv->state_lock);
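
set_pflag_rx_striding_rq() above reuses the driver's staged-reconfiguration idiom, the same one the removed ETHTOOL_TX_COPYBREAK code used: stage the change in a local mlx5e_channels, commit directly when the interface is down, otherwise bring the new channels up before switching over, so a failure leaves the running configuration untouched. Condensed:

/* Staged-reconfiguration pattern used by the pflag handlers above. */
static int sketch_reconfigure(struct mlx5e_priv *priv)
{
	struct mlx5e_channels new_channels = {};
	int err;

	new_channels.params = priv->channels.params;
	/* ... modify new_channels.params here ... */

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		return 0;	/* nothing running; just store the params */
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		return err;	/* old channels keep running untouched */

	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
	return 0;
}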
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index da94c8cba5ee..1b48dec67abf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -71,56 +71,80 @@ struct mlx5e_channel_param {
struct mlx5e_cq_param icosq_cq;
};
-static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_GEN(mdev, striding_rq) &&
MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
MLX5_CAP_ETH(mdev, reg_umr_sq);
}
+u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return MLX5E_MPWQE_STRIDE_SZ(mdev,
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
+}
+
+u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return MLX5_MPWRQ_LOG_WQE_SZ -
+ mlx5e_mpwqe_get_log_stride_size(mdev, params);
+}
+
+static u16 mlx5e_get_rq_headroom(struct mlx5e_params *params)
+{
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)
+ return linear_rq_headroom;
+
+ return 0;
+}
+
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params, u8 rq_type)
+ struct mlx5e_params *params)
{
- params->rq_wq_type = rq_type;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
- params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
- params->mpwqe_log_stride_sz;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
- params->rq_headroom = params->xdp_prog ?
- XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
- params->rq_headroom += NET_IP_ALIGN;
/* Extra room needed for build_skb */
- params->lro_wqe_sz -= params->rq_headroom +
+ params->lro_wqe_sz -= mlx5e_get_rq_headroom(params) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
BIT(params->log_rq_size),
- BIT(params->mpwqe_log_stride_sz),
+ BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
+bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
- u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
- !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
- MLX5_WQ_TYPE_LINKED_LIST;
- mlx5e_init_rq_type_params(mdev, params, rq_type);
+ return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
+ !params->xdp_prog && !MLX5_IPSEC_DEV(mdev);
+}
+
+void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+ params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -153,26 +177,6 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-static void mlx5e_tx_timeout_work(struct work_struct *work)
-{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- tx_timeout_work);
- int err;
-
- rtnl_lock();
- mutex_lock(&priv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
- mlx5e_close_locked(priv->netdev);
- err = mlx5e_open_locked(priv->netdev);
- if (err)
- netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
- err);
-unlock:
- mutex_unlock(&priv->state_lock);
- rtnl_unlock();
-}
-
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
int i;
@@ -428,7 +432,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
- rq->buff.headroom = params->rq_headroom;
+ rq->buff.headroom = mlx5e_get_rq_headroom(params);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -450,8 +454,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
- rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);
+ rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
+ rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
@@ -615,8 +619,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
int next_state)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
@@ -953,6 +956,7 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
return 0;
}
+static void mlx5e_sq_recover(struct work_struct *work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
struct mlx5e_params *params,
@@ -970,8 +974,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->channel = c;
sq->txq_ix = txq_ix;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
- sq->max_inline = params->tx_max_inline;
sq->min_inline_mode = params->tx_min_inline_mode;
+ INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
@@ -1038,6 +1042,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
@@ -1156,9 +1161,20 @@ err_free_txqsq:
return err;
}
+static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
+{
+ WARN_ONCE(sq->cc != sq->pc,
+ "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
+ sq->sqn, sq->cc, sq->pc);
+ sq->cc = 0;
+ sq->dma_fifo_cc = 0;
+ sq->pc = 0;
+}
+
static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
@@ -1203,6 +1219,107 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
mlx5e_free_txqsq(sq);
}
+static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
+{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
+
+ while (time_before(jiffies, exp_time)) {
+ if (sq->cc == sq->pc)
+ return 0;
+
+ msleep(20);
+ }
+
+ netdev_err(sq->channel->netdev,
+ "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
+ sq->sqn, sq->cc, sq->pc);
+
+ return -ETIMEDOUT;
+}
+
+static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
+{
+ struct mlx5_core_dev *mdev = sq->channel->mdev;
+ struct net_device *dev = sq->channel->netdev;
+ struct mlx5e_modify_sq_param msp = {0};
+ int err;
+
+ msp.curr_state = curr_state;
+ msp.next_state = MLX5_SQC_STATE_RST;
+
+ err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
+ return err;
+ }
+
+ memset(&msp, 0, sizeof(msp));
+ msp.curr_state = MLX5_SQC_STATE_RST;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+
+ err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5e_sq_recover(struct work_struct *work)
+{
+ struct mlx5e_txqsq_recover *recover =
+ container_of(work, struct mlx5e_txqsq_recover,
+ recover_work);
+ struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
+ recover);
+ struct mlx5_core_dev *mdev = sq->channel->mdev;
+ struct net_device *dev = sq->channel->netdev;
+ u8 state;
+ int err;
+
+ err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
+ sq->sqn, err);
+ return;
+ }
+
+ if (state != MLX5_RQC_STATE_ERR) {
+ netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
+ return;
+ }
+
+ netif_tx_disable_queue(sq->txq);
+
+ if (mlx5e_wait_for_sq_flush(sq))
+ return;
+
+ /* If the interval between two consecutive recoveries of the same SQ is
+ * too short, don't recover, to avoid an infinite loop of ERR_CQE -> recover.
+ * If we reached this state, there is probably a bug that needs to be
+ * fixed. Let's keep the queue closed and let the tx timeout clean up.
+ */
+ if (jiffies_to_msecs(jiffies - recover->last_recover) <
+ MLX5E_SQ_RECOVER_MIN_INTERVAL) {
+ netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
+ sq->sqn);
+ return;
+ }
+
+ /* At this point, no new packets will arrive from the stack, as the TXQ is
+ * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI has cleared all
+ * pending WQEs, so we can safely reset the SQ.
+ */
+ if (mlx5e_sq_to_ready(sq, state))
+ return;
+
+ mlx5e_reset_txqsq_cc_pc(sq);
+ sq->stats.recover++;
+ recover->last_recover = jiffies;
+ mlx5e_activate_txqsq(sq);
+}
+
static int mlx5e_open_icosq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
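
mlx5e_sq_recover() above is easiest to read as a small state machine; condensed (the work item is queued from the error-CQE handling elsewhere in the patch):

/* SQ recovery flow implemented above:
 *   error CQE -> recover_work queued
 *     1. query SQ state; bail out unless it is ERR
 *     2. netif_tx_disable_queue()        - stop new traffic
 *     3. wait (up to 2 s) for cc == pc   - drain in-flight WQEs
 *     4. rate limit: give up if the last recovery was < 500 ms ago
 *     5. modify SQ state: ERR -> RST -> RDY
 *     6. reset cc/pc/dma_fifo_cc and reactivate the TXQ
 */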
@@ -1743,13 +1860,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_rq_param *param)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
- MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ mlx5e_mpwqe_get_log_num_strides(mdev, params) - 9);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ mlx5e_mpwqe_get_log_stride_size(mdev, params) - 6);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
@@ -1759,23 +1879,25 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
- MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
param->wq.linear = 1;
}
-static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
struct mlx5e_rq_param *param)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}
@@ -1821,7 +1943,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
+ log_cq_size = params->log_rq_size +
+ mlx5e_mpwqe_get_log_num_strides(priv->mdev, params);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
log_cq_size = params->log_rq_size;
@@ -2643,15 +2766,16 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
return mlx5e_alloc_cq_common(mdev, param, cq);
}
-static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_cq_param cq_param = {};
struct mlx5e_rq_param rq_param = {};
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- mlx5e_build_drop_rq_param(mdev, &rq_param);
+ mlx5e_build_drop_rq_param(priv, &rq_param);
err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
if (err)
@@ -2669,6 +2793,10 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
if (err)
goto err_free_rq;
+ err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
+
return 0;
err_free_rq:
@@ -3236,24 +3364,20 @@ static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t oper_features = netdev->features;
- int err;
+ int err = 0;
- err = mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_LRO, set_feature_lro);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_HW_VLAN_CTAG_FILTER,
+#define MLX5E_HANDLE_FEATURE(feature, handler) \
+ mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
+
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_HW_TC, set_feature_tc_num_filters);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_RXALL, set_feature_rx_all);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_RXFCS, set_feature_rx_fcs);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_NTUPLE, set_feature_arfs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif
if (err) {
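
The MLX5E_HANDLE_FEATURE refactor above keeps one call site per feature and OR-accumulates the results, so every handler still runs even if an earlier one failed; the combined value is only ever tested for "nonzero means something failed". A minimal userspace model of that pattern (handler names here are illustrative, not the driver's):

#include <stdio.h>

/* Two stand-in feature handlers; set_rxfcs fails to show accumulation. */
static int set_lro(int enable)   { (void)enable; return 0; }
static int set_rxfcs(int enable) { (void)enable; return -1; }

/* Mirrors MLX5E_HANDLE_FEATURE: one call site per feature, errors OR'ed. */
#define HANDLE_FEATURE(handler) ((handler)(1) ? 1 : 0)

int main(void)
{
	int err = 0;

	err |= HANDLE_FEATURE(set_lro);   /* applied */
	err |= HANDLE_FEATURE(set_rxfcs); /* fails, set_lro stays applied */

	printf("err=%d\n", err); /* 1: failure reported after trying all */
	return 0;
}
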
@@ -3629,13 +3753,19 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
return true;
}
-static void mlx5e_tx_timeout(struct net_device *dev)
+static void mlx5e_tx_timeout_work(struct work_struct *work)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ tx_timeout_work);
+ struct net_device *dev = priv->netdev;
bool reopen_channels = false;
- int i;
+ int i, err;
- netdev_err(dev, "TX timeout detected\n");
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
@@ -3643,7 +3773,9 @@ static void mlx5e_tx_timeout(struct net_device *dev)
if (!netif_xmit_stopped(dev_queue))
continue;
- netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+
+ netdev_err(dev,
+ "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
jiffies_to_usecs(jiffies - dev_queue->trans_start));
@@ -3656,8 +3788,27 @@ static void mlx5e_tx_timeout(struct net_device *dev)
}
}
- if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
- schedule_work(&priv->tx_timeout_work);
+ if (!reopen_channels)
+ goto unlock;
+
+ mlx5e_close_locked(dev);
+ err = mlx5e_open_locked(dev);
+ if (err)
+ netdev_err(priv->netdev,
+ "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ err);
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+}
+
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ netdev_err(dev, "TX timeout detected\n");
+ queue_work(priv->wq, &priv->tx_timeout_work);
}
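
The hunk above inverts the old flow: the ndo_tx_timeout callback now only logs and queues work, while the worker takes rtnl_lock and the state mutex before doing the expensive channel close/reopen, which must not happen in the timeout context. A condensed sketch of that defer pattern (my_priv and my_reopen_channels are hypothetical, not the driver's symbols):

/* ndo_tx_timeout may not sleep; defer recovery to process context. */
struct my_priv {
	struct net_device *netdev;
	struct workqueue_struct *wq;
	struct work_struct tx_timeout_work;
	struct mutex state_lock;
	unsigned long state;
};

static void my_tx_timeout_work(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv,
					    tx_timeout_work);

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (test_bit(0 /* OPENED */, &priv->state))
		my_reopen_channels(priv);	/* hypothetical recovery */
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

static void my_tx_timeout(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->tx_timeout_work); /* cheap, atomic-safe */
}
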
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
@@ -3707,7 +3858,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
bpf_prog_put(old_prog);
if (reset) /* change RQ type according to priv->xdp_prog */
- mlx5e_set_rq_params(priv->mdev, &priv->channels.params);
+ mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
if (was_opened && reset)
mlx5e_open_locked(netdev);
@@ -3852,15 +4003,6 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
return 0;
}
-u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
-{
- int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
-
- return bf_buf_size -
- sizeof(struct mlx5e_tx_wqe) +
- 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
-}
-
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels)
{
@@ -3900,16 +4042,20 @@ static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
return 0;
}
-static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
+static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
- return (link_speed && pci_bw &&
- (pci_bw < 40000) && (pci_bw < link_speed));
-}
+ u32 link_speed = 0;
+ u32 pci_bw = 0;
-static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
-{
- return !(link_speed && pci_bw &&
- (pci_bw <= 16000) && (pci_bw < link_speed));
+ mlx5e_get_max_linkspeed(mdev, &link_speed);
+ mlx5e_get_pci_bw(mdev, &pci_bw);
+ mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
+ link_speed, pci_bw);
+
+#define MLX5E_SLOW_PCI_RATIO (2)
+
+ return link_speed && pci_bw &&
+ link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
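
slow_pci_heuristic() now merges the two old heuristics into one ratio test: the PCI link is considered the bottleneck when the network port is more than twice as fast. A standalone model with illustrative numbers (Mbps, matching the driver's debug print):

#include <stdbool.h>
#include <stdio.h>

#define SLOW_PCI_RATIO 2

static bool slow_pci(unsigned int link_speed, unsigned int pci_bw)
{
	return link_speed && pci_bw && link_speed > SLOW_PCI_RATIO * pci_bw;
}

int main(void)
{
	/* 100 Gbps port behind a ~32 Gbps PCIe link: heuristic fires */
	printf("%d\n", slow_pci(100000, 32000));	/* 1 */
	/* 25 Gbps port in the same slot: plenty of PCI headroom */
	printf("%d\n", slow_pci(25000, 32000));		/* 0 */
	return 0;
}

Either speed reading as 0 (query failed) disables the heuristic rather than guessing.
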
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
@@ -3961,7 +4107,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
-u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
@@ -3978,17 +4124,10 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
u16 max_channels)
{
u8 cq_period_mode = 0;
- u32 link_speed = 0;
- u32 pci_bw = 0;
params->num_channels = max_channels;
params->num_tc = 1;
- mlx5e_get_max_linkspeed(mdev, &link_speed);
- mlx5e_get_pci_bw(mdev, &pci_bw);
- mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
- link_speed, pci_bw);
-
/* SQ */
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
@@ -3998,18 +4137,22 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
params->rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
MLX5_CAP_GEN(mdev, vport_group_manager))
- params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
+ params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
/* RQ */
- mlx5e_set_rq_params(mdev, params);
+ if (mlx5e_striding_rq_possible(mdev, params))
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ,
+ !slow_pci_heuristic(mdev));
+ mlx5e_set_rq_type(mdev, params);
+ mlx5e_init_rq_type_params(mdev, params);
/* HW LRO */
/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
+ params->lro_en = !slow_pci_heuristic(mdev);
params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
@@ -4021,7 +4164,6 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
/* TX inline */
- params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
@@ -4104,6 +4246,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_RXCSUM;
netdev->vlan_features |= NETIF_F_RXHASH;
+ netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
+
if (!!MLX5_CAP_ETH(mdev, lro_cap))
netdev->vlan_features |= NETIF_F_LRO;
@@ -4183,7 +4328,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_ipsec_build_netdev(priv);
}
-static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
+static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -4193,14 +4338,21 @@ static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
priv->q_counter = 0;
}
+
+ err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
+ if (err) {
+ mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
+ priv->drop_rq_q_counter = 0;
+ }
}
-static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
+static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
- if (!priv->q_counter)
- return;
+ if (priv->q_counter)
+ mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
- mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+ if (priv->drop_rq_q_counter)
+ mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}
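
Both allocations above follow the same "optional counter" convention: an allocation failure is downgraded to a warning, the handle is left at 0, and every later user (string fill, query, dealloc) checks the handle before touching it. A condensed sketch of that convention; create_optional_counter is a hypothetical helper, the mlx5 calls are the ones in the hunk:

static void create_optional_counter(struct mlx5_core_dev *mdev, u32 *counter)
{
	int err = mlx5_core_alloc_q_counter(mdev, counter);

	if (err) {
		mlx5_core_warn(mdev, "alloc q counter failed, %d\n", err);
		*counter = 0;	/* 0 means "absent"; all users test for it */
	}
}
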
static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
@@ -4439,18 +4591,18 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
if (err)
goto out;
- err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
+ mlx5e_create_q_counters(priv);
+
+ err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
- goto err_cleanup_tx;
+ goto err_destroy_q_counters;
}
err = profile->init_rx(priv);
if (err)
goto err_close_drop_rq;
- mlx5e_create_q_counter(priv);
-
if (profile->enable)
profile->enable(priv);
@@ -4459,7 +4611,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
-err_cleanup_tx:
+err_destroy_q_counters:
+ mlx5e_destroy_q_counters(priv);
profile->cleanup_tx(priv);
out:
@@ -4476,9 +4629,9 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
profile->disable(priv);
flush_workqueue(priv->wq);
- mlx5e_destroy_q_counter(priv);
profile->cleanup_rx(priv);
mlx5e_close_drop_rq(&priv->drop_rq);
+ mlx5e_destroy_q_counters(priv);
profile->cleanup_tx(priv);
cancel_delayed_work_sync(&priv->update_stats_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index ea4b255380a2..dd32f3e390ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -884,7 +884,6 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
- params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
params->num_tc = 1;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8cce90dc461d..781b8f21d6d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -333,9 +333,8 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
len = ALIGN(headlen_pg, sizeof(long));
dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
DMA_FROM_DEVICE);
- skb_copy_to_linear_data_offset(skb, 0,
- page_address(dma_info->page) + offset,
- len);
+ skb_copy_to_linear_data(skb, page_address(dma_info->page) + offset, len);
+
if (unlikely(offset + headlen > PAGE_SIZE)) {
dma_info++;
headlen_pg = len;
@@ -870,10 +869,8 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- dma_sync_single_range_for_cpu(rq->pdev,
- di->addr + wi->offset,
- 0, frag_size,
- DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
+ frag_size, DMA_FROM_DEVICE);
prefetch(data);
wi->offset += frag_size;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 5f0f3493d747..b08c94422907 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -60,6 +60,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
@@ -153,6 +155,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_cqe_err += sq_stats->cqe_err;
+ s->tx_recover += sq_stats->recover;
s->tx_xmit_more += sq_stats->xmit_more;
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
s->tx_csum_none += sq_stats->csum_none;
@@ -170,11 +174,24 @@ static const struct counter_desc q_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};
+static const struct counter_desc drop_rq_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
+};
+
#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
+#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
{
- return priv->q_counter ? NUM_Q_COUNTERS : 0;
+ int num_stats = 0;
+
+ if (priv->q_counter)
+ num_stats += NUM_Q_COUNTERS;
+
+ if (priv->drop_rq_q_counter)
+ num_stats += NUM_DROP_RQ_COUNTERS;
+
+ return num_stats;
}
static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
@@ -182,7 +199,13 @@ static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
int i;
for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ q_stats_desc[i].format);
+
+ for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ drop_rq_stats_desc[i].format);
+
return idx;
}
@@ -191,7 +214,11 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
int i;
for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i);
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ q_stats_desc, i);
+ for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ drop_rq_stats_desc, i);
return idx;
}
@@ -199,16 +226,76 @@ static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
- int err;
- if (!priv->q_counter)
- return;
+ if (priv->q_counter &&
+ !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
+ sizeof(out)))
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ if (priv->drop_rq_q_counter &&
+ !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
+ out, sizeof(out)))
+ qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
+ out_of_buffer);
+}
+
+#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
+static const struct counter_desc vnic_env_stats_desc[] = {
+ { "rx_steer_missed_packets",
+ VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
+};
+
+#define NUM_VNIC_ENV_COUNTERS ARRAY_SIZE(vnic_env_stats_desc)
+
+static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
+{
+ return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
+ NUM_VNIC_ENV_COUNTERS : 0;
+}
+
+static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+ return idx;
+
+ for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vnic_env_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+ return idx;
- err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
- if (err)
+ for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_desc, i);
+ return idx;
+}
+
+static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
+{
+ u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
+ int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
return;
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+ MLX5_SET(query_vnic_env_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VNIC_ENV);
+ MLX5_SET(query_vnic_env_in, in, op_mod, 0);
+ MLX5_SET(query_vnic_env_in, in, other_vport, 0);
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
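
The new vnic_env group plugs into the same stats-group contract as the q counters above: ethtool first asks for the count, then the strings, then the values, and each fill callback must advance and return the running index so groups compose. A minimal userspace model of that contract with one counter (names and the value 42 are illustrative):

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32

static int grp_num(void) { return 1; }

static int grp_strings(char *data, int idx)
{
	strcpy(data + idx++ * GSTRING_LEN, "rx_steer_missed_packets");
	return idx;	/* caller chains the next group from here */
}

static int grp_vals(unsigned long long *data, int idx)
{
	data[idx++] = 42;	/* would come from a QUERY_VNIC_ENV command */
	return idx;
}

int main(void)
{
	char names[GSTRING_LEN];
	unsigned long long vals[1];

	grp_strings(names, 0);
	grp_vals(vals, 0);
	printf("%d stat(s): %s = %llu\n", grp_num(), names, vals[0]);
	return 0;
}
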
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
@@ -754,7 +841,15 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
+static const struct counter_desc pport_pfc_stall_stats_desc[] = {
+ { "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
+ { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
+};
+
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
+ MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
+ MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
@@ -790,7 +885,8 @@ static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
return (mlx5e_query_global_pause_combined(priv) +
hweight8(mlx5e_query_pfc_combined(priv))) *
- NUM_PPORT_PER_PRIO_PFC_COUNTERS;
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS +
+ NUM_PPORT_PFC_STALL_COUNTERS(priv);
}
static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
@@ -818,6 +914,10 @@ static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
}
}
+ for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_pfc_stall_stats_desc[i].format);
+
return idx;
}
@@ -845,6 +945,10 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
}
}
+ for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+ pport_pfc_stall_stats_desc, i);
+
return idx;
}
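
NUM_PPORT_PFC_STALL_COUNTERS multiplies the descriptor count by the two capability bits, so the stall counters contribute exactly 0 or 2 entries and the fill loops above need no extra capability checks. A quick standalone check of that arithmetic:

#include <stdio.h>

#define N_DESC 2	/* ARRAY_SIZE(pport_pfc_stall_stats_desc) */
#define NUM_STALL(pfcc_mask, stall_detect) \
	(N_DESC * (pfcc_mask) * (stall_detect))

int main(void)
{
	printf("%d\n", NUM_STALL(1, 1));	/* 2: both caps present */
	printf("%d\n", NUM_STALL(1, 0));	/* 0: no stall_detect cap */
	return 0;
}
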
@@ -1003,6 +1107,8 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
};
static const struct counter_desc ch_stats_desc[] = {
@@ -1095,6 +1201,12 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
.update_stats = mlx5e_grp_q_update_stats,
},
{
+ .get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
+ .fill_strings = mlx5e_grp_vnic_env_fill_strings,
+ .fill_stats = mlx5e_grp_vnic_env_fill_stats,
+ .update_stats = mlx5e_grp_vnic_env_update_stats,
+ },
+ {
.get_num_stats = mlx5e_grp_vport_get_num_stats,
.fill_strings = mlx5e_grp_vport_fill_strings,
.fill_stats = mlx5e_grp_vport_fill_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 0b3320a2b072..53111a2df587 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -78,6 +78,8 @@ struct mlx5e_sw_stats {
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 tx_xmit_more;
+ u64 tx_cqe_err;
+ u64 tx_recover;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_buff_alloc_err;
@@ -97,6 +99,11 @@ struct mlx5e_sw_stats {
struct mlx5e_qcounter_stats {
u32 rx_out_of_buffer;
+ u32 rx_if_down_packets;
+};
+
+struct mlx5e_vnic_env_stats {
+ __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
@@ -192,6 +199,8 @@ struct mlx5e_sq_stats {
u64 stopped;
u64 wake;
u64 dropped;
+ u64 cqe_err;
+ u64 recover;
};
struct mlx5e_ch_stats {
@@ -201,6 +210,7 @@ struct mlx5e_ch_stats {
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
+ struct mlx5e_vnic_env_stats vnic;
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
struct rtnl_link_stats64 vf_vport;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 7c33df2034f0..3e4a7e81b67f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2530,12 +2530,17 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
- tcf_vlan_push_prio(a))
- return -EOPNOTSUPP;
-
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- attr->vlan = tcf_vlan_push_vid(a);
+ attr->vlan_vid = tcf_vlan_push_vid(a);
+ if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
+ attr->vlan_prio = tcf_vlan_push_prio(a);
+ attr->vlan_proto = tcf_vlan_push_proto(a);
+ if (!attr->vlan_proto)
+ attr->vlan_proto = htons(ETH_P_8021Q);
+ } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+ tcf_vlan_push_prio(a)) {
+ return -EOPNOTSUPP;
+ }
} else { /* action is TCA_VLAN_ACT_MODIFY */
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 11b4f1089d1c..20297108528a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -417,6 +417,18 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
+static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
+ struct mlx5_err_cqe *err_cqe)
+{
+ u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);
+
+ netdev_err(sq->channel->netdev,
+ "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+ sq->cq.mcq.cqn, ci, sq->sqn, err_cqe->syndrome,
+ err_cqe->vendor_err_synd);
+ mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
+}
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_txqsq *sq;
@@ -456,6 +468,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
+ if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) {
+ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
+ &sq->state)) {
+ mlx5e_dump_error_cqe(sq,
+ (struct mlx5_err_cqe *)cqe);
+ queue_work(cq->channel->priv->wq,
+ &sq->recover.recover_work);
+ }
+ sq->stats.cqe_err++;
+ }
+
do {
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
@@ -509,7 +532,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
+ mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
+ MLX5E_SQ_STOP_ROOM) &&
+ !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
sq->stats.wake++;
}
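
The error path above keys off the CQE opcode, which lives in the high nibble of op_own (the low bit is the ownership bit), and uses test_and_set_bit() so only the first error CQE queues the recovery work while later ones just bump the counter. A quick standalone check of the nibble extraction (the opcode value below is illustrative, not taken from the mlx5 headers):

#include <stdio.h>

#define CQE_REQ_ERR 13	/* illustrative opcode value */

int main(void)
{
	unsigned char op_own = (CQE_REQ_ERR << 4) | 0x1; /* opcode | owner */

	printf("opcode=%u owner=%u\n", op_own >> 4, op_own & 1);
	return 0;
}
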
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 77b7272eaaa8..332bc56306bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2096,17 +2096,19 @@ unlock:
return err;
}
-static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
- int vport_idx,
- struct mlx5_vport_drop_stats *stats)
+static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ int vport_idx,
+ struct mlx5_vport_drop_stats *stats)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_vport *vport = &esw->vports[vport_idx];
+ u64 rx_discard_vport_down, tx_discard_vport_down;
u64 bytes = 0;
u16 idx = 0;
+ int err = 0;
if (!vport->enabled || esw->mode != SRIOV_LEGACY)
- return;
+ return 0;
if (vport->egress.drop_counter) {
idx = vport->egress.drop_counter->id;
@@ -2117,6 +2119,23 @@ static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
idx = vport->ingress.drop_counter->id;
mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
}
+
+ if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
+ !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+ return 0;
+
+ err = mlx5_query_vport_down_stats(dev, vport_idx,
+ &rx_discard_vport_down,
+ &tx_discard_vport_down);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
+ stats->rx_dropped += rx_discard_vport_down;
+ if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+ stats->tx_dropped += tx_discard_vport_down;
+
+ return 0;
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -2180,7 +2199,9 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
- mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ if (err)
+ goto free_out;
vf_stats->rx_dropped = stats.rx_dropped;
vf_stats->tx_dropped = stats.tx_dropped;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 98d2177d0806..4cd773fa55e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -227,15 +227,14 @@ enum {
SET_VLAN_INSERT = BIT(1)
};
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x4000
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000
-
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_eswitch_rep *out_rep;
int action;
- u16 vlan;
+ __be16 vlan_proto;
+ u16 vlan_vid;
+ u8 vlan_prio;
bool vlan_handled;
u32 encap_id;
u32 mod_hdr_id;
@@ -258,6 +257,12 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos, u8 set_flags);
+static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
+}
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(dev, format, ...) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0a8303c1b52f..35e256eb2f6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -58,8 +58,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
- /* per flow vlan pop/push is emulated, don't set that into the firmware */
- flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ flow_act.action = attr->action;
+ /* if per flow vlan pop/push is emulated, don't set that into the firmware */
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
+ flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ flow_act.vlan.ethtype = ntohs(attr->vlan_proto);
+ flow_act.vlan.vid = attr->vlan_vid;
+ flow_act.vlan.prio = attr->vlan_prio;
+ }
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
@@ -88,10 +96,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
flow_act.encap_id = attr->encap_id;
rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
@@ -185,7 +193,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
/* protects against (1) setting rules with different vlans to push and
* (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
*/
- if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
+ if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
goto out_notsupp;
return 0;
@@ -202,6 +210,10 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
bool push, pop, fwd;
int err = 0;
+ /* nop if we're in the vlan push/pop non-emulation mode */
+ if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+ return 0;
+
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
@@ -239,11 +251,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (vport->vlan_refcount)
goto skip_set_push;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
SET_VLAN_INSERT | SET_VLAN_STRIP);
if (err)
goto out;
- vport->vlan = attr->vlan;
+ vport->vlan = attr->vlan_vid;
skip_set_push:
vport->vlan_refcount++;
}
@@ -261,6 +273,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
bool push, pop, fwd;
int err = 0;
+ /* nop if we're in the vlan push/pop non-emulation mode */
+ if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+ return 0;
+
if (!attr->vlan_handled)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 645f83cac34d..ef5afd7c9325 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -317,7 +317,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5_flow_rule *dst;
- void *in_flow_context;
+ void *in_flow_context, *vlan;
void *in_match_value;
void *in_dests;
u32 *in;
@@ -340,11 +340,19 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id);
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_id);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio);
+
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, sizeof(fte->val));
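
The push_vlan programming above uses the usual PRM accessor pattern: MLX5_ADDR_OF() yields a pointer to a named sub-structure inside the command buffer and MLX5_SET() writes a field at its PRM-defined offset, so no offsets are computed by hand. A sketch of that pattern in isolation ('in' is the set_fte command buffer from the surrounding function; the literal values are illustrative):

void *flow_ctx = MLX5_ADDR_OF(set_fte_in, in, flow_context);
void *vlan = MLX5_ADDR_OF(flow_context, flow_ctx, push_vlan);

MLX5_SET(vlan, vlan, ethtype, 0x8100);	/* e.g. 802.1Q */
MLX5_SET(vlan, vlan, vid, 100);
MLX5_SET(vlan, vlan, prio, 3);
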
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 3ba07c7096ef..de51e7c39bc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1439,7 +1439,9 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_ENCAP |
MLX5_FLOW_CONTEXT_ACTION_DECAP |
- MLX5_FLOW_CONTEXT_ACTION_MOD_HDR))
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
return true;
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 9d11e92fb541..d7bb10ab2173 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -183,6 +183,9 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, debug))
+ mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);
+
if (MLX5_CAP_GEN(dev, pcam_reg))
mlx5_get_pcam_reg(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index f953378bd13d..a35608faf8d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -56,7 +56,9 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
- mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
+ mlx5e_set_rq_type(mdev, params);
+ mlx5e_init_rq_type_params(mdev, params);
/* RQ size in ipoib by default is 512 */
params->log_rq_size = is_kdump_kernel() ?
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 4e25f2b2e0bc..7d001fe6e631 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -50,6 +50,11 @@ extern uint mlx5_core_debug_mask;
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+#define mlx5_core_dbg_once(__dev, format, ...) \
+ dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
do { \
if ((mask) & mlx5_core_debug_mask) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index c37d00cd472a..fa9d0760dd36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -483,6 +483,17 @@ int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
+static int mlx5_query_pfcc_reg(struct mlx5_core_dev *dev, u32 *out,
+ u32 out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ out_size, MLX5_REG_PFCC, 0, 0);
+}
+
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
@@ -500,13 +511,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
- MLX5_SET(pfcc_reg, in, local_port, 1);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PFCC, 0, 0);
+ err = mlx5_query_pfcc_reg(dev, out, sizeof(out));
if (err)
return err;
@@ -520,6 +528,49 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ MLX5_SET(pfcc_reg, in, pptx_mask_n, 1);
+ MLX5_SET(pfcc_reg, in, pprx_mask_n, 1);
+ MLX5_SET(pfcc_reg, in, ppan_mask_n, 1);
+ MLX5_SET(pfcc_reg, in, critical_stall_mask, 1);
+ MLX5_SET(pfcc_reg, in, minor_stall_mask, 1);
+ MLX5_SET(pfcc_reg, in, device_stall_critical_watermark,
+ stall_critical_watermark);
+ MLX5_SET(pfcc_reg, in, device_stall_minor_watermark, stall_minor_watermark);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+}
+
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark,
+ u16 *stall_minor_watermark)
+{
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ err = mlx5_query_pfcc_reg(dev, out, sizeof(out));
+ if (err)
+ return err;
+
+ if (stall_critical_watermark)
+ *stall_critical_watermark = MLX5_GET(pfcc_reg, out,
+ device_stall_critical_watermark);
+
+ if (stall_minor_watermark)
+ *stall_minor_watermark = MLX5_GET(pfcc_reg, out,
+ device_stall_minor_watermark);
+
+ return 0;
+}
+
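
A hypothetical caller of the new watermark pair (the real user is the ethtool plumbing elsewhere in this series): program both watermarks, then read them back through the shared mlx5_query_pfcc_reg() path. The numeric values are illustrative only:

u16 crit = 0, minor = 0;
int err;

err = mlx5_set_port_stall_watermark(mdev, 8000 /* critical */,
				    100 /* minor */);
if (!err)
	err = mlx5_query_port_stall_watermark(mdev, &crit, &minor);
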
int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
@@ -538,13 +589,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
- MLX5_SET(pfcc_reg, in, local_port, 1);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PFCC, 0, 0);
+ err = mlx5_query_pfcc_reg(dev, out, sizeof(out));
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 9e38343a951f..c64957b5ef47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -157,6 +157,31 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
}
EXPORT_SYMBOL(mlx5_core_query_sq);
+int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state)
+{
+ void *out;
+ void *sqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(query_sq_out);
+ out = kvzalloc(inlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_sq(dev, sqn, out);
+ if (err)
+ goto out;
+
+ sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context);
+ *state = MLX5_GET(sqc, sqc, state);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
+
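
A sketch of how a consumer might use the newly exported helper; the TX recovery flow added in this series checks whether an SQ has entered the error state before attempting a reset (recover_sq is a hypothetical helper; MLX5_SQC_STATE_ERR is the sqc state value from the PRM):

u8 state;
int err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);

if (!err && state == MLX5_SQC_STATE_ERR)
	recover_sq(sq);	/* hypothetical: drain, RST, back to RDY */
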
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index dfe36cf6fbea..177e076b8d17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1070,6 +1070,32 @@ free:
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
+int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
+ u64 *rx_discard_vport_down,
+ u64 *tx_discard_vport_down)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+ int err;
+
+ MLX5_SET(query_vnic_env_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VNIC_ENV);
+ MLX5_SET(query_vnic_env_in, in, op_mod, 0);
+ MLX5_SET(query_vnic_env_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(query_vnic_env_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
+ vport_env.receive_discard_vport_down);
+ *tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
+ vport_env.transmit_discard_vport_down);
+ return 0;
+}
+
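
A hypothetical caller of mlx5_query_vport_down_stats(), folding the "device was down" discards into existing drop accounting under the same capability gates the eswitch code above uses:

u64 rx_down = 0, tx_down = 0;

if (MLX5_CAP_GEN(mdev, receive_discard_vport_down) &&
    !mlx5_query_vport_down_stats(mdev, vport_num, &rx_down, &tx_down))
	stats->rx_dropped += rx_down;	/* tx_down handled analogously */
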
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
int vf,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index ab710e37af99..84185f8dfbae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -218,32 +218,32 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
switch (attr_type) {
case MLXSW_HWMON_ATTR_TYPE_TEMP:
mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_show;
- mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_input", num + 1);
break;
case MLXSW_HWMON_ATTR_TYPE_TEMP_MAX:
mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_max_show;
- mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_highest", num + 1);
break;
case MLXSW_HWMON_ATTR_TYPE_TEMP_RST:
mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_temp_rst_store;
- mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0200;
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_reset_history", num + 1);
break;
case MLXSW_HWMON_ATTR_TYPE_FAN_RPM:
mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_rpm_show;
- mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"fan%u_input", num + 1);
break;
case MLXSW_HWMON_ATTR_TYPE_PWM:
mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show;
mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store;
- mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR | S_IRUGO;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0644;
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"pwm%u", num + 1);
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index e002398364c8..6218231e379e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4225,6 +4225,12 @@ MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
*/
MLXSW_ITEM32(reg, ritr, ipv4_mc, 0x00, 27, 1);
+/* reg_ritr_ipv6_mc
+ * IPv6 multicast routing enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_mc, 0x00, 26, 1);
+
enum mlxsw_reg_ritr_if_type {
/* VLAN interface. */
MLXSW_REG_RITR_VLAN_IF,
@@ -4290,6 +4296,14 @@ MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
*/
MLXSW_ITEM32(reg, ritr, ipv4_mc_fe, 0x04, 27, 1);
+/* reg_ritr_ipv6_mc_fe
+ * IPv6 Multicast Forwarding Enable.
+ * When disabled, forwarding is blocked but local traffic (traps and IP to me)
+ * will be enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_mc_fe, 0x04, 26, 1);
+
/* reg_ritr_lb_en
* Loop-back filter enable for unicast packets.
* If the flag is set then loop-back filter for unicast packets is
@@ -4513,12 +4527,14 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_ipv4_set(payload, 1);
mlxsw_reg_ritr_ipv6_set(payload, 1);
mlxsw_reg_ritr_ipv4_mc_set(payload, 1);
+ mlxsw_reg_ritr_ipv6_mc_set(payload, 1);
mlxsw_reg_ritr_type_set(payload, type);
mlxsw_reg_ritr_op_set(payload, op);
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
mlxsw_reg_ritr_ipv6_fe_set(payload, 1);
mlxsw_reg_ritr_ipv4_mc_fe_set(payload, 1);
+ mlxsw_reg_ritr_ipv6_mc_fe_set(payload, 1);
mlxsw_reg_ritr_lb_en_set(payload, 1);
mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
mlxsw_reg_ritr_mtu_set(payload, mtu);
@@ -6302,30 +6318,34 @@ MLXSW_ITEM32(reg, rmft2, irif_mask, 0x08, 24, 1);
*/
MLXSW_ITEM32(reg, rmft2, irif, 0x08, 0, 16);
-/* reg_rmft2_dip4
- * Destination IPv4 address
+/* reg_rmft2_dip{4,6}
+ * Destination IPv4/6 address
* Access: RW
*/
+MLXSW_ITEM_BUF(reg, rmft2, dip6, 0x10, 16);
MLXSW_ITEM32(reg, rmft2, dip4, 0x1C, 0, 32);
-/* reg_rmft2_dip4_mask
+/* reg_rmft2_dip{4,6}_mask
* A bit that is set directs the TCAM to compare the corresponding bit in key. A
* bit that is clear directs the TCAM to ignore the corresponding bit in key.
* Access: RW
*/
+MLXSW_ITEM_BUF(reg, rmft2, dip6_mask, 0x20, 16);
MLXSW_ITEM32(reg, rmft2, dip4_mask, 0x2C, 0, 32);
-/* reg_rmft2_sip4
- * Source IPv4 address
+/* reg_rmft2_sip{4,6}
+ * Source IPv4/6 address
* Access: RW
*/
+MLXSW_ITEM_BUF(reg, rmft2, sip6, 0x30, 16);
MLXSW_ITEM32(reg, rmft2, sip4, 0x3C, 0, 32);
-/* reg_rmft2_sip4_mask
+/* reg_rmft2_sip{4,6}_mask
* A bit that is set directs the TCAM to compare the corresponding bit in key. A
* bit that is clear directs the TCAM to ignore the corresponding bit in key.
* Access: RW
*/
+MLXSW_ITEM_BUF(reg, rmft2, sip6_mask, 0x40, 16);
MLXSW_ITEM32(reg, rmft2, sip4_mask, 0x4C, 0, 32);
/* reg_rmft2_flexible_action_set
@@ -6343,26 +6363,52 @@ MLXSW_ITEM_BUF(reg, rmft2, flexible_action_set, 0x80,
MLXSW_REG_FLEX_ACTION_SET_LEN);
static inline void
-mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
- enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
- u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
- const char *flexible_action_set)
+mlxsw_reg_rmft2_common_pack(char *payload, bool v, u16 offset,
+ u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ const char *flex_action_set)
{
MLXSW_REG_ZERO(rmft2, payload);
mlxsw_reg_rmft2_v_set(payload, v);
- mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
mlxsw_reg_rmft2_op_set(payload, MLXSW_REG_RMFT2_OP_READ_WRITE);
mlxsw_reg_rmft2_offset_set(payload, offset);
mlxsw_reg_rmft2_virtual_router_set(payload, virtual_router);
mlxsw_reg_rmft2_irif_mask_set(payload, irif_mask);
mlxsw_reg_rmft2_irif_set(payload, irif);
+ if (flex_action_set)
+ mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload,
+ flex_action_set);
+}
+
+static inline void
+mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
+ const char *flexible_action_set)
+{
+ mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router,
+ irif_mask, irif, flexible_action_set);
+ mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
mlxsw_reg_rmft2_dip4_set(payload, dip4);
mlxsw_reg_rmft2_dip4_mask_set(payload, dip4_mask);
mlxsw_reg_rmft2_sip4_set(payload, sip4);
mlxsw_reg_rmft2_sip4_mask_set(payload, sip4_mask);
- if (flexible_action_set)
- mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload,
- flexible_action_set);
+}
+
+static inline void
+mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ struct in6_addr dip6, struct in6_addr dip6_mask,
+ struct in6_addr sip6, struct in6_addr sip6_mask,
+ const char *flexible_action_set)
+{
+ mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router,
+ irif_mask, irif, flexible_action_set);
+ mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV6);
+ mlxsw_reg_rmft2_dip6_memcpy_to(payload, (void *)&dip6);
+ mlxsw_reg_rmft2_dip6_mask_memcpy_to(payload, (void *)&dip6_mask);
+ mlxsw_reg_rmft2_sip6_memcpy_to(payload, (void *)&sip6);
+ mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}
/* MFCR - Management Fan Control Register
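
A sketch of how the new IPv6 pack helper might be called to program an exact-match (S,G) entry, using all-ones masks just as the IPv4 variant does with 0xffffffff (MLXSW_REG_RMFT2_LEN and MLXSW_REG_RMFT2_IRIF_MASK_IGNORE are the definitions used by the existing IPv4 callers; vr_id and the addresses are placeholders):

char payload[MLXSW_REG_RMFT2_LEN];
struct in6_addr grp = {}, grp_mask, src = {}, src_mask;

memset(&grp_mask, 0xff, sizeof(grp_mask));	/* exact-match group */
memset(&src_mask, 0xff, sizeof(src_mask));	/* exact-match source */
mlxsw_reg_rmft2_ipv6_pack(payload, true, 0, vr_id,
			  MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
			  grp, grp_mask, src, src_mask, NULL);
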
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 7885fc475f7e..4aa84442e357 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3380,6 +3380,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
+ MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 978a3c70653a..a82539609d49 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -33,6 +33,7 @@
*/
#include <linux/rhashtable.h>
+#include <net/ipv6.h>
#include "spectrum_mr.h"
#include "spectrum_router.h"
@@ -47,6 +48,11 @@ struct mlxsw_sp_mr {
/* priv has to be always the last item */
};
+struct mlxsw_sp_mr_vif;
+struct mlxsw_sp_mr_vif_ops {
+ bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif);
+};
+
struct mlxsw_sp_mr_vif {
struct net_device *dev;
const struct mlxsw_sp_rif *rif;
@@ -61,6 +67,9 @@ struct mlxsw_sp_mr_vif {
* instance is used as an ingress VIF
*/
struct list_head route_ivif_list;
+
+ /* Protocol specific operations for a VIF */
+ const struct mlxsw_sp_mr_vif_ops *ops;
};
struct mlxsw_sp_mr_route_vif_entry {
@@ -70,6 +79,17 @@ struct mlxsw_sp_mr_route_vif_entry {
struct mlxsw_sp_mr_route *mr_route;
};
+struct mlxsw_sp_mr_table;
+struct mlxsw_sp_mr_table_ops {
+ bool (*is_route_valid)(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mr_mfc *mfc);
+ void (*key_create)(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mr_mfc *mfc);
+ bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_mr_route *mr_route);
+};
+
struct mlxsw_sp_mr_table {
struct list_head node;
enum mlxsw_sp_l3proto proto;
@@ -78,6 +98,7 @@ struct mlxsw_sp_mr_table {
struct mlxsw_sp_mr_vif vifs[MAXVIFS];
struct list_head route_list;
struct rhashtable route_ht;
+ const struct mlxsw_sp_mr_table_ops *ops;
char catchall_route_priv[0];
/* catchall_route_priv has to be always the last item */
};
@@ -88,7 +109,7 @@ struct mlxsw_sp_mr_route {
struct mlxsw_sp_mr_route_key key;
enum mlxsw_sp_mr_route_action route_action;
u16 min_mtu;
- struct mfc_cache *mfc4;
+ struct mr_mfc *mfc;
void *route_priv;
const struct mlxsw_sp_mr_table *mr_table;
/* A list of route_vif_entry structs that point to the egress VIFs */
@@ -104,14 +125,9 @@ static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
.automatic_shrinking = true,
};
-static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif)
-{
- return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
-}
-
static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
- return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif;
+ return vif->ops->is_regular(vif) && vif->dev && vif->rif;
}
static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
@@ -122,18 +138,9 @@ static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
- vifi_t ivif;
+ vifi_t ivif = mr_route->mfc->mfc_parent;
- switch (mr_route->mr_table->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- ivif = mr_route->mfc4->_c.mfc_parent;
- return mr_route->mfc4->_c.mfc_un.res.ttls[ivif] != 255;
- case MLXSW_SP_L3_PROTO_IPV6:
- /* fall through */
- default:
- WARN_ON_ONCE(1);
- }
- return false;
+ return mr_route->mfc->mfc_un.res.ttls[ivif] != 255;
}
static int
@@ -149,19 +156,6 @@ mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
return valid_evifs;
}
-static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route)
-{
- switch (mr_route->mr_table->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
- case MLXSW_SP_L3_PROTO_IPV6:
- /* fall through */
- default:
- WARN_ON_ONCE(1);
- }
- return false;
-}
-
static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
@@ -174,7 +168,8 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
/* The kernel does not match a (*,G) route that the ingress interface is
* not one of the egress interfaces, so trap these kind of routes.
*/
- if (mlxsw_sp_mr_route_starg(mr_route) &&
+ if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
+ mr_route) &&
!mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
@@ -195,25 +190,11 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
- return mlxsw_sp_mr_route_starg(mr_route) ?
+ return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
+ mr_route) ?
MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}
-static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
- struct mlxsw_sp_mr_route_key *key,
- const struct mfc_cache *mfc)
-{
- bool starg = (mfc->mfc_origin == htonl(INADDR_ANY));
-
- memset(key, 0, sizeof(*key));
- key->vrid = mr_table->vr_id;
- key->proto = mr_table->proto;
- key->group.addr4 = mfc->mfc_mcastgrp;
- key->group_mask.addr4 = htonl(0xffffffff);
- key->source.addr4 = mfc->mfc_origin;
- key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
-}
-
static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
struct mlxsw_sp_mr_vif *mr_vif)
{
@@ -343,8 +324,8 @@ static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
}
static struct mlxsw_sp_mr_route *
-mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
- struct mfc_cache *mfc)
+mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc)
{
struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
struct mlxsw_sp_mr_route *mr_route;
@@ -356,15 +337,16 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
if (!mr_route)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&mr_route->evif_list);
- mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc);
/* Find min_mtu and link iVIF and eVIFs */
mr_route->min_mtu = ETH_MAX_MTU;
- ipmr_cache_hold(mfc);
- mr_route->mfc4 = mfc;
+ mr_cache_hold(mfc);
+ mr_route->mfc = mfc;
+ mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc);
+
mr_route->mr_table = mr_table;
for (i = 0; i < MAXVIFS; i++) {
- if (mfc->_c.mfc_un.res.ttls[i] != 255) {
+ if (mfc->mfc_un.res.ttls[i] != 255) {
err = mlxsw_sp_mr_route_evif_link(mr_route,
&mr_table->vifs[i]);
if (err)
@@ -375,59 +357,37 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
}
}
mlxsw_sp_mr_route_ivif_link(mr_route,
- &mr_table->vifs[mfc->_c.mfc_parent]);
+ &mr_table->vifs[mfc->mfc_parent]);
mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
return mr_route;
err:
- ipmr_cache_put(mfc);
+ mr_cache_put(mfc);
list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
mlxsw_sp_mr_route_evif_unlink(rve);
kfree(mr_route);
return ERR_PTR(err);
}
-static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table,
- struct mlxsw_sp_mr_route *mr_route)
+static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
{
struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
mlxsw_sp_mr_route_ivif_unlink(mr_route);
- ipmr_cache_put(mr_route->mfc4);
+ mr_cache_put(mr_route->mfc);
list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
mlxsw_sp_mr_route_evif_unlink(rve);
kfree(mr_route);
}
-static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
- struct mlxsw_sp_mr_route *mr_route)
-{
- switch (mr_table->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- /* fall through */
- default:
- WARN_ON_ONCE(1);
- }
-}
-
static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
bool offload)
{
- switch (mr_route->mr_table->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- if (offload)
- mr_route->mfc4->_c.mfc_flags |= MFC_OFFLOAD;
- else
- mr_route->mfc4->_c.mfc_flags &= ~MFC_OFFLOAD;
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- /* fall through */
- default:
- WARN_ON_ONCE(1);
- }
+ if (offload)
+ mr_route->mfc->mfc_flags |= MFC_OFFLOAD;
+ else
+ mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD;
}
static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
@@ -449,25 +409,18 @@ static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}
-int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
- struct mfc_cache *mfc, bool replace)
+int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc, bool replace)
{
struct mlxsw_sp_mr_route *mr_orig_route = NULL;
struct mlxsw_sp_mr_route *mr_route;
int err;
- /* If the route is a (*,*) route, abort, as these kind of routes are
- * used for proxy routes.
- */
- if (mfc->mfc_origin == htonl(INADDR_ANY) &&
- mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
- dev_warn(mr_table->mlxsw_sp->bus_info->dev,
- "Offloading proxy routes is not supported.\n");
+ if (!mr_table->ops->is_route_valid(mr_table, mfc))
return -EINVAL;
- }
/* Create a new route */
- mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc);
+ mr_route = mlxsw_sp_mr_route_create(mr_table, mfc);
if (IS_ERR(mr_route))
return PTR_ERR(mr_route);
@@ -512,7 +465,7 @@ int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
&mr_orig_route->ht_node,
mlxsw_sp_mr_route_ht_params);
list_del(&mr_orig_route->node);
- mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route);
+ mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
}
mlxsw_sp_mr_mfc_offload_update(mr_route);
@@ -525,17 +478,17 @@ err_rhashtable_insert:
list_del(&mr_route->node);
err_no_orig_route:
err_duplicate_route:
- mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
+ mlxsw_sp_mr_route_destroy(mr_table, mr_route);
return err;
}
-void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
- struct mfc_cache *mfc)
+void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc)
{
struct mlxsw_sp_mr_route *mr_route;
struct mlxsw_sp_mr_route_key key;
- mlxsw_sp_mr_route4_key(mr_table, &key, mfc);
+ mr_table->ops->key_create(mr_table, &key, mfc);
mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
mlxsw_sp_mr_route_ht_params);
if (mr_route)
@@ -840,6 +793,125 @@ void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
}
}
+/* Protocol specific functions */
+static bool
+mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mr_mfc *c)
+{
+ struct mfc_cache *mfc = (struct mfc_cache *) c;
+
+ /* If the route is a (*,*) route, abort, as these kinds of routes are
+ * used for proxy routes.
+ */
+ if (mfc->mfc_origin == htonl(INADDR_ANY) &&
+ mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ return false;
+ }
+ return true;
+}
+
+static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mr_mfc *c)
+{
+ const struct mfc_cache *mfc = (struct mfc_cache *) c;
+ bool starg;
+
+ starg = (mfc->mfc_origin == htonl(INADDR_ANY));
+
+ memset(key, 0, sizeof(*key));
+ key->vrid = mr_table->vr_id;
+ key->proto = MLXSW_SP_L3_PROTO_IPV4;
+ key->group.addr4 = mfc->mfc_mcastgrp;
+ key->group_mask.addr4 = htonl(0xffffffff);
+ key->source.addr4 = mfc->mfc_origin;
+ key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
+}
+
+static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_mr_route *mr_route)
+{
+ return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
+}
+
+static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif)
+{
+ return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
+}
+
+static bool
+mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mr_mfc *c)
+{
+ struct mfc6_cache *mfc = (struct mfc6_cache *) c;
+
+ /* If the route is a (*,*) route, abort, as these kinds of routes are
+ * used for proxy routes.
+ */
+ if (ipv6_addr_any(&mfc->mf6c_origin) &&
+ ipv6_addr_any(&mfc->mf6c_mcastgrp)) {
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ return false;
+ }
+ return true;
+}
+
+static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mr_mfc *c)
+{
+ const struct mfc6_cache *mfc = (struct mfc6_cache *) c;
+
+ memset(key, 0, sizeof(*key));
+ key->vrid = mr_table->vr_id;
+ key->proto = MLXSW_SP_L3_PROTO_IPV6;
+ key->group.addr6 = mfc->mf6c_mcastgrp;
+ memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6));
+ key->source.addr6 = mfc->mf6c_origin;
+ if (!ipv6_addr_any(&mfc->mf6c_origin))
+ memset(&key->source_mask.addr6, 0xff,
+ sizeof(key->source_mask.addr6));
+}
+
+static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_mr_route *mr_route)
+{
+ return ipv6_addr_any(&mr_route->key.source_mask.addr6);
+}
+
+static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif)
+{
+ return !(vif->vif_flags & MIFF_REGISTER);
+}
+
+static struct
+mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = {
+ {
+ .is_regular = mlxsw_sp_mr_vif4_is_regular,
+ },
+ {
+ .is_regular = mlxsw_sp_mr_vif6_is_regular,
+ },
+};
+
+static struct
+mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = {
+ {
+ .is_route_valid = mlxsw_sp_mr_route4_validate,
+ .key_create = mlxsw_sp_mr_route4_key,
+ .is_route_starg = mlxsw_sp_mr_route4_starg,
+ },
+ {
+ .is_route_valid = mlxsw_sp_mr_route6_validate,
+ .key_create = mlxsw_sp_mr_route6_key,
+ .is_route_starg = mlxsw_sp_mr_route6_starg,
+ },
+};
+
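Both arrays above are indexed directly by enum mlxsw_sp_l3proto (IPV4 == 0, IPV6 == 1), which is what lets the table-creation code below pick the right callbacks with a plain array lookup instead of a switch. A hedged sketch of the idiom, with simplified names:

    enum l3proto { L3_IPV4, L3_IPV6, L3_MAX };

    struct table;
    struct table_ops {
            bool (*is_route_valid)(const struct table *t, const void *mfc);
    };

    static const struct table_ops ops_arr[L3_MAX] = {
            { /* .is_route_valid = IPv4 validator */ },
            { /* .is_route_valid = IPv6 validator */ },
    };

    static void table_init(struct table *t, enum l3proto proto)
    {
            t->ops = &ops_arr[proto];   /* enum value doubles as index */
    }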
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
u32 vr_id,
enum mlxsw_sp_l3proto proto)
@@ -848,6 +920,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
.prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
.key = {
.vrid = vr_id,
+ .proto = proto,
},
.value = {
.route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
@@ -866,6 +939,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
mr_table->vr_id = vr_id;
mr_table->mlxsw_sp = mlxsw_sp;
mr_table->proto = proto;
+ mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
INIT_LIST_HEAD(&mr_table->route_list);
err = rhashtable_init(&mr_table->route_ht,
@@ -876,6 +950,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MAXVIFS; i++) {
INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
+ mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto];
}
err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
@@ -942,18 +1017,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
&bytes);
- switch (mr_route->mr_table->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- if (mr_route->mfc4->_c.mfc_un.res.pkt != packets)
- mr_route->mfc4->_c.mfc_un.res.lastuse = jiffies;
- mr_route->mfc4->_c.mfc_un.res.pkt = packets;
- mr_route->mfc4->_c.mfc_un.res.bytes = bytes;
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- /* fall through */
- default:
- WARN_ON_ONCE(1);
- }
+ if (mr_route->mfc->mfc_un.res.pkt != packets)
+ mr_route->mfc->mfc_un.res.lastuse = jiffies;
+ mr_route->mfc->mfc_un.res.pkt = packets;
+ mr_route->mfc->mfc_un.res.bytes = bytes;
}
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
index 5d26a122af49..7c864a86811d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
@@ -36,6 +36,7 @@
#define _MLXSW_SPECTRUM_MCROUTER_H
#include <linux/mroute.h>
+#include <linux/mroute6.h>
#include "spectrum_router.h"
#include "spectrum.h"
@@ -109,10 +110,10 @@ struct mlxsw_sp_mr_table;
int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_mr_ops *mr_ops);
void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
- struct mfc_cache *mfc, bool replace);
-void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
- struct mfc_cache *mfc);
+int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc, bool replace);
+void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc);
int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
struct net_device *dev, vifi_t vif_index,
unsigned long vif_flags,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 4c7f32d4288d..4f4c0d311883 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -51,7 +51,7 @@ struct mlxsw_sp_mr_tcam_region {
};
struct mlxsw_sp_mr_tcam {
- struct mlxsw_sp_mr_tcam_region ipv4_tcam_region;
+ struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
};
/* This struct maps to one RIGR2 register entry */
@@ -316,20 +316,37 @@ static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
mlxsw_afa_block_first_set(afa_block));
break;
case MLXSW_SP_L3_PROTO_IPV6:
- default:
- WARN_ON_ONCE(1);
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ key->group.addr6,
+ key->group_mask.addr6,
+ key->source.addr6,
+ key->source_mask.addr6,
+ mlxsw_afa_block_first_set(afa_block));
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
+ struct mlxsw_sp_mr_route_key *key,
struct parman_item *parman_item)
{
+ struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
char rmft2_pl[MLXSW_REG_RMFT2_LEN];
- mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid,
- 0, 0, 0, 0, 0, 0, NULL);
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
+ vrid, 0, 0, 0, 0, 0, 0, NULL);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
+ vrid, 0, 0, zero_addr, zero_addr,
+ zero_addr, zero_addr, NULL);
+ break;
+ }
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
@@ -353,27 +370,30 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static struct mlxsw_sp_mr_tcam_region *
+mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
+ enum mlxsw_sp_l3proto proto)
+{
+ return &mr_tcam->tcam_regions[proto];
+}
+
static int
mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
struct mlxsw_sp_mr_tcam_route *route,
enum mlxsw_sp_mr_route_prio prio)
{
- struct parman_prio *parman_prio = NULL;
+ struct mlxsw_sp_mr_tcam_region *tcam_region;
int err;
- switch (route->key.proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio];
- err = parman_item_add(mr_tcam->ipv4_tcam_region.parman,
- parman_prio, &route->parman_item);
- if (err)
- return err;
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- default:
- WARN_ON_ONCE(1);
- }
- route->parman_prio = parman_prio;
+ tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
+ route->key.proto);
+ err = parman_item_add(tcam_region->parman,
+ &tcam_region->parman_prios[prio],
+ &route->parman_item);
+ if (err)
+ return err;
+
+ route->parman_prio = &tcam_region->parman_prios[prio];
return 0;
}
@@ -381,15 +401,13 @@ static void
mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
struct mlxsw_sp_mr_tcam_route *route)
{
- switch (route->key.proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- parman_item_remove(mr_tcam->ipv4_tcam_region.parman,
- route->parman_prio, &route->parman_item);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- default:
- WARN_ON_ONCE(1);
- }
+ struct mlxsw_sp_mr_tcam_region *tcam_region;
+
+ tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
+ route->key.proto);
+
+ parman_item_remove(tcam_region->parman,
+ route->parman_prio, &route->parman_item);
}
static int
@@ -462,7 +480,7 @@ static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
- &route->parman_item);
+ &route->key, &route->parman_item);
mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
@@ -806,21 +824,42 @@ mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+ u32 rtar_key;
+ int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
return -EIO;
- return mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
- &mr_tcam->ipv4_tcam_region,
- MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST);
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
+ err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV4],
+ rtar_key);
+ if (err)
+ return err;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
+ err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV6],
+ rtar_key);
+ if (err)
+ goto err_ipv6_region_init;
+
+ return 0;
+
+err_ipv6_region_init:
+ mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ return err;
}
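The init path above uses the kernel's usual goto-unwind shape: each error label undoes only the steps that had already succeeded, in reverse order. Condensed to its skeleton (hypothetical init/fini names):

    err = init_ipv4_region();
    if (err)
            return err;                 /* nothing to undo yet */

    err = init_ipv6_region();
    if (err)
            goto err_ipv6_region_init;  /* undo IPv4 only */

    return 0;

    err_ipv6_region_init:
    fini_ipv4_region();
    return err;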
static void mlxsw_sp_mr_tcam_fini(void *priv)
{
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
- mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region);
+ mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
+ mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
}
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 921bd1075edf..a9ccd974c620 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -467,7 +467,7 @@ struct mlxsw_sp_vr {
unsigned int rif_count;
struct mlxsw_sp_fib *fib4;
struct mlxsw_sp_fib *fib6;
- struct mlxsw_sp_mr_table *mr4_table;
+ struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
@@ -711,7 +711,9 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
- return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
+ return !!vr->fib4 || !!vr->fib6 ||
+ !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
+ !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
@@ -789,7 +791,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
struct netlink_ext_ack *extack)
{
- struct mlxsw_sp_mr_table *mr4_table;
+ struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
struct mlxsw_sp_fib *fib4;
struct mlxsw_sp_fib *fib6;
struct mlxsw_sp_vr *vr;
@@ -812,15 +814,25 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(mr4_table)) {
err = PTR_ERR(mr4_table);
- goto err_mr_table_create;
+ goto err_mr4_table_create;
}
+ mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(mr6_table)) {
+ err = PTR_ERR(mr6_table);
+ goto err_mr6_table_create;
+ }
+
vr->fib4 = fib4;
vr->fib6 = fib6;
- vr->mr4_table = mr4_table;
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
vr->tb_id = tb_id;
return vr;
-err_mr_table_create:
+err_mr6_table_create:
+ mlxsw_sp_mr_table_destroy(mr4_table);
+err_mr4_table_create:
mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
@@ -830,8 +842,10 @@ err_fib6_create:
static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr)
{
- mlxsw_sp_mr_table_destroy(vr->mr4_table);
- vr->mr4_table = NULL;
+ mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
+ mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
vr->fib6 = NULL;
mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
@@ -854,7 +868,8 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
list_empty(&vr->fib6->node_list) &&
- mlxsw_sp_mr_table_empty(vr->mr4_table))
+ mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
+ mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}
@@ -5378,10 +5393,20 @@ static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static struct mlxsw_sp_mr_table *
+mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
+{
+ if (family == RTNL_FAMILY_IPMR)
+ return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
+ else
+ return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
+}
+
static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
struct mfc_entry_notifier_info *men_info,
bool replace)
{
+ struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted)
@@ -5391,12 +5416,14 @@ static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(vr))
return PTR_ERR(vr);
- return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
+ return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
}
static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
struct mfc_entry_notifier_info *men_info)
{
+ struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted)
@@ -5406,7 +5433,8 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!vr))
return;
- mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
+ mlxsw_sp_mr_route_del(mrt, men_info->mfc);
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
@@ -5414,6 +5442,7 @@ static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
struct vif_entry_notifier_info *ven_info)
{
+ struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_vr *vr;
@@ -5424,8 +5453,9 @@ mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(vr))
return PTR_ERR(vr);
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
- return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
+ return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
ven_info->vif_index,
ven_info->vif_flags, rif);
}
@@ -5434,6 +5464,7 @@ static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
struct vif_entry_notifier_info *ven_info)
{
+ struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted)
@@ -5443,7 +5474,8 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!vr))
return;
- mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
+ mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
@@ -5535,7 +5567,7 @@ static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
- int i;
+ int i, j;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
@@ -5543,7 +5575,8 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp_vr_is_used(vr))
continue;
- mlxsw_sp_mr_table_flush(vr->mr4_table);
+ for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
+ mlxsw_sp_mr_table_flush(vr->mr_table[j]);
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
/* If virtual router was only used for IPv4, then it's no
@@ -5682,11 +5715,11 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
replace);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- ipmr_cache_put(fib_work->men_info.mfc);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
- ipmr_cache_put(fib_work->men_info.mfc);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
@@ -5766,7 +5799,7 @@ mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
- ipmr_cache_hold(fib_work->men_info.mfc);
+ mr_cache_hold(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD: /* fall through */
case FIB_EVENT_VIF_DEL:
@@ -5808,6 +5841,10 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
if (!ipmr_rule_default(rule) && !rule->l3mdev)
err = -1;
break;
+ case RTNL_FAMILY_IP6MR:
+ if (!ip6mr_rule_default(rule) && !rule->l3mdev)
+ err = -1;
+ break;
}
if (err < 0)
@@ -5827,7 +5864,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
if (!net_eq(info->net, &init_net) ||
(info->family != AF_INET && info->family != AF_INET6 &&
- info->family != RTNL_FAMILY_IPMR))
+ info->family != RTNL_FAMILY_IPMR &&
+ info->family != RTNL_FAMILY_IP6MR))
return NOTIFY_DONE;
router = container_of(nb, struct mlxsw_sp_router, fib_nb);
@@ -5857,6 +5895,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
mlxsw_sp_router_fib6_event(fib_work, info);
break;
+ case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
mlxsw_sp_router_fibmr_event(fib_work, info);
@@ -6038,7 +6077,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_vr *vr;
u16 rif_index;
- int err;
+ int i, err;
type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
ops = mlxsw_sp->router->rif_ops_arr[type];
@@ -6078,9 +6117,11 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_configure;
- err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
- if (err)
- goto err_mr_rif_add;
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
+ err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
+ if (err)
+ goto err_mr_rif_add;
+ }
mlxsw_sp_rif_counters_alloc(rif);
mlxsw_sp->router->rifs[rif_index] = rif;
@@ -6088,6 +6129,8 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
return rif;
err_mr_rif_add:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
ops->deconfigure(rif);
err_configure:
if (fid)
@@ -6107,13 +6150,15 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
struct mlxsw_sp_vr *vr;
+ int i;
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
mlxsw_sp->router->rifs[rif->rif_index] = NULL;
mlxsw_sp_rif_counters_free(rif);
- mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
+ mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
ops->deconfigure(rif);
if (fid)
/* Loopback RIFs are not associated with a FID. */
@@ -6520,13 +6565,16 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
if (rif->mtu != dev->mtu) {
struct mlxsw_sp_vr *vr;
+ int i;
/* The RIF is relevant only to its mr_table instance, as unlike
* unicast routing, in multicast routing a RIF cannot be shared
* between several multicast routing tables.
*/
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
+ mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
+ rif, dev->mtu);
}
ether_addr_copy(rif->addr, dev->dev_addr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 1fb82246ce96..a01edcf56797 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -41,6 +41,7 @@
enum mlxsw_sp_l3proto {
MLXSW_SP_L3_PROTO_IPV4,
MLXSW_SP_L3_PROTO_IPV6,
+#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
};
union mlxsw_sp_l3addr {
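Defining MLXSW_SP_L3_PROTO_MAX as a #define relative to the last enumerator, rather than as a trailing enumerator, keeps it usable as an array bound while keeping switch statements over the enum clean: a real enumerator would have to be handled (or silenced) in every switch to avoid -Wswitch warnings. A sketch of the trade-off, with simplified names:

    enum proto { PROTO_IPV4, PROTO_IPV6,
    #define PROTO_MAX (PROTO_IPV6 + 1)         /* array bound, not a case */
    };

    struct table *tables[PROTO_MAX];

    switch (p) {                               /* -Wswitch sees only the */
    case PROTO_IPV4: /* ... */ break;          /* two real protocols     */
    case PROTO_IPV6: /* ... */ break;
    }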
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index ec6cef8267ae..399e9d6993f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -77,6 +77,7 @@ enum {
MLXSW_TRAP_ID_IPV6_DHCP = 0x69,
MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+ MLXSW_TRAP_ID_IPV6_PIM = 0x79,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
MLXSW_TRAP_ID_IPV6_BGP = 0x89,
MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 2521c8c40015..b2d2ec8c11e2 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -266,7 +266,7 @@ MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
/* Careful: must be accessed under kernel_param_lock() */
static char *myri10ge_fw_name = NULL;
-module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
+module_param(myri10ge_fw_name, charp, 0644);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
#define MYRI10GE_MAX_BOARDS 8
@@ -277,49 +277,49 @@ module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
static int myri10ge_ecrc_enable = 1;
-module_param(myri10ge_ecrc_enable, int, S_IRUGO);
+module_param(myri10ge_ecrc_enable, int, 0444);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
static int myri10ge_small_bytes = -1; /* -1 == auto */
-module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
+module_param(myri10ge_small_bytes, int, 0644);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");
static int myri10ge_msi = 1; /* enable msi by default */
-module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
+module_param(myri10ge_msi, int, 0644);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");
static int myri10ge_intr_coal_delay = 75;
-module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
+module_param(myri10ge_intr_coal_delay, int, 0444);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");
static int myri10ge_flow_control = 1;
-module_param(myri10ge_flow_control, int, S_IRUGO);
+module_param(myri10ge_flow_control, int, 0444);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");
static int myri10ge_deassert_wait = 1;
-module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
+module_param(myri10ge_deassert_wait, int, 0644);
MODULE_PARM_DESC(myri10ge_deassert_wait,
"Wait when deasserting legacy interrupts");
static int myri10ge_force_firmware = 0;
-module_param(myri10ge_force_firmware, int, S_IRUGO);
+module_param(myri10ge_force_firmware, int, 0444);
MODULE_PARM_DESC(myri10ge_force_firmware,
"Force firmware to assume aligned completions");
static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
-module_param(myri10ge_initial_mtu, int, S_IRUGO);
+module_param(myri10ge_initial_mtu, int, 0444);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");
static int myri10ge_napi_weight = 64;
-module_param(myri10ge_napi_weight, int, S_IRUGO);
+module_param(myri10ge_napi_weight, int, 0444);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");
static int myri10ge_watchdog_timeout = 1;
-module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
+module_param(myri10ge_watchdog_timeout, int, 0444);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");
static int myri10ge_max_irq_loops = 1048576;
-module_param(myri10ge_max_irq_loops, int, S_IRUGO);
+module_param(myri10ge_max_irq_loops, int, 0444);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
"Set stuck legacy IRQ detection threshold");
@@ -330,21 +330,21 @@ module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
static int myri10ge_fill_thresh = 256;
-module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
+module_param(myri10ge_fill_thresh, int, 0644);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
static int myri10ge_reset_recover = 1;
static int myri10ge_max_slices = 1;
-module_param(myri10ge_max_slices, int, S_IRUGO);
+module_param(myri10ge_max_slices, int, 0444);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
-module_param(myri10ge_rss_hash, int, S_IRUGO);
+module_param(myri10ge_rss_hash, int, 0444);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");
static int myri10ge_dca = 1;
-module_param(myri10ge_dca, int, S_IRUGO);
+module_param(myri10ge_dca, int, 0444);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
#define MYRI10GE_FW_OFFSET 1024*1024
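The permission conversions in this file (and in the nfp debugfs hunk below) are the mechanical symbolic-to-octal change that checkpatch recommends; the macros and literals are equivalent:

    /* Equivalences used in these hunks: */
    /* S_IRUGO           == 0444  - read for user, group and other */
    /* S_IRUGO | S_IWUSR == 0644  - plus write for the owner       */
    /* S_IRUSR           == 0400  - read for the owner only        */

    module_param(myri10ge_msi, int, 0644);     /* was S_IRUGO | S_IWUSR */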
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 28c1cd5b823b..3f46d836d1b8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -61,6 +61,9 @@
#define NFP_FLOWER_MASK_MPLS_BOS BIT(8)
#define NFP_FLOWER_MASK_MPLS_Q BIT(0)
+#define NFP_FL_IP_FRAG_FIRST BIT(7)
+#define NFP_FL_IP_FRAGMENTED BIT(6)
+
/* Compressed HW representation of TCP Flags */
#define NFP_FL_TCP_FLAG_URG BIT(4)
#define NFP_FL_TCP_FLAG_PSH BIT(3)
@@ -260,6 +263,13 @@ struct nfp_flower_tp_ports {
__be16 port_dst;
};
+struct nfp_flower_ip_ext {
+ u8 tos;
+ u8 proto;
+ u8 ttl;
+ u8 flags;
+};
+
/* L3 IPv4 details (3W/12B)
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -272,10 +282,7 @@ struct nfp_flower_tp_ports {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct nfp_flower_ipv4 {
- u8 tos;
- u8 proto;
- u8 ttl;
- u8 flags;
+ struct nfp_flower_ip_ext ip_ext;
__be32 ipv4_src;
__be32 ipv4_dst;
};
@@ -284,7 +291,7 @@ struct nfp_flower_ipv4 {
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | DSCP |ECN| protocol | reserved |
+ * | DSCP |ECN| protocol | ttl | flags |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv6_exthdr | res | ipv6_flow_label |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -306,10 +313,7 @@ struct nfp_flower_ipv4 {
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
struct nfp_flower_ipv6 {
- u8 tos;
- u8 proto;
- u8 ttl;
- u8 reserved;
+ struct nfp_flower_ip_ext ip_ext;
__be32 ipv6_flow_label_exthdr;
struct in6_addr ipv6_src;
struct in6_addr ipv6_dst;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index b3bc8279d4fb..91935405f586 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -146,26 +146,15 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
}
static void
-nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
- struct tc_cls_flower_offload *flow,
- bool mask_version)
+nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
- struct flow_dissector_key_ipv4_addrs *addr;
- struct flow_dissector_key_basic *basic;
-
- memset(frame, 0, sizeof(struct nfp_flower_ipv4));
-
- if (dissector_uses_key(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
- addr = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IPV4_ADDRS,
- target);
- frame->ipv4_src = addr->src;
- frame->ipv4_dst = addr->dst;
- }
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *basic;
+
basic = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_BASIC,
target);
@@ -201,6 +190,40 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
if (tcp_flags & TCPHDR_URG)
frame->flags |= NFP_FL_TCP_FLAG_URG;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key;
+
+ key = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ target);
+ if (key->flags & FLOW_DIS_IS_FRAGMENT)
+ frame->flags |= NFP_FL_IP_FRAGMENTED;
+ if (key->flags & FLOW_DIS_FIRST_FRAG)
+ frame->flags |= NFP_FL_IP_FRAG_FIRST;
+ }
+}
+
+static void
+nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ipv4_addrs *addr;
+
+ memset(frame, 0, sizeof(struct nfp_flower_ipv4));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ addr = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ target);
+ frame->ipv4_src = addr->src;
+ frame->ipv4_dst = addr->dst;
+ }
+
+ nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}
static void
@@ -210,7 +233,6 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
{
struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
struct flow_dissector_key_ipv6_addrs *addr;
- struct flow_dissector_key_basic *basic;
memset(frame, 0, sizeof(struct nfp_flower_ipv6));
@@ -223,22 +245,7 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
frame->ipv6_dst = addr->dst;
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- basic = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
- target);
- frame->proto = basic->ip_proto;
- }
-
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
- struct flow_dissector_key_ip *flow_ip;
-
- flow_ip = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IP,
- target);
- frame->tos = flow_ip->tos;
- frame->ttl = flow_ip->ttl;
- }
+ nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
}
static void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index f3586c519805..114d2ab02a38 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -48,6 +48,10 @@
(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
TCPHDR_PSH | TCPHDR_URG)
+#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
+ (FLOW_DIS_IS_FRAGMENT | \
+ FLOW_DIS_FIRST_FRAG)
+
#define NFP_FLOWER_WHITELIST_DISSECTOR \
(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
BIT(FLOW_DISSECTOR_KEY_BASIC) | \
@@ -322,6 +326,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
}
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key_ctl;
+
+ key_ctl = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ flow->key);
+
+ if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
+ return -EOPNOTSUPP;
+ }
+
ret_key_ls->key_layer = key_layer;
ret_key_ls->key_layer_two = key_layer_two;
ret_key_ls->key_size = key_size;
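The new control-flag check follows the usual offload whitelist idiom: mask off every bit the hardware understands and refuse the flow if anything remains, so unknown match semantics fall back to software. A minimal sketch of the idiom, with hypothetical flag names:

    #define SUPPORTED_FLAGS (FLAG_A | FLAG_B)  /* hypothetical */

    if (requested_flags & ~SUPPORTED_FLAGS)
            return -EOPNOTSUPP;  /* unknown bit set: punt to software */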
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index cf81cf95d1d8..67cdd8330c59 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -231,15 +231,15 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id)
for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
sprintf(name, "%d", i);
- debugfs_create_file(name, S_IRUSR, rx,
+ debugfs_create_file(name, 0400, rx,
&nn->r_vecs[i], &nfp_rx_q_fops);
- debugfs_create_file(name, S_IRUSR, xdp,
+ debugfs_create_file(name, 0400, xdp,
&nn->r_vecs[i], &nfp_xdp_q_fops);
}
for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) {
sprintf(name, "%d", i);
- debugfs_create_file(name, S_IRUSR, tx,
+ debugfs_create_file(name, 0400, tx,
&nn->r_vecs[i], &nfp_tx_q_fops);
}
}
diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig
new file mode 100644
index 000000000000..aa41e5f6e437
--- /dev/null
+++ b/drivers/net/ethernet/ni/Kconfig
@@ -0,0 +1,27 @@
+#
+# National Instruments network device configuration
+#
+
+config NET_VENDOR_NI
+ bool "National Instruments Devices"
+ default y
+ help
+ If you have a network (Ethernet) device belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about National Instruments devices.
+ If you say Y, you will be asked for your specific device in the
+ following questions.
+
+if NET_VENDOR_NI
+
+config NI_XGE_MANAGEMENT_ENET
+ tristate "National Instruments XGE management enet support"
+ depends on ARCH_ZYNQ
+ select PHYLIB
+ help
+ Simple LAN device for debug or management purposes. Can
+ support either 10G or 1G PHYs via SFP+ ports.
+
+endif
diff --git a/drivers/net/ethernet/ni/Makefile b/drivers/net/ethernet/ni/Makefile
new file mode 100644
index 000000000000..99c664651c51
--- /dev/null
+++ b/drivers/net/ethernet/ni/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NI_XGE_MANAGEMENT_ENET) += nixge.o
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
new file mode 100644
index 000000000000..27364b7572fc
--- /dev/null
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2016-2017, National Instruments Corp.
+ *
+ * Author: Moritz Fischer <[email protected]>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/skbuff.h>
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/ethtool.h>
+#include <linux/iopoll.h>
+
+#define TX_BD_NUM 64
+#define RX_BD_NUM 128
+
+/* Axi DMA Register definitions */
+#define XAXIDMA_TX_CR_OFFSET 0x00 /* Channel control */
+#define XAXIDMA_TX_SR_OFFSET 0x04 /* Status */
+#define XAXIDMA_TX_CDESC_OFFSET 0x08 /* Current descriptor pointer */
+#define XAXIDMA_TX_TDESC_OFFSET 0x10 /* Tail descriptor pointer */
+
+#define XAXIDMA_RX_CR_OFFSET 0x30 /* Channel control */
+#define XAXIDMA_RX_SR_OFFSET 0x34 /* Status */
+#define XAXIDMA_RX_CDESC_OFFSET 0x38 /* Current descriptor pointer */
+#define XAXIDMA_RX_TDESC_OFFSET 0x40 /* Tail descriptor pointer */
+
+#define XAXIDMA_CR_RUNSTOP_MASK 0x1 /* Start/stop DMA channel */
+#define XAXIDMA_CR_RESET_MASK 0x4 /* Reset DMA engine */
+
+#define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */
+#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
+#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
+#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */
+
+#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
+#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
+
+#define XAXIDMA_DELAY_SHIFT 24
+#define XAXIDMA_COALESCE_SHIFT 16
+
+#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
+#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
+#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
+#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+
+/* Default TX/RX Threshold and waitbound values for SGDMA mode */
+#define XAXIDMA_DFT_TX_THRESHOLD 24
+#define XAXIDMA_DFT_TX_WAITBOUND 254
+#define XAXIDMA_DFT_RX_THRESHOLD 24
+#define XAXIDMA_DFT_RX_WAITBOUND 254
+
+#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */
+#define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */
+#define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */
+#define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */
+#define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */
+#define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */
+#define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */
+#define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */
+#define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */
+
+#define NIXGE_REG_CTRL_OFFSET 0x4000
+#define NIXGE_REG_INFO 0x00
+#define NIXGE_REG_MAC_CTL 0x04
+#define NIXGE_REG_PHY_CTL 0x08
+#define NIXGE_REG_LED_CTL 0x0c
+#define NIXGE_REG_MDIO_DATA 0x10
+#define NIXGE_REG_MDIO_ADDR 0x14
+#define NIXGE_REG_MDIO_OP 0x18
+#define NIXGE_REG_MDIO_CTRL 0x1c
+
+#define NIXGE_ID_LED_CTL_EN BIT(0)
+#define NIXGE_ID_LED_CTL_VAL BIT(1)
+
+#define NIXGE_MDIO_CLAUSE45 BIT(12)
+#define NIXGE_MDIO_CLAUSE22 0
+#define NIXGE_MDIO_OP(n) (((n) & 0x3) << 10)
+#define NIXGE_MDIO_OP_ADDRESS 0
+#define NIXGE_MDIO_C45_WRITE BIT(0)
+#define NIXGE_MDIO_C45_READ (BIT(1) | BIT(0))
+#define NIXGE_MDIO_C22_WRITE BIT(0)
+#define NIXGE_MDIO_C22_READ BIT(1)
+#define NIXGE_MDIO_ADDR(n) (((n) & 0x1f) << 5)
+#define NIXGE_MDIO_MMD(n) (((n) & 0x1f) << 0)
+
+#define NIXGE_REG_MAC_LSB 0x1000
+#define NIXGE_REG_MAC_MSB 0x1004
+
+/* Packet size info */
+#define NIXGE_HDR_SIZE 14 /* Size of Ethernet header */
+#define NIXGE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */
+#define NIXGE_MTU 1500 /* Max MTU of an Ethernet frame */
+#define NIXGE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */
+
+#define NIXGE_MAX_FRAME_SIZE (NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
+#define NIXGE_MAX_JUMBO_FRAME_SIZE \
+ (NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
+
+struct nixge_hw_dma_bd {
+ u32 next;
+ u32 reserved1;
+ u32 phys;
+ u32 reserved2;
+ u32 reserved3;
+ u32 reserved4;
+ u32 cntrl;
+ u32 status;
+ u32 app0;
+ u32 app1;
+ u32 app2;
+ u32 app3;
+ u32 app4;
+ u32 sw_id_offset;
+ u32 reserved5;
+ u32 reserved6;
+};
+
+struct nixge_tx_skb {
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ size_t size;
+ bool mapped_as_page;
+};
+
+struct nixge_priv {
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct device *dev;
+
+ /* Connection to PHY device */
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+
+ int link;
+ unsigned int speed;
+ unsigned int duplex;
+
+ /* MDIO bus data */
+ struct mii_bus *mii_bus; /* MII bus reference */
+
+ /* IO registers, dma functions and IRQs */
+ void __iomem *ctrl_regs;
+ void __iomem *dma_regs;
+
+ struct tasklet_struct dma_err_tasklet;
+
+ int tx_irq;
+ int rx_irq;
+ u32 last_link;
+
+ /* Buffer descriptors */
+ struct nixge_hw_dma_bd *tx_bd_v;
+ struct nixge_tx_skb *tx_skb;
+ dma_addr_t tx_bd_p;
+
+ struct nixge_hw_dma_bd *rx_bd_v;
+ dma_addr_t rx_bd_p;
+ u32 tx_bd_ci;
+ u32 tx_bd_tail;
+ u32 rx_bd_ci;
+
+ u32 coalesce_count_rx;
+ u32 coalesce_count_tx;
+};
+
+static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
+{
+ writel(val, priv->dma_regs + offset);
+}
+
+static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
+{
+ return readl(priv->dma_regs + offset);
+}
+
+static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
+{
+ writel(val, priv->ctrl_regs + offset);
+}
+
+static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
+{
+ return readl(priv->ctrl_regs + offset);
+}
+
+#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
+ readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
+ (sleep_us), (timeout_us))
+
+#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
+ readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
+ (sleep_us), (timeout_us))
+
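These wrappers simply rebase iopoll's readl_poll_timeout() onto the driver's two register windows. A usage sketch matching the reset path further down: poll until the condition becomes true, sleeping sleep_us between reads, and get -ETIMEDOUT after timeout_us:

    u32 status;
    int err;

    /* Spin until the DMA reset bit self-clears (10us poll, 1ms cap). */
    err = nixge_dma_poll_timeout(priv, XAXIDMA_TX_CR_OFFSET, status,
                                 !(status & XAXIDMA_CR_RESET_MASK),
                                 10, 1000);
    if (err)    /* -ETIMEDOUT: the engine never came out of reset */
            netdev_err(priv->ndev, "DMA reset timeout!\n");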
+static void nixge_hw_dma_bd_release(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys,
+ NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb((struct sk_buff *)
+ (priv->rx_bd_v[i].sw_id_offset));
+ }
+
+ if (priv->rx_bd_v)
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*priv->rx_bd_v) * RX_BD_NUM,
+ priv->rx_bd_v,
+ priv->rx_bd_p);
+
+ if (priv->tx_skb)
+ devm_kfree(ndev->dev.parent, priv->tx_skb);
+
+ if (priv->tx_bd_v)
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*priv->tx_bd_v) * TX_BD_NUM,
+ priv->tx_bd_v,
+ priv->tx_bd_p);
+}
+
+static int nixge_hw_dma_bd_init(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ u32 cr;
+ int i;
+
+ /* Reset the indexes which are used for accessing the BDs */
+ priv->tx_bd_ci = 0;
+ priv->tx_bd_tail = 0;
+ priv->rx_bd_ci = 0;
+
+ /* Allocate the Tx and Rx buffer descriptors. */
+ priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*priv->tx_bd_v) * TX_BD_NUM,
+ &priv->tx_bd_p, GFP_KERNEL);
+ if (!priv->tx_bd_v)
+ goto out;
+
+ priv->tx_skb = devm_kzalloc(ndev->dev.parent,
+ sizeof(*priv->tx_skb) *
+ TX_BD_NUM,
+ GFP_KERNEL);
+ if (!priv->tx_skb)
+ goto out;
+
+ priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*priv->rx_bd_v) * RX_BD_NUM,
+ &priv->rx_bd_p, GFP_KERNEL);
+ if (!priv->rx_bd_v)
+ goto out;
+
+ for (i = 0; i < TX_BD_NUM; i++) {
+ priv->tx_bd_v[i].next = priv->tx_bd_p +
+ sizeof(*priv->tx_bd_v) *
+ ((i + 1) % TX_BD_NUM);
+ }
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ priv->rx_bd_v[i].next = priv->rx_bd_p +
+ sizeof(*priv->rx_bd_v) *
+ ((i + 1) % RX_BD_NUM);
+
+ skb = netdev_alloc_skb_ip_align(ndev,
+ NIXGE_MAX_JUMBO_FRAME_SIZE);
+ if (!skb)
+ goto out;
+
+ priv->rx_bd_v[i].sw_id_offset = (u32)skb;
+ priv->rx_bd_v[i].phys =
+ dma_map_single(ndev->dev.parent,
+ skb->data,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
+ }
+
+ /* Start updating the Rx channel control register */
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
+ (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * The Tx channel is now ready to run, but it will only start
+ * transmitting once the tail pointer register is written.
+ */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+
+ return 0;
+out:
+ nixge_hw_dma_bd_release(ndev);
+ return -ENOMEM;
+}
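Both descriptor rings built above are circular purely through the next pointers: descriptor i holds the bus address of descriptor (i + 1) mod N, so the DMA engine wraps from the last entry back to the first with no tail special-casing. The construction, reduced to one loop:

    /* bd_phys is the ring's DMA base; N is TX_BD_NUM or RX_BD_NUM. */
    for (i = 0; i < N; i++)
            bd_virt[i].next = bd_phys + sizeof(*bd_virt) * ((i + 1) % N);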
+
+static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
+{
+ u32 status;
+ int err;
+
+ /* Reset Axi DMA. This resets the NIXGE Ethernet core as well.
+ * The reset process of Axi DMA takes a while to complete as all
+ * pending commands/transfers will be flushed or completed during
+ * this reset process.
+ */
+ nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
+ err = nixge_dma_poll_timeout(priv, offset, status,
+ !(status & XAXIDMA_CR_RESET_MASK), 10,
+ 1000);
+ if (err)
+ netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
+}
+
+static void nixge_device_reset(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ __nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
+ __nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);
+
+ if (nixge_hw_dma_bd_init(ndev))
+ netdev_err(ndev, "%s: descriptor allocation failed\n",
+ __func__);
+
+ netif_trans_update(ndev);
+}
+
+static void nixge_handle_link_change(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+
+ if (phydev->link != priv->link || phydev->speed != priv->speed ||
+ phydev->duplex != priv->duplex) {
+ priv->link = phydev->link;
+ priv->speed = phydev->speed;
+ priv->duplex = phydev->duplex;
+ phy_print_status(phydev);
+ }
+}
+
+static void nixge_tx_skb_unmap(struct nixge_priv *priv,
+ struct nixge_tx_skb *tx_skb)
+{
+ if (tx_skb->mapping) {
+ if (tx_skb->mapped_as_page)
+ dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
+ tx_skb->size, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->ndev->dev.parent,
+ tx_skb->mapping,
+ tx_skb->size, DMA_TO_DEVICE);
+ tx_skb->mapping = 0;
+ }
+
+ if (tx_skb->skb) {
+ dev_kfree_skb_any(tx_skb->skb);
+ tx_skb->skb = NULL;
+ }
+}
+
+static void nixge_start_xmit_done(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct nixge_hw_dma_bd *cur_p;
+ struct nixge_tx_skb *tx_skb;
+ unsigned int status = 0;
+ u32 packets = 0;
+ u32 size = 0;
+
+ cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
+ tx_skb = &priv->tx_skb[priv->tx_bd_ci];
+
+ status = cur_p->status;
+
+ while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
+ nixge_tx_skb_unmap(priv, tx_skb);
+ cur_p->status = 0;
+
+ size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+ packets++;
+
+ ++priv->tx_bd_ci;
+ priv->tx_bd_ci %= TX_BD_NUM;
+ cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
+ tx_skb = &priv->tx_skb[priv->tx_bd_ci];
+ status = cur_p->status;
+ }
+
+ ndev->stats.tx_packets += packets;
+ ndev->stats.tx_bytes += size;
+
+ if (packets)
+ netif_wake_queue(ndev);
+}
+
+static int nixge_check_tx_bd_space(struct nixge_priv *priv,
+ int num_frag)
+{
+ struct nixge_hw_dma_bd *cur_p;
+
+ cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
+ if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
+ return NETDEV_TX_BUSY;
+ return 0;
+}
+
+static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct nixge_hw_dma_bd *cur_p;
+ struct nixge_tx_skb *tx_skb;
+ dma_addr_t tail_p;
+ skb_frag_t *frag;
+ u32 num_frag;
+ u32 ii;
+
+ num_frag = skb_shinfo(skb)->nr_frags;
+ cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
+ tx_skb = &priv->tx_skb[priv->tx_bd_tail];
+
+ if (nixge_check_tx_bd_space(priv, num_frag)) {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ return NETDEV_TX_OK;
+ }
+
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
+ goto drop;
+
+ cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+
+ tx_skb->skb = NULL;
+ tx_skb->mapping = cur_p->phys;
+ tx_skb->size = skb_headlen(skb);
+ tx_skb->mapped_as_page = false;
+
+ for (ii = 0; ii < num_frag; ii++) {
+ ++priv->tx_bd_tail;
+ priv->tx_bd_tail %= TX_BD_NUM;
+ cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
+ tx_skb = &priv->tx_skb[priv->tx_bd_tail];
+ frag = &skb_shinfo(skb)->frags[ii];
+
+ cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
+ goto frag_err;
+
+ cur_p->cntrl = skb_frag_size(frag);
+
+ tx_skb->skb = NULL;
+ tx_skb->mapping = cur_p->phys;
+ tx_skb->size = skb_frag_size(frag);
+ tx_skb->mapped_as_page = true;
+ }
+
+ /* last buffer of the frame */
+ tx_skb->skb = skb;
+
+ cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
+ cur_p->app4 = (unsigned long)skb;
+
+ tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
+ /* Start the transfer */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+ ++priv->tx_bd_tail;
+ priv->tx_bd_tail %= TX_BD_NUM;
+
+ return NETDEV_TX_OK;
+frag_err:
+ for (; ii > 0; ii--) {
+ if (priv->tx_bd_tail)
+ priv->tx_bd_tail--;
+ else
+ priv->tx_bd_tail = TX_BD_NUM - 1;
+
+ tx_skb = &priv->tx_skb[priv->tx_bd_tail];
+ nixge_tx_skb_unmap(priv, tx_skb);
+
+ cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
+ cur_p->status = 0;
+ }
+ dma_unmap_single(priv->ndev->dev.parent,
+ tx_skb->mapping,
+ tx_skb->size, DMA_TO_DEVICE);
+drop:
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static int nixge_recv(struct net_device *ndev, int budget)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct sk_buff *skb, *new_skb;
+ struct nixge_hw_dma_bd *cur_p;
+ dma_addr_t tail_p = 0;
+ u32 packets = 0;
+ u32 length = 0;
+ u32 size = 0;
+
+ cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
+
+ while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK &&
+ budget > packets)) {
+ tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
+ priv->rx_bd_ci;
+
+ skb = (struct sk_buff *)(cur_p->sw_id_offset);
+
+ length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+ if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
+ length = NIXGE_MAX_JUMBO_FRAME_SIZE;
+
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb_checksum_none_assert(skb);
+
+ /* For now mark them as CHECKSUM_NONE since
+ * we don't have offload capabilities
+ */
+ skb->ip_summed = CHECKSUM_NONE;
+
+ napi_gro_receive(&priv->napi, skb);
+
+ size += length;
+ packets++;
+
+ new_skb = netdev_alloc_skb_ip_align(ndev,
+ NIXGE_MAX_JUMBO_FRAME_SIZE);
+ if (!new_skb)
+ return packets;
+
+ cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ NIXGE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
+ /* FIXME: bail out and clean up */
+ netdev_err(ndev, "Failed to map ...\n");
+ }
+ cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
+ cur_p->status = 0;
+ cur_p->sw_id_offset = (u32)new_skb;
+
+ ++priv->rx_bd_ci;
+ priv->rx_bd_ci %= RX_BD_NUM;
+ cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
+ }
+
+ ndev->stats.rx_packets += packets;
+ ndev->stats.rx_bytes += size;
+
+ if (tail_p)
+ nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+
+ return packets;
+}
+
+static int nixge_poll(struct napi_struct *napi, int budget)
+{
+ struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
+ int work_done;
+ u32 status, cr;
+
+ work_done = 0;
+
+ work_done = nixge_recv(priv->ndev, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
+
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ /* More work pending: clear the status and reschedule */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
+ napi_reschedule(napi);
+ } else {
+ /* if not, turn on RX IRQs again ... */
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+ }
+ }
+
+ return work_done;
+}
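nixge_poll() follows the standard NAPI contract: consume at most budget packets, and only when a round finishes under budget call napi_complete_done() and re-enable the device interrupt. The extra status re-read before the unmask closes the race where frames arrive between the last processed packet and the IRQ being turned back on. The contract, reduced to a skeleton (process_rx() and enable_rx_irq() are hypothetical stand-ins):

    static int poll(struct napi_struct *napi, int budget)
    {
            int done = process_rx(budget);          /* hypothetical */

            if (done < budget) {
                    napi_complete_done(napi, done); /* leave poll mode */
                    enable_rx_irq();                /* hypothetical */
            }
            return done;    /* done == budget keeps NAPI scheduled */
    }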
+
+static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
+{
+ struct nixge_priv *priv = netdev_priv(_ndev);
+ struct net_device *ndev = _ndev;
+ unsigned int status;
+ u32 cr;
+
+ status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
+ nixge_start_xmit_done(priv->ndev);
+ goto out;
+ }
+ if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
+ netdev_err(ndev, "No interrupts asserted in Tx path\n");
+ return IRQ_NONE;
+ }
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ netdev_err(ndev, "DMA Tx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x\n",
+ (priv->tx_bd_v[priv->tx_bd_ci]).phys);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Tx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Rx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&priv->dma_err_tasklet);
+ nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
+{
+ struct nixge_priv *priv = netdev_priv(_ndev);
+ struct net_device *ndev = _ndev;
+ unsigned int status;
+ u32 cr;
+
+ status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ /* Turn off IRQs; NAPI will poll from here */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+
+ if (napi_schedule_prep(&priv->napi))
+ __napi_schedule(&priv->napi);
+ goto out;
+ }
+ if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
+ netdev_err(ndev, "No interrupts asserted in Rx path\n");
+ return IRQ_NONE;
+ }
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ netdev_err(ndev, "DMA Rx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x\n",
+ (priv->rx_bd_v[priv->rx_bd_ci]).phys);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&priv->dma_err_tasklet);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static void nixge_dma_err_handler(unsigned long data)
+{
+ struct nixge_priv *lp = (struct nixge_priv *)data;
+ struct nixge_hw_dma_bd *cur_p;
+ struct nixge_tx_skb *tx_skb;
+ u32 cr, i;
+
+ __nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
+ __nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+
+ for (i = 0; i < TX_BD_NUM; i++) {
+ cur_p = &lp->tx_bd_v[i];
+ tx_skb = &lp->tx_skb[i];
+ nixge_tx_skb_unmap(lp, tx_skb);
+
+ cur_p->phys = 0;
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ cur_p->sw_id_offset = 0;
+ }
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ cur_p = &lp->rx_bd_v[i];
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ }
+
+ lp->tx_bd_ci = 0;
+ lp->tx_bd_tail = 0;
+ lp->rx_bd_ci = 0;
+
+ /* Start updating the Rx channel control register */
+ cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Rx channel control register */
+ nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Tx channel control register */
+ nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
+ nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting
+ */
+ nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
+ nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+}
+
+static int nixge_open(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy;
+ int ret;
+
+ nixge_device_reset(ndev);
+
+ phy = of_phy_connect(ndev, priv->phy_node,
+ &nixge_handle_link_change, 0, priv->phy_mode);
+ if (!phy)
+ return -ENODEV;
+
+ phy_start(phy);
+
+ /* Set up the tasklet for Axi DMA error handling */
+ tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
+ (unsigned long)priv);
+
+ napi_enable(&priv->napi);
+
+ /* Enable interrupts for Axi DMA Tx */
+ ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+ /* Enable interrupts for Axi DMA Rx */
+ ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_rx_irq:
+ free_irq(priv->tx_irq, ndev);
+err_tx_irq:
+ phy_stop(phy);
+ phy_disconnect(phy);
+ tasklet_kill(&priv->dma_err_tasklet);
+ netdev_err(ndev, "request_irq() failed\n");
+ return ret;
+}
+
+static int nixge_stop(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ u32 cr;
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ }
+
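+ /* Take both DMA channels out of run state before releasing the IRQs */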
+ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
+ cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+
+ tasklet_kill(&priv->dma_err_tasklet);
+
+ free_irq(priv->tx_irq, ndev);
+ free_irq(priv->rx_irq, ndev);
+
+ nixge_hw_dma_bd_release(ndev);
+
+ return 0;
+}
+
+static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ if (netif_running(ndev))
+ return -EBUSY;
+
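+ /* The new MTU plus the device's header and trailer overhead must
+ * still fit in the largest supported jumbo frame.
+ */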
+ if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
+ NIXGE_MAX_JUMBO_FRAME_SIZE)
+ return -EINVAL;
+
+ ndev->mtu = new_mtu;
+
+ return 0;
+}
+
+static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+
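+ /* The station address is split across two registers: dev_addr[2..5]
+ * pack big-endian into the 32-bit LSB register and dev_addr[0..1]
+ * into the 16-bit MSB register, e.g. 00:11:22:33:44:55 is written as
+ * LSB = 0x22334455, MSB = 0x0011.
+ */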
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
+ (ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) |
+ (ndev->dev_addr[5] << 0));
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
+ (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));
+
+ return 0;
+}
+
+static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
+{
+ int err;
+
+ err = eth_mac_addr(ndev, p);
+ if (!err)
+ __nixge_hw_set_mac_address(ndev);
+
+ return err;
+}
+
+static const struct net_device_ops nixge_netdev_ops = {
+ .ndo_open = nixge_open,
+ .ndo_stop = nixge_stop,
+ .ndo_start_xmit = nixge_start_xmit,
+ .ndo_change_mtu = nixge_change_mtu,
+ .ndo_set_mac_address = nixge_net_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *ed)
+{
+ strlcpy(ed->driver, "nixge", sizeof(ed->driver));
+ strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
+}
+
+static int nixge_ethtools_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ecoalesce)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ u32 regval = 0;
+
+ regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
+ ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
+ ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ return 0;
+}
+
+static int nixge_ethtools_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ecoalesce)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
+ return -EBUSY;
+ }
+
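+ /* Only frame-count coalescing is implemented; reject every other
+ * non-zero setting rather than silently ignoring it.
+ */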
+ if (ecoalesce->rx_coalesce_usecs ||
+ ecoalesce->rx_coalesce_usecs_irq ||
+ ecoalesce->rx_max_coalesced_frames_irq ||
+ ecoalesce->tx_coalesce_usecs ||
+ ecoalesce->tx_coalesce_usecs_irq ||
+ ecoalesce->tx_max_coalesced_frames_irq ||
+ ecoalesce->stats_block_coalesce_usecs ||
+ ecoalesce->use_adaptive_rx_coalesce ||
+ ecoalesce->use_adaptive_tx_coalesce ||
+ ecoalesce->pkt_rate_low ||
+ ecoalesce->rx_coalesce_usecs_low ||
+ ecoalesce->rx_max_coalesced_frames_low ||
+ ecoalesce->tx_coalesce_usecs_low ||
+ ecoalesce->tx_max_coalesced_frames_low ||
+ ecoalesce->pkt_rate_high ||
+ ecoalesce->rx_coalesce_usecs_high ||
+ ecoalesce->rx_max_coalesced_frames_high ||
+ ecoalesce->tx_coalesce_usecs_high ||
+ ecoalesce->tx_max_coalesced_frames_high ||
+ ecoalesce->rate_sample_interval)
+ return -EOPNOTSUPP;
+ if (ecoalesce->rx_max_coalesced_frames)
+ priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ if (ecoalesce->tx_max_coalesced_frames)
+ priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+
+ return 0;
+}
+
+static int nixge_ethtools_set_phys_id(struct net_device *ndev,
+ enum ethtool_phys_id_state state)
+{
+ struct nixge_priv *priv = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ ctrl |= NIXGE_ID_LED_CTL_EN;
+ /* Enable identification LED override */
+ nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
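+ /* Returning 2 asks the ethtool core to drive the blinking itself
+ * via ETHTOOL_ID_ON/OFF callbacks, at 2 Hz.
+ */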
+ return 2;
+
+ case ETHTOOL_ID_ON:
+ ctrl |= NIXGE_ID_LED_CTL_VAL;
+ nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ ctrl &= ~NIXGE_ID_LED_CTL_VAL;
+ nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore LED settings */
+ ctrl &= ~NIXGE_ID_LED_CTL_EN;
+ nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops nixge_ethtool_ops = {
+ .get_drvinfo = nixge_ethtools_get_drvinfo,
+ .get_coalesce = nixge_ethtools_get_coalesce,
+ .set_coalesce = nixge_ethtools_set_coalesce,
+ .set_phys_id = nixge_ethtools_set_phys_id,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+};
+
+static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct nixge_priv *priv = bus->priv;
+ u32 status, tmp;
+ int err;
+ u16 device;
+
+ if (reg & MII_ADDR_C45) {
+ device = (reg >> 16) & 0x1f;
+
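+ /* Clause 45 access is two-phased: latch the 16-bit register
+ * address with an ADDRESS op first, then issue the READ as a
+ * separate op on the same PHY/MMD.
+ */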
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
+
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
+ | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting address");
+ return err;
+ }
+
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+ } else {
+ device = reg & 0x1f;
+
+ tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+ }
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting read command");
+ return err;
+ }
+
+ status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);
+
+ return status;
+}
+
+static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+{
+ struct nixge_priv *priv = bus->priv;
+ u32 status, tmp;
+ u16 device;
+ int err;
+
+ if (reg & MII_ADDR_C45) {
+ device = (reg >> 16) & 0x1f;
+
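+ /* Clause 45: latch the register address before the WRITE op */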
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
+
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
+ | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting address\n");
+ return err;
+ }
+
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
+ | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ /* assumed: kick the transaction via MDIO_CTRL, as the other paths do */
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err)
+ dev_err(priv->dev, "timeout setting write command");
+ } else {
+ device = reg & 0x1f;
+
+ tmp = NIXGE_MDIO_CLAUSE22 |
+ NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err)
+ dev_err(priv->dev, "timeout setting write command");
+ }
+
+ return err;
+}
+
+static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
+{
+ struct mii_bus *bus;
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
+ bus->priv = priv;
+ bus->name = "nixge_mii_bus";
+ bus->read = nixge_mdio_read;
+ bus->write = nixge_mdio_write;
+ bus->parent = priv->dev;
+
+ priv->mii_bus = bus;
+
+ return of_mdiobus_register(bus, np);
+}
+
+static void *nixge_get_nvmem_address(struct device *dev)
+{
+ struct nvmem_cell *cell;
+ size_t cell_size;
+ char *mac;
+
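+ /* May return an ERR_PTR from the nvmem core, so callers must
+ * check the result with IS_ERR() before use.
+ */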
+ cell = nvmem_cell_get(dev, "address");
+ if (IS_ERR(cell))
+ return cell;
+
+ mac = nvmem_cell_read(cell, &cell_size);
+ nvmem_cell_put(cell);
+
+ return mac;
+}
+
+static int nixge_probe(struct platform_device *pdev)
+{
+ struct nixge_priv *priv;
+ struct net_device *ndev;
+ struct resource *dmares;
+ const char *mac_addr;
+ int err;
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->features = NETIF_F_SG;
+ ndev->netdev_ops = &nixge_netdev_ops;
+ ndev->ethtool_ops = &nixge_ethtool_ops;
+
+ /* MTU range: 64 - 9000 */
+ ndev->min_mtu = 64;
+ ndev->max_mtu = NIXGE_JUMBO_MTU;
+
+ mac_addr = nixge_get_nvmem_address(&pdev->dev);
+ if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr))
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ else
+ eth_hw_addr_random(ndev);
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->dev = &pdev->dev;
+
+ netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
+
+ dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
+ if (IS_ERR(priv->dma_regs)) {
+ netdev_err(ndev, "failed to map dma regs\n");
+ return PTR_ERR(priv->dma_regs);
+ }
+ priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
+ __nixge_hw_set_mac_address(ndev);
+
+ priv->tx_irq = platform_get_irq_byname(pdev, "tx");
+ if (priv->tx_irq < 0) {
+ netdev_err(ndev, "could not find 'tx' irq");
+ return priv->tx_irq;
+ }
+
+ priv->rx_irq = platform_get_irq_byname(pdev, "rx");
+ if (priv->rx_irq < 0) {
+ netdev_err(ndev, "could not find 'rx' irq");
+ return priv->rx_irq;
+ }
+
+ priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
+ priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+
+ err = nixge_mdio_setup(priv, pdev->dev.of_node);
+ if (err) {
+ netdev_err(ndev, "error registering mdio bus");
+ goto free_netdev;
+ }
+
+ priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+ if (priv->phy_mode < 0) {
+ netdev_err(ndev, "not find \"phy-mode\" property\n");
+ err = -EINVAL;
+ goto unregister_mdio;
+ }
+
+ priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (!priv->phy_node) {
+ netdev_err(ndev, "not find \"phy-handle\" property\n");
+ err = -EINVAL;
+ goto unregister_mdio;
+ }
+
+ err = register_netdev(priv->ndev);
+ if (err) {
+ netdev_err(ndev, "register_netdev() error (%i)\n", err);
+ goto unregister_mdio;
+ }
+
+ return 0;
+
+unregister_mdio:
+ mdiobus_unregister(priv->mii_bus);
+
+free_netdev:
+ free_netdev(ndev);
+
+ return err;
+}
+
+static int nixge_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct nixge_priv *priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ mdiobus_unregister(priv->mii_bus);
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id nixge_dt_ids[] = {
+ { .compatible = "ni,xge-enet-2.00", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nixge_dt_ids);
+
+static struct platform_driver nixge_driver = {
+ .probe = nixge_probe,
+ .remove = nixge_remove,
+ .driver = {
+ .name = "nixge",
+ .of_match_table = of_match_ptr(nixge_dt_ids),
+ },
+};
+module_platform_driver(nixge_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("National Instruments XGE Management MAC");
+MODULE_AUTHOR("Moritz Fischer <[email protected]>");
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f2e8de607119..8259e8309320 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2829,9 +2829,9 @@ netxen_show_bridged_mode(struct device *dev,
}
static const struct device_attribute dev_attr_bridged_mode = {
- .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
- .show = netxen_show_bridged_mode,
- .store = netxen_store_bridged_mode,
+ .attr = { .name = "bridged_mode", .mode = 0644 },
+ .show = netxen_show_bridged_mode,
+ .store = netxen_store_bridged_mode,
};
static ssize_t
@@ -2861,7 +2861,7 @@ netxen_show_diag_mode(struct device *dev,
}
static const struct device_attribute dev_attr_diag_mode = {
- .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "diag_mode", .mode = 0644 },
.show = netxen_show_diag_mode,
.store = netxen_store_diag_mode,
};
@@ -3006,14 +3006,14 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
static const struct bin_attribute bin_attr_crb = {
- .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "crb", .mode = 0644 },
.size = 0,
.read = netxen_sysfs_read_crb,
.write = netxen_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
- .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "mem", .mode = 0644 },
.size = 0,
.read = netxen_sysfs_read_mem,
.write = netxen_sysfs_write_mem,
@@ -3142,7 +3142,7 @@ out:
}
static const struct bin_attribute bin_attr_dimm = {
- .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
+ .attr = { .name = "dimm", .mode = 0644 },
.size = sizeof(struct netxen_dimm_cfg),
.read = netxen_sysfs_read_dimm,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 69488554f4b9..e07460a68d30 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -81,6 +81,13 @@ enum qed_coalescing_mode {
QED_COAL_MODE_ENABLE
};
+enum qed_nvm_cmd {
+ QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
+ QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
+ QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
+ QED_GET_MCP_NVM_RESP = 0xFFFFFF00
+};
+
struct qed_eth_cb_ops;
struct qed_dev_info;
union qed_mcp_protocol_stats;
@@ -437,6 +444,11 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
+struct qed_nvm_image_info {
+ u32 num_images;
+ struct bist_nvm_image_att *image_att;
+};
+
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
@@ -561,6 +573,9 @@ struct qed_hwfn {
/* L2-related */
struct qed_l2_info *p_l2_info;
+ /* Nvm images number and attributes */
+ struct qed_nvm_image_info nvm_info;
+
struct qed_ptt *p_arfs_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index fdf37abee3d3..4926c5532fba 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -265,6 +265,7 @@ struct grc_param_defs {
u32 min;
u32 max;
bool is_preset;
+ bool is_persistent;
u32 exclude_all_preset_val;
u32 crash_preset_val;
};
@@ -1520,129 +1521,129 @@ static struct platform_defs s_platform_defs[] = {
static struct grc_param_defs s_grc_param_defs[] = {
/* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1},
+ {{1, 1, 1}, 0, 1, false, false, 1, 1},
/* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PBUF */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_VFC */
- {{0, 0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
- /* DBG_GRC_PARAM_RESERVED */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
+ {{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
/* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_IGU */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_BMB */
- {{0, 0, 0}, 0, 1, false, 0, 1},
+ {{0, 0, 0}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_STATIC */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_UNSTALL */
- {{0, 0, 0}, 0, 1, false, 0, 0},
+ {{0, 0, 0}, 0, 1, false, false, 0, 0},
/* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
- MAX_LCIDS},
+ {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
+ MAX_LCIDS, MAX_LCIDS},
/* DBG_GRC_PARAM_NUM_LTIDS */
- {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
- MAX_LTIDS},
+ {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
+ MAX_LTIDS, MAX_LTIDS},
/* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0, 0}, 0, 1, true, 0, 0},
+ {{0, 0, 0}, 0, 1, true, false, 0, 0},
/* DBG_GRC_PARAM_CRASH */
- {{0, 0, 0}, 0, 1, true, 0, 0},
+ {{0, 0, 0}, 0, 1, true, false, 0, 0},
/* DBG_GRC_PARAM_PARITY_SAFE */
- {{0, 0, 0}, 0, 1, false, 1, 0},
+ {{0, 0, 0}, 0, 1, false, false, 1, 0},
/* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_DUMP_PHY */
- {{1, 1, 1}, 0, 1, false, 0, 1},
+ {{1, 1, 1}, 0, 1, false, false, 0, 1},
/* DBG_GRC_PARAM_NO_MCP */
- {{0, 0, 0}, 0, 1, false, 0, 0},
+ {{0, 0, 0}, 0, 1, false, false, 0, 0},
/* DBG_GRC_PARAM_NO_FW_VER */
- {{0, 0, 0}, 0, 1, false, 0, 0}
+ {{0, 0, 0}, 0, 1, false, false, 0, 0}
};
static struct rss_mem_defs s_rss_mem_defs[] = {
@@ -4731,8 +4732,13 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "mcp_trace_meta", 1);
- /* Read trace meta info (trace_meta_size_bytes is dword-aligned) */
- if (mcp_access) {
+ /* If MCP Trace meta size parameter was set, use it.
+ * Otherwise, read trace meta.
+ * trace_meta_size_bytes is dword-aligned.
+ */
+ trace_meta_size_bytes =
+ qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
+ if ((!trace_meta_size_bytes || dump) && mcp_access) {
status = qed_mcp_trace_get_meta_info(p_hwfn,
p_ptt,
trace_data_size_bytes,
@@ -5063,8 +5069,9 @@ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
u32 i;
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- dev_data->grc.param_val[i] =
- s_grc_param_defs[i].default_val[dev_data->chip_id];
+ if (!s_grc_param_defs[i].is_persistent)
+ dev_data->grc.param_val[i] =
+ s_grc_param_defs[i].default_val[dev_data->chip_id];
}
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -6071,10 +6078,14 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
/******************************** Variables **********************************/
-/* MCP Trace meta data - used in case the dump doesn't contain the meta data
- * (e.g. due to no NVRAM access).
+/* MCP Trace meta data array - used in case the dump doesn't contain the
+ * meta data (e.g. due to no NVRAM access).
*/
-static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 };
+static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 };
+
+/* Parsed MCP Trace meta data info, based on MCP trace meta array */
+static struct mcp_trace_meta s_mcp_trace_meta;
+static bool s_mcp_trace_meta_valid;
/* Temporary buffer, used for print size calculations */
static char s_temp_buf[MAX_MSG_LEN];
@@ -6104,6 +6115,9 @@ static u32 qed_read_from_cyclic_buf(void *buf,
val_ptr = (u8 *)&val;
+ /* Assume a little-endian host and a network-order (big-endian) buffer,
+ * where the high-order bytes are placed at lower memory addresses.
+ */
for (i = 0; i < num_bytes_to_read; i++) {
val_ptr[i] = bytes_buf[*offset];
*offset = qed_cyclic_add(*offset, 1, buf_size);
@@ -6185,7 +6199,7 @@ static u32 qed_read_param(u32 *dump_buf,
offset += 4;
}
- return offset / 4;
+ return (u32)offset / 4;
}
/* Reads a section header from the specified buffer.
@@ -6503,6 +6517,8 @@ static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
{
u32 i;
+ s_mcp_trace_meta_valid = false;
+
/* Release modules */
if (meta->modules) {
for (i = 0; i < meta->modules_num; i++)
@@ -6529,6 +6545,10 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
u8 *meta_buf_bytes = (u8 *)meta_buf;
u32 offset = 0, signature, i;
+ /* Free the previous meta before loading a new one. */
+ if (s_mcp_trace_meta_valid)
+ qed_mcp_trace_free_meta(p_hwfn, meta);
+
memset(meta, 0, sizeof(*meta));
/* Read first signature */
@@ -6594,31 +6614,153 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
format_len, format_ptr->format_str);
}
+ s_mcp_trace_meta_valid = true;
return DBG_STATUS_OK;
}
+/* Parses an MCP trace buffer. If parsed_buf is not NULL, the parsed results
+ * are printed to it. The parsing status is returned.
+ * Arguments:
+ * trace_buf - MCP trace cyclic buffer
+ * trace_buf_size - MCP trace cyclic buffer size in bytes
+ * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
+ * buffer.
+ * data_size - size in bytes of data to parse.
+ * parsed_buf - destination buffer for parsed data.
+ * parsed_bytes - size of parsed data in bytes.
+ */
+static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
+ u32 trace_buf_size,
+ u32 data_offset,
+ u32 data_size,
+ char *parsed_buf,
+ u32 *parsed_bytes)
+{
+ u32 param_mask, param_shift;
+ enum dbg_status status;
+
+ *parsed_bytes = 0;
+
+ if (!s_mcp_trace_meta_valid)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ status = DBG_STATUS_OK;
+
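+ /* Consume one trace entry per iteration: a fixed-size header
+ * followed by up to three variable-size parameters.
+ */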
+ while (data_size) {
+ struct mcp_trace_format *format_ptr;
+ u8 format_level, format_module;
+ u32 params[3] = { 0, 0, 0 };
+ u32 header, format_idx, i;
+
+ if (data_size < MFW_TRACE_ENTRY_SIZE)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ header = qed_read_from_cyclic_buf(trace_buf,
+ &data_offset,
+ trace_buf_size,
+ MFW_TRACE_ENTRY_SIZE);
+ data_size -= MFW_TRACE_ENTRY_SIZE;
+ format_idx = header & MFW_TRACE_EVENTID_MASK;
+
+ /* Skip message if its index doesn't exist in the meta data */
+ if (format_idx > s_mcp_trace_meta.formats_num) {
+ u8 format_size =
+ (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
+ MFW_TRACE_PRM_SIZE_SHIFT);
+
+ if (data_size < format_size)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ data_offset = qed_cyclic_add(data_offset,
+ format_size,
+ trace_buf_size);
+ data_size -= format_size;
+ continue;
+ }
+
+ format_ptr = &s_mcp_trace_meta.formats[format_idx];
+
+ for (i = 0,
+ param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
+ param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
+ i < MCP_TRACE_FORMAT_MAX_PARAMS;
+ i++,
+ param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
+ param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
+ /* Extract param size (0..3) */
+ u8 param_size = (u8)((format_ptr->data & param_mask) >>
+ param_shift);
+
+ /* If the param size is zero, there are no other
+ * parameters.
+ */
+ if (!param_size)
+ break;
+
+ /* Size is encoded using 2 bits, where 3 is used to
+ * encode 4.
+ */
+ if (param_size == 3)
+ param_size = 4;
+
+ if (data_size < param_size)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ params[i] = qed_read_from_cyclic_buf(trace_buf,
+ &data_offset,
+ trace_buf_size,
+ param_size);
+ data_size -= param_size;
+ }
+
+ format_level = (u8)((format_ptr->data &
+ MCP_TRACE_FORMAT_LEVEL_MASK) >>
+ MCP_TRACE_FORMAT_LEVEL_SHIFT);
+ format_module = (u8)((format_ptr->data &
+ MCP_TRACE_FORMAT_MODULE_MASK) >>
+ MCP_TRACE_FORMAT_MODULE_SHIFT);
+ if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ /* Print current message to results buffer */
+ *parsed_bytes +=
+ sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
+ "%s %-8s: ",
+ s_mcp_trace_level_str[format_level],
+ s_mcp_trace_meta.modules[format_module]);
+ *parsed_bytes +=
+ sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
+ format_ptr->format_str,
+ params[0], params[1], params[2]);
+ }
+
+ /* Add string NULL terminator */
+ (*parsed_bytes)++;
+
+ return status;
+}
+
/* Parses an MCP Trace dump buffer.
* If parsed_buf is not NULL, the MCP Trace results are printed to it.
* In any case, the required results buffer size is assigned to
- * parsed_results_bytes.
+ * parsed_bytes.
* The parsing status is returned.
*/
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
- char *results_buf,
- u32 *parsed_results_bytes)
+ char *parsed_buf,
+ u32 *parsed_bytes)
{
- u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords;
- u32 param_mask, param_shift, param_num_val, num_section_params;
const char *section_name, *param_name, *param_str_val;
- u32 offset, results_offset = 0;
- struct mcp_trace_meta meta;
+ u32 data_size, trace_data_dwords, trace_meta_dwords;
+ u32 offset, results_offset, parsed_buf_bytes;
+ u32 param_num_val, num_section_params;
struct mcp_trace *trace;
enum dbg_status status;
const u32 *meta_buf;
u8 *trace_buf;
- *parsed_results_bytes = 0;
+ *parsed_bytes = 0;
/* Read global_params section */
dump_buf += qed_read_section_hdr(dump_buf,
@@ -6629,7 +6771,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Print global params */
dump_buf += qed_print_section_params(dump_buf,
num_section_params,
- results_buf, &results_offset);
+ parsed_buf, &results_offset);
/* Read trace_data section */
dump_buf += qed_read_section_hdr(dump_buf,
@@ -6646,8 +6788,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
trace = (struct mcp_trace *)dump_buf;
trace_buf = (u8 *)dump_buf + sizeof(*trace);
offset = trace->trace_oldest;
- end_offset = trace->trace_prod;
- bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
+ data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
dump_buf += trace_data_dwords;
/* Read meta_data section */
@@ -6664,126 +6805,33 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
/* Choose meta data buffer */
if (!trace_meta_dwords) {
/* Dump doesn't include meta data */
- if (!s_mcp_trace_meta.ptr)
+ if (!s_mcp_trace_meta_arr.ptr)
return DBG_STATUS_MCP_TRACE_NO_META;
- meta_buf = s_mcp_trace_meta.ptr;
+ meta_buf = s_mcp_trace_meta_arr.ptr;
} else {
/* Dump includes meta data */
meta_buf = dump_buf;
}
/* Allocate meta data memory */
- status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
+ status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta);
if (status != DBG_STATUS_OK)
- goto free_mem;
-
- /* Ignore the level and modules masks - just print everything that is
- * already in the buffer.
- */
- while (bytes_left) {
- struct mcp_trace_format *format_ptr;
- u8 format_level, format_module;
- u32 params[3] = { 0, 0, 0 };
- u32 header, format_idx, i;
-
- if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
- status = DBG_STATUS_MCP_TRACE_BAD_DATA;
- goto free_mem;
- }
-
- header = qed_read_from_cyclic_buf(trace_buf,
- &offset,
- trace->size,
- MFW_TRACE_ENTRY_SIZE);
- bytes_left -= MFW_TRACE_ENTRY_SIZE;
- format_idx = header & MFW_TRACE_EVENTID_MASK;
-
- /* Skip message if its index doesn't exist in the meta data */
- if (format_idx > meta.formats_num) {
- u8 format_size =
- (u8)((header &
- MFW_TRACE_PRM_SIZE_MASK) >>
- MFW_TRACE_PRM_SIZE_SHIFT);
-
- if (bytes_left < format_size) {
- status = DBG_STATUS_MCP_TRACE_BAD_DATA;
- goto free_mem;
- }
-
- offset = qed_cyclic_add(offset,
- format_size, trace->size);
- bytes_left -= format_size;
- continue;
- }
-
- format_ptr = &meta.formats[format_idx];
-
- for (i = 0,
- param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
- MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
- i < MCP_TRACE_FORMAT_MAX_PARAMS;
- i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
- param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
- /* Extract param size (0..3) */
- u8 param_size =
- (u8)((format_ptr->data &
- param_mask) >> param_shift);
-
- /* If the param size is zero, there are no other
- * parameters.
- */
- if (!param_size)
- break;
-
- /* Size is encoded using 2 bits, where 3 is used to
- * encode 4.
- */
- if (param_size == 3)
- param_size = 4;
-
- if (bytes_left < param_size) {
- status = DBG_STATUS_MCP_TRACE_BAD_DATA;
- goto free_mem;
- }
-
- params[i] = qed_read_from_cyclic_buf(trace_buf,
- &offset,
- trace->size,
- param_size);
-
- bytes_left -= param_size;
- }
+ return status;
- format_level =
- (u8)((format_ptr->data &
- MCP_TRACE_FORMAT_LEVEL_MASK) >>
- MCP_TRACE_FORMAT_LEVEL_SHIFT);
- format_module =
- (u8)((format_ptr->data &
- MCP_TRACE_FORMAT_MODULE_MASK) >>
- MCP_TRACE_FORMAT_MODULE_SHIFT);
- if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
- status = DBG_STATUS_MCP_TRACE_BAD_DATA;
- goto free_mem;
- }
+ status = qed_parse_mcp_trace_buf(trace_buf,
+ trace->size,
+ offset,
+ data_size,
+ parsed_buf ?
+ parsed_buf + results_offset :
+ NULL,
+ &parsed_buf_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
- /* Print current message to results buffer */
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset), "%s %-8s: ",
- s_mcp_trace_level_str[format_level],
- meta.modules[format_module]);
- results_offset +=
- sprintf(qed_get_buf_ptr(results_buf,
- results_offset),
- format_ptr->format_str, params[0], params[1],
- params[2]);
- }
+ *parsed_bytes = results_offset + parsed_buf_bytes;
-free_mem:
- *parsed_results_bytes = results_offset + 1;
- qed_mcp_trace_free_meta(p_hwfn, &meta);
- return status;
+ return DBG_STATUS_OK;
}
/* Parses a Reg FIFO dump buffer.
@@ -7291,8 +7339,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
{
- s_mcp_trace_meta.ptr = data;
- s_mcp_trace_meta.size_in_dwords = size;
+ s_mcp_trace_meta_arr.ptr = data;
+ s_mcp_trace_meta_arr.size_in_dwords = size;
}
enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
@@ -7316,6 +7364,19 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
results_buf, &parsed_buf_size);
}
+enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf,
+ u32 num_dumped_bytes,
+ char *results_buf)
+{
+ u32 parsed_bytes;
+
+ return qed_parse_mcp_trace_buf(dump_buf,
+ num_dumped_bytes,
+ 0,
+ num_dumped_bytes,
+ results_buf, &parsed_bytes);
+}
+
enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
u32 num_dumped_dwords,
@@ -7891,6 +7952,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
}
}
+ qed_set_debug_engine(cdev, org_engine);
/* mcp_trace */
rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
@@ -7903,8 +7965,6 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
}
- qed_set_debug_engine(cdev, org_engine);
-
return 0;
}
@@ -7929,9 +7989,10 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
}
+ qed_set_debug_engine(cdev, org_engine);
+
/* Engine common */
regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
- qed_set_debug_engine(cdev, org_engine);
return regs_len;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index cdb3eec0f68c..d2ad5e92c74f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -407,6 +407,7 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
"pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
/* init pq params */
+ qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
qm_info->num_vports;
qm_info->qm_pq_params[pq_idx].tc_id = tc;
@@ -727,8 +728,9 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
pq = &(qm_info->qm_pq_params[i]);
DP_VERBOSE(p_hwfn,
NETIF_MSG_HW,
- "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
qm_info->start_pq + i,
+ pq->port_id,
pq->vport_id,
pq->tc_id, pq->wrr_group, pq->rl_valid);
}
@@ -2930,6 +2932,12 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
+static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->nvm_info.image_att);
+ p_hwfn->nvm_info.image_att = NULL;
+}
+
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
void __iomem *p_regview,
void __iomem *p_doorbells,
@@ -2993,12 +3001,25 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
}
+ /* NVRAM info initialization and population */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_mcp_nvm_info_populate(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to populate nvm info shadow\n");
+ goto err2;
+ }
+ }
+
/* Allocate the init RT array and initialize the init-ops engine */
rc = qed_init_alloc(p_hwfn);
if (rc)
- goto err2;
+ goto err3;
return rc;
+err3:
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_nvm_info_free(p_hwfn);
err2:
if (IS_LEAD_HWFN(p_hwfn))
qed_iov_free_hw_info(p_hwfn->cdev);
@@ -3054,6 +3075,7 @@ int qed_hw_prepare(struct qed_dev *cdev,
if (rc) {
if (IS_PF(cdev)) {
qed_init_free(p_hwfn);
+ qed_nvm_info_free(p_hwfn);
qed_mcp_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
}
@@ -3086,6 +3108,8 @@ void qed_hw_remove(struct qed_dev *cdev)
}
qed_iov_free_hw_info(cdev);
+
+ qed_nvm_info_free(p_hwfn);
}
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index de873d770575..7f5ec42dde48 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -612,7 +612,7 @@ struct e4_xstorm_core_conn_ag_ctx {
__le16 reserved16;
__le16 tx_bd_cons;
__le16 tx_bd_or_spq_prod;
- __le16 word5;
+ __le16 updated_qm_pq_id;
__le16 conn_dpi;
u8 byte3;
u8 byte4;
@@ -1005,7 +1005,9 @@ enum fw_flow_ctrl_mode {
enum gft_profile_type {
GFT_PROFILE_TYPE_4_TUPLE,
GFT_PROFILE_TYPE_L4_DST_PORT,
- GFT_PROFILE_TYPE_IP_DST_PORT,
+ GFT_PROFILE_TYPE_IP_DST_ADDR,
+ GFT_PROFILE_TYPE_IP_SRC_ADDR,
+ GFT_PROFILE_TYPE_TUNNEL_TYPE,
MAX_GFT_PROFILE_TYPE
};
@@ -1133,7 +1135,7 @@ struct protocol_dcb_data {
u8 dcb_priority;
u8 dcb_tc;
u8 dscp_val;
- u8 reserved0;
+ u8 dcb_dont_add_vlan0;
};
/* Update tunnel configuration */
@@ -1932,7 +1934,7 @@ enum bin_dbg_buffer_type {
/* Attention bit mapping */
struct dbg_attn_bit_mapping {
- __le16 data;
+ u16 data;
#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
@@ -1941,11 +1943,12 @@ struct dbg_attn_bit_mapping {
/* Attention block per-type data */
struct dbg_attn_block_type_data {
- __le16 names_offset;
- __le16 reserved1;
+ u16 names_offset;
+ u16 reserved1;
u8 num_regs;
u8 reserved2;
- __le16 regs_offset;
+ u16 regs_offset;
};
/* Block attentions */
@@ -1955,15 +1958,15 @@ struct dbg_attn_block {
/* Attention register result */
struct dbg_attn_reg_result {
- __le32 data;
+ u32 data;
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
- __le16 block_attn_offset;
- __le16 reserved;
- __le32 sts_val;
- __le32 mask_val;
+ u16 block_attn_offset;
+ u16 reserved;
+ u32 sts_val;
+ u32 mask_val;
};
/* Attention block result */
@@ -1974,13 +1977,13 @@ struct dbg_attn_block_result {
#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
- __le16 names_offset;
+ u16 names_offset;
struct dbg_attn_reg_result reg_results[15];
};
/* Mode header */
struct dbg_mode_hdr {
- __le16 data;
+ u16 data;
#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
@@ -1990,14 +1993,14 @@ struct dbg_mode_hdr {
/* Attention register */
struct dbg_attn_reg {
struct dbg_mode_hdr mode;
- __le16 block_attn_offset;
- __le32 data;
+ u16 block_attn_offset;
+ u32 data;
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
- __le32 sts_clr_address;
- __le32 mask_address;
+ u32 sts_clr_address;
+ u32 mask_address;
};
/* Attention types */
@@ -2011,14 +2014,14 @@ enum dbg_attn_type {
struct dbg_bus_block {
u8 num_of_lines;
u8 has_latency_events;
- __le16 lines_offset;
+ u16 lines_offset;
};
/* Debug Bus block user data */
struct dbg_bus_block_user_data {
u8 num_of_lines;
u8 has_latency_events;
- __le16 names_offset;
+ u16 names_offset;
};
/* Block Debug line data */
@@ -2042,12 +2045,12 @@ struct dbg_dump_cond_hdr {
/* Memory data for registers dump */
struct dbg_dump_mem {
- __le32 dword0;
+ u32 dword0;
#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
- __le32 dword1;
+ u32 dword1;
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
@@ -2058,7 +2061,7 @@ struct dbg_dump_mem {
/* Register data for registers dump */
struct dbg_dump_reg {
- __le32 data;
+ u32 data;
#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_DUMP_REG_ADDRESS_SHIFT 0
#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
@@ -2069,7 +2072,7 @@ struct dbg_dump_reg {
/* Split header for registers dump */
struct dbg_dump_split_hdr {
- __le32 hdr;
+ u32 hdr;
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
@@ -2079,33 +2082,33 @@ struct dbg_dump_split_hdr {
/* Condition header for idle check */
struct dbg_idle_chk_cond_hdr {
struct dbg_mode_hdr mode; /* Mode header */
- __le16 data_size; /* size in dwords of the data following this header */
+ u16 data_size; /* size in dwords of the data following this header */
};
/* Idle Check condition register */
struct dbg_idle_chk_cond_reg {
- __le32 data;
+ u32 data;
#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
- __le16 num_entries;
+ u16 num_entries;
u8 entry_size;
u8 start_entry;
};
/* Idle Check info register */
struct dbg_idle_chk_info_reg {
- __le32 data;
+ u32 data;
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
- __le16 size; /* register size in dwords */
+ u16 size; /* register size in dwords */
struct dbg_mode_hdr mode; /* Mode header */
};
@@ -2117,8 +2120,8 @@ union dbg_idle_chk_reg {
/* Idle Check result header */
struct dbg_idle_chk_result_hdr {
- __le16 rule_id; /* Failing rule index */
- __le16 mem_entry_id; /* Failing memory entry index */
+ u16 rule_id; /* Failing rule index */
+ u16 mem_entry_id; /* Failing memory entry index */
u8 num_dumped_cond_regs; /* number of dumped condition registers */
u8 num_dumped_info_regs; /* number of dumped info registers */
u8 severity; /* from dbg_idle_chk_severity_types enum */
@@ -2133,29 +2136,29 @@ struct dbg_idle_chk_result_reg_hdr {
#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
u8 start_entry; /* index of the first checked entry */
- __le16 size; /* register size in dwords */
+ u16 size; /* register size in dwords */
};
/* Idle Check rule */
struct dbg_idle_chk_rule {
- __le16 rule_id; /* Idle Check rule ID */
+ u16 rule_id; /* Idle Check rule ID */
u8 severity; /* value from dbg_idle_chk_severity_types enum */
u8 cond_id; /* Condition ID */
u8 num_cond_regs; /* number of condition registers */
u8 num_info_regs; /* number of info registers */
u8 num_imms; /* number of immediates in the condition */
u8 reserved1;
- __le16 reg_offset; /* offset of this rules registers in the idle check
- * register array (in dbg_idle_chk_reg units).
- */
- __le16 imm_offset; /* offset of this rules immediate values in the
- * immediate values array (in dwords).
- */
+ u16 reg_offset; /* offset of this rule's registers in the idle check
+ * register array (in dbg_idle_chk_reg units).
+ */
+ u16 imm_offset; /* offset of this rule's immediate values in the
+ * immediate values array (in dwords).
+ */
};
/* Idle Check rule parsing data */
struct dbg_idle_chk_rule_parsing_data {
- __le32 data;
+ u32 data;
#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
@@ -2175,7 +2178,7 @@ enum dbg_idle_chk_severity_types {
/* Debug Bus block data */
struct dbg_bus_block_data {
- __le16 data;
+ u16 data;
#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
@@ -2238,15 +2241,15 @@ struct dbg_bus_trigger_state_data {
/* Debug Bus memory address */
struct dbg_bus_mem_addr {
- __le32 lo;
- __le32 hi;
+ u32 lo;
+ u32 hi;
};
/* Debug Bus PCI buffer data */
struct dbg_bus_pci_buf_data {
struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
- __le32 size; /* PCI buffer size in bytes */
+ u32 size; /* PCI buffer size in bytes */
};
/* Debug Bus Storm EID range filter params */
@@ -2276,15 +2279,15 @@ struct dbg_bus_storm_data {
u8 eid_range_not_mask;
u8 cid_filter_en;
union dbg_bus_storm_eid_params eid_filter_params;
- __le32 cid;
+ u32 cid;
};
/* Debug Bus data */
struct dbg_bus_data {
- __le32 app_version;
+ u32 app_version;
u8 state;
u8 hw_dwords;
- __le16 hw_id_mask;
+ u16 hw_id_mask;
u8 num_enabled_blocks;
u8 num_enabled_storms;
u8 target;
@@ -2295,7 +2298,7 @@ struct dbg_bus_data {
u8 adding_filter;
u8 filter_pre_trigger;
u8 filter_post_trigger;
- __le16 reserved;
+ u16 reserved;
u8 trigger_en;
struct dbg_bus_trigger_state_data trigger_states[3];
u8 next_trigger_state;
@@ -2391,8 +2394,8 @@ enum dbg_bus_targets {
struct dbg_grc_data {
u8 params_initialized;
u8 reserved1;
- __le16 reserved2;
- __le32 param_val[48];
+ u16 reserved2;
+ u32 param_val[48];
};
/* Debug GRC params */
@@ -2414,7 +2417,7 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_CAU,
DBG_GRC_PARAM_DUMP_QM,
DBG_GRC_PARAM_DUMP_MCP,
- DBG_GRC_PARAM_RESERVED,
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
DBG_GRC_PARAM_DUMP_CFC,
DBG_GRC_PARAM_DUMP_IGU,
DBG_GRC_PARAM_DUMP_BRB,
@@ -2526,10 +2529,10 @@ enum dbg_storms {
/* Idle Check data */
struct idle_chk_data {
- __le32 buf_size;
+ u32 buf_size;
u8 buf_size_set;
u8 reserved1;
- __le16 reserved2;
+ u16 reserved2;
};
/* Debug Tools data (per HW function) */
@@ -2543,7 +2546,7 @@ struct dbg_tools_data {
u8 platform_id;
u8 initialized;
u8 use_dmae;
- __le32 num_regs_read;
+ u32 num_regs_read;
};
/********************************/
@@ -2555,10 +2558,10 @@ struct dbg_tools_data {
/* BRB RAM init requirements */
struct init_brb_ram_req {
- __le32 guranteed_per_tc;
- __le32 headroom_per_tc;
- __le32 min_pkt_size;
- __le32 max_ports_per_engine;
+ u32 guranteed_per_tc;
+ u32 headroom_per_tc;
+ u32 min_pkt_size;
+ u32 max_ports_per_engine;
u8 num_active_tcs[MAX_NUM_PORTS];
};
@@ -2566,21 +2569,21 @@ struct init_brb_ram_req {
struct init_ets_tc_req {
u8 use_sp;
u8 use_wfq;
- __le16 weight;
+ u16 weight;
};
/* ETS init requirements */
struct init_ets_req {
- __le32 mtu;
+ u32 mtu;
struct init_ets_tc_req tc_req[NUM_OF_TCS];
};
/* NIG LB RL init requirements */
struct init_nig_lb_rl_req {
- __le16 lb_mac_rate;
- __le16 lb_rate;
- __le32 mtu;
- __le16 tc_rate[NUM_OF_PHYS_TCS];
+ u16 lb_mac_rate;
+ u16 lb_rate;
+ u32 mtu;
+ u16 tc_rate[NUM_OF_PHYS_TCS];
};
/* NIG TC mapping for each priority */
@@ -2598,9 +2601,9 @@ struct init_nig_pri_tc_map_req {
struct init_qm_port_params {
u8 active;
u8 active_phys_tcs;
- __le16 num_pbf_cmd_lines;
- __le16 num_btb_blocks;
- __le16 reserved;
+ u16 num_pbf_cmd_lines;
+ u16 num_btb_blocks;
+ u16 reserved;
};
/* QM per-PQ init parameters */
@@ -2609,13 +2612,16 @@ struct init_qm_pq_params {
u8 tc_id;
u8 wrr_group;
u8 rl_valid;
+ u8 port_id;
+ u8 reserved0;
+ u16 reserved1;
};
/* QM per-vport init parameters */
struct init_qm_vport_params {
- __le32 vport_rl;
- __le16 vport_wfq;
- __le16 first_tx_pq_id[NUM_OF_TCS];
+ u32 vport_rl;
+ u16 vport_wfq;
+ u16 first_tx_pq_id[NUM_OF_TCS];
};
/**************************************/
@@ -2639,8 +2645,8 @@ enum chip_ids {
};
struct fw_asserts_ram_section {
- __le16 section_ram_line_offset;
- __le16 section_ram_line_size;
+ u16 section_ram_line_offset;
+ u16 section_ram_line_size;
u8 list_dword_offset;
u8 list_element_dword_size;
u8 list_num_elements;
@@ -2713,8 +2719,8 @@ enum init_split_types {
/* Binary buffer header */
struct bin_buffer_hdr {
- __le32 offset;
- __le32 length;
+ u32 offset;
+ u32 length;
};
/* Binary init buffer types */
@@ -2729,7 +2735,7 @@ enum bin_init_buffer_type {
/* init array header: raw */
struct init_array_raw_hdr {
- __le32 data;
+ u32 data;
#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
@@ -2738,7 +2744,7 @@ struct init_array_raw_hdr {
/* init array header: standard */
struct init_array_standard_hdr {
- __le32 data;
+ u32 data;
#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
@@ -2747,7 +2753,7 @@ struct init_array_standard_hdr {
/* init array header: zipped */
struct init_array_zipped_hdr {
- __le32 data;
+ u32 data;
#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
@@ -2756,7 +2762,7 @@ struct init_array_zipped_hdr {
/* init array header: pattern */
struct init_array_pattern_hdr {
- __le32 data;
+ u32 data;
#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
@@ -2783,41 +2789,41 @@ enum init_array_types {
/* init operation: callback */
struct init_callback_op {
- __le32 op_data;
+ u32 op_data;
#define INIT_CALLBACK_OP_OP_MASK 0xF
#define INIT_CALLBACK_OP_OP_SHIFT 0
#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
- __le16 callback_id;
- __le16 block_id;
+ u16 callback_id;
+ u16 block_id;
};
/* init operation: delay */
struct init_delay_op {
- __le32 op_data;
+ u32 op_data;
#define INIT_DELAY_OP_OP_MASK 0xF
#define INIT_DELAY_OP_OP_SHIFT 0
#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_DELAY_OP_RESERVED_SHIFT 4
- __le32 delay;
+ u32 delay;
};
/* init operation: if_mode */
struct init_if_mode_op {
- __le32 op_data;
+ u32 op_data;
#define INIT_IF_MODE_OP_OP_MASK 0xF
#define INIT_IF_MODE_OP_OP_SHIFT 0
#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
- __le16 reserved2;
- __le16 modes_buf_offset;
+ u16 reserved2;
+ u16 modes_buf_offset;
};
/* init operation: if_phase */
struct init_if_phase_op {
- __le32 op_data;
+ u32 op_data;
#define INIT_IF_PHASE_OP_OP_MASK 0xF
#define INIT_IF_PHASE_OP_OP_SHIFT 0
#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
@@ -2826,7 +2832,7 @@ struct init_if_phase_op {
#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
- __le32 phase_data;
+ u32 phase_data;
#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF
#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
@@ -2845,31 +2851,31 @@ enum init_mode_ops {
/* init operation: raw */
struct init_raw_op {
- __le32 op_data;
+ u32 op_data;
#define INIT_RAW_OP_OP_MASK 0xF
#define INIT_RAW_OP_OP_SHIFT 0
#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF
#define INIT_RAW_OP_PARAM1_SHIFT 4
- __le32 param2;
+ u32 param2;
};
/* init array params */
struct init_op_array_params {
- __le16 size;
- __le16 offset;
+ u16 size;
+ u16 offset;
};
/* Write init operation arguments */
union init_write_args {
- __le32 inline_val;
- __le32 zeros_count;
- __le32 array_offset;
+ u32 inline_val;
+ u32 zeros_count;
+ u32 array_offset;
struct init_op_array_params runtime;
};
/* init operation: write */
struct init_write_op {
- __le32 data;
+ u32 data;
#define INIT_WRITE_OP_OP_MASK 0xF
#define INIT_WRITE_OP_OP_SHIFT 0
#define INIT_WRITE_OP_SOURCE_MASK 0x7
@@ -2885,7 +2891,7 @@ struct init_write_op {
/* init operation: read */
struct init_read_op {
- __le32 op_data;
+ u32 op_data;
#define INIT_READ_OP_OP_MASK 0xF
#define INIT_READ_OP_OP_SHIFT 0
#define INIT_READ_OP_POLL_TYPE_MASK 0xF
@@ -2894,7 +2900,7 @@ struct init_read_op {
#define INIT_READ_OP_RESERVED_SHIFT 8
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT 9
- __le32 expected_val;
+ u32 expected_val;
};
/* Init operations union */
@@ -2939,11 +2945,11 @@ enum init_source_types {
/* Internal RAM Offsets macro data */
struct iro {
- __le32 base;
- __le16 m1;
- __le16 m2;
- __le16 m3;
- __le16 size;
+ u32 base;
+ u16 m1;
+ u16 m2;
+ u16 m3;
+ u16 size;
};
/***************************** Public Functions *******************************/
@@ -3384,6 +3390,19 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
char *results_buf);
/**
+ * @brief qed_print_mcp_trace_line - Prints MCP Trace results for a single line
+ *
+ * @param dump_buf - mcp trace dump buffer, starting from the header.
+ * @param num_dumped_bytes - number of bytes that were dumped.
+ * @param results_buf - buffer for printing the mcp trace results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf,
+ u32 num_dumped_bytes,
+ char *results_buf);
+
+/**
* @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
* for reg_fifo results (in bytes).
*
@@ -4005,6 +4024,9 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable);
+void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool enable);
+
/**
* @brief qed_gft_disable - Disable GFT
*
@@ -4348,8 +4370,8 @@ static const struct iro iro_arr[51] = {
{0x80, 0x8, 0x0, 0x0, 0x4},
{0x84, 0x8, 0x0, 0x0, 0x2},
{0x4c48, 0x0, 0x0, 0x0, 0x78},
- {0x3e18, 0x0, 0x0, 0x0, 0x78},
- {0x2b58, 0x0, 0x0, 0x0, 0x78},
+ {0x3e38, 0x0, 0x0, 0x0, 0x78},
+ {0x2b78, 0x0, 0x0, 0x0, 0x78},
{0x4c40, 0x0, 0x0, 0x0, 0x78},
{0x4998, 0x0, 0x0, 0x0, 0x78},
{0x7f50, 0x0, 0x0, 0x0, 0x78},
@@ -4364,7 +4386,7 @@ static const struct iro iro_arr[51] = {
{0x4ba8, 0x80, 0x0, 0x0, 0x20},
{0x8158, 0x40, 0x0, 0x0, 0x30},
{0xe770, 0x60, 0x0, 0x0, 0x60},
- {0x2cf0, 0x80, 0x0, 0x0, 0x38},
+ {0x2d10, 0x80, 0x0, 0x0, 0x38},
{0xf2b8, 0x78, 0x0, 0x0, 0x78},
{0x1f8, 0x4, 0x0, 0x0, 0x4},
{0xaf20, 0x0, 0x0, 0x0, 0xf0},
@@ -4384,10 +4406,10 @@ static const struct iro iro_arr[51] = {
{0x10300, 0x18, 0x0, 0x0, 0x10},
{0xde48, 0x48, 0x0, 0x0, 0x38},
{0x10768, 0x20, 0x0, 0x0, 0x20},
- {0x2d28, 0x80, 0x0, 0x0, 0x10},
+ {0x2d48, 0x80, 0x0, 0x0, 0x10},
{0x5048, 0x10, 0x0, 0x0, 0x10},
{0xc9b8, 0x30, 0x0, 0x0, 0x10},
- {0xeee0, 0x10, 0x0, 0x0, 0x10},
+ {0xed90, 0x10, 0x0, 0x0, 0x10},
{0xa3a0, 0x10, 0x0, 0x0, 0x10},
{0x13108, 0x8, 0x0, 0x0, 0x8},
};
@@ -5151,7 +5173,7 @@ struct e4_xstorm_eth_conn_ag_ctx {
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
- __le16 tx_class;
+ __le16 updated_qm_pq_id;
__le16 conn_dpi;
u8 byte3;
u8 byte4;
@@ -5674,7 +5696,6 @@ struct eth_vport_rx_mode {
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
- __le16 reserved2[3];
};
/* Command for setting tpa parameters */
@@ -5712,7 +5733,6 @@ struct eth_vport_tx_mode {
#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
- __le16 reserved2[3];
};
/* GFT filter update action type */
@@ -5805,7 +5825,8 @@ struct rx_queue_update_ramrod_data {
u8 complete_cqe_flg;
u8 complete_event_flg;
u8 vport_id;
- u8 reserved[4];
+ u8 set_default_rss_queue;
+ u8 reserved[3];
u8 reserved1;
u8 reserved2;
u8 reserved3;
@@ -5843,7 +5864,7 @@ struct rx_update_gft_filter_data {
u8 flow_id_valid;
u8 filter_action;
u8 assert_on_error;
- u8 reserved;
+ u8 inner_vlan_removal_en;
};
/* Ramrod data for rx queue start ramrod */
@@ -5927,7 +5948,7 @@ struct vport_start_ramrod_data {
u8 zero_placement_offset;
u8 ctl_frame_mac_check_en;
u8 ctl_frame_ethtype_check_en;
- u8 reserved[5];
+ u8 reserved[1];
};
/* Ramrod data for vport stop ramrod */
@@ -5992,6 +6013,7 @@ struct vport_update_ramrod_data {
struct eth_vport_rx_mode rx_mode;
struct eth_vport_tx_mode tx_mode;
+ __le32 reserved[3];
struct eth_vport_tpa_param tpa_param;
struct vport_update_ramrod_mcast approx_mcast;
struct eth_vport_rss_config rss_config;
@@ -6213,7 +6235,7 @@ struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
- __le16 tx_class;
+ __le16 updated_qm_pq_id;
__le16 conn_dpi;
u8 byte3;
u8 byte4;
@@ -6479,7 +6501,7 @@ struct e4_xstorm_eth_hw_conn_ag_ctx {
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
- __le16 tx_class;
+ __le16 updated_qm_pq_id;
__le16 conn_dpi;
};
@@ -6703,8 +6725,8 @@ struct e4_ystorm_rdma_task_ag_ctx {
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
@@ -6759,8 +6781,8 @@ struct e4_mstorm_rdma_task_ag_ctx {
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
@@ -6814,7 +6836,7 @@ struct ustorm_rdma_task_st_ctx {
struct e4_ustorm_rdma_task_ag_ctx {
u8 reserved;
- u8 byte1;
+ u8 state;
__le16 icid;
u8 flags0;
#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
@@ -6830,8 +6852,8 @@ struct e4_ustorm_rdma_task_ag_ctx {
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
@@ -6841,8 +6863,8 @@ struct e4_ustorm_rdma_task_ag_ctx {
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
@@ -6864,10 +6886,17 @@ struct e4_ustorm_rdma_task_ag_ctx {
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
- __le32 reg2;
+ __le32 sq_cons;
__le32 dif_runt_value;
- __le32 reg4;
+ __le32 sge_index;
__le32 reg5;
+ u8 byte2;
+ u8 byte3;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg6;
+ __le32 reg7;
};
/* RDMA task context */
@@ -6970,7 +6999,9 @@ struct rdma_init_func_hdr {
u8 vf_id;
u8 vf_valid;
u8 relaxed_ordering;
- u8 reserved[2];
+ __le16 first_reg_srq_id;
+ __le32 reg_srq_base_addr;
+ __le32 reserved;
};
/* rdma function init ramrod data */
@@ -7077,13 +7108,23 @@ struct rdma_srq_context {
/* rdma create srq ramrod data */
struct rdma_srq_create_ramrod_data {
+ u8 flags;
+#define RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG_MASK 0x1
+#define RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG_SHIFT 0
+#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
+#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 1
+#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED1_SHIFT 2
+ u8 reserved2;
+ __le16 xrc_domain;
+ __le32 xrc_srq_cq_cid;
struct regpair pbl_base_addr;
__le16 pages_in_srq_pbl;
__le16 pd_id;
struct rdma_srq_id srq_id;
__le16 page_size;
- __le16 reserved1;
- __le32 reserved2;
+ __le16 reserved3;
+ __le32 reserved4;
struct regpair producers_addr;
};
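
The new XRC fields would be driven through the same SET_FIELD() convention; a minimal sketch of flagging an XRC SRQ at create time (srq_ramrod_set_xrc and its caller are hypothetical, not driver API):

/* Hypothetical snippet: mark an SRQ as XRC and attach its domain.
 * SET_FIELD() clears then sets the named field within 'flags'.
 */
static void srq_ramrod_set_xrc(struct rdma_srq_create_ramrod_data *p_ramrod,
			       u16 xrc_domain)
{
	SET_FIELD(p_ramrod->flags,
		  RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1);
	p_ramrod->xrc_domain = cpu_to_le16(xrc_domain);
}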
@@ -7108,372 +7149,8 @@ enum rdma_tid_type {
MAX_RDMA_TID_TYPE
};
-struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
- u8 reserved0;
- u8 state;
- u8 flags0;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
- u8 flags1;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
- u8 flags2;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
- u8 flags3;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
- u8 flags4;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
- u8 flags5;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
- u8 flags6;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
- u8 flags7;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
- u8 flags8;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
- u8 flags9;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
- u8 flags10;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
- u8 flags11;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
- u8 flags12;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
- u8 flags13;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
- u8 flags14;
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
-#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
- u8 byte2;
- __le16 physical_q0;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le16 word4;
- __le16 word5;
- __le16 conn_dpi;
- u8 byte3;
- u8 byte4;
- u8 byte5;
- u8 byte6;
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 snd_nxt_psn;
- __le32 reg4;
-};
-
-struct e4_mstorm_rdma_conn_ag_ctx {
- u8 byte0;
- u8 byte1;
- u8 flags0;
-#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0;
- __le16 word1;
- __le32 reg0;
- __le32 reg1;
-};
-
-struct e4_tstorm_rdma_conn_ag_ctx {
- u8 reserved0;
- u8 byte1;
- u8 flags0;
-#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
- u8 flags1;
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
- u8 flags2;
-#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
- u8 flags3;
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
- u8 flags4;
-#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags5;
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0;
- __le32 reg1;
- __le32 reg2;
- __le32 reg3;
- __le32 reg4;
- __le32 reg5;
- __le32 reg6;
- __le32 reg7;
- __le32 reg8;
- u8 byte2;
- u8 byte3;
- __le16 word0;
- u8 byte4;
- u8 byte5;
- __le16 word1;
- __le16 word2;
- __le16 word3;
- __le32 reg9;
- __le32 reg10;
+struct rdma_xrc_srq_context {
+ struct regpair temp[9];
};
struct e4_tstorm_rdma_task_ag_ctx {
@@ -7561,8 +7238,8 @@ struct e4_ustorm_rdma_conn_ag_ctx {
u8 flags0;
#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1
#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
@@ -7624,214 +7301,214 @@ struct e4_ustorm_rdma_conn_ag_ctx {
__le16 word3;
};
-struct e4_xstorm_rdma_conn_ag_ctx {
+struct e4_xstorm_roce_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
+#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
-#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4
+#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -7853,48 +7530,108 @@ struct e4_xstorm_rdma_conn_ag_ctx {
__le32 reg6;
};
-struct e4_ystorm_rdma_conn_ag_ctx {
- u8 byte0;
+struct e4_tstorm_roce_conn_ag_ctx {
+ u8 reserved0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
u8 byte2;
u8 byte3;
__le16 word0;
- __le32 reg0;
- __le32 reg1;
+ u8 byte4;
+ u8 byte5;
__le16 word1;
__le16 word2;
__le16 word3;
- __le16 word4;
- __le32 reg2;
- __le32 reg3;
+ __le32 reg9;
+ __le32 reg10;
};
/* The roce storm context of Ystorm */
@@ -7933,15 +7670,15 @@ struct e4_roce_conn_context {
struct regpair ystorm_st_padding[2];
struct pstorm_roce_conn_st_ctx pstorm_st_context;
struct xstorm_roce_conn_st_ctx xstorm_st_context;
- struct regpair xstorm_st_padding[2];
- struct e4_xstorm_rdma_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+ struct e4_xstorm_roce_conn_ag_ctx xstorm_ag_context;
+ struct e4_tstorm_roce_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_roce_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
struct mstorm_roce_conn_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
struct ustorm_roce_conn_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2];
};
/* roce create qp requester ramrod data */
@@ -7955,8 +7692,8 @@ struct roce_create_qp_req_ramrod_data {
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG_SHIFT 7
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
@@ -7982,18 +7719,18 @@ struct roce_create_qp_req_ramrod_data {
__le16 udp_src_port;
__le32 src_gid[4];
__le32 dst_gid[4];
+ __le32 cq_cid;
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
u8 stats_counter_id;
u8 reserved3[7];
- __le32 cq_cid;
__le16 regular_latency_phy_queue;
__le16 dpi;
};
/* roce create qp responder ramrod data */
struct roce_create_qp_resp_ramrod_data {
- __le16 flags;
+ __le32 flags;
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
@@ -8012,6 +7749,11 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x7FFF
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 17
+ __le16 xrc_domain;
u8 max_ird;
u8 traffic_class;
u8 hop_limit;
@@ -8037,7 +7779,7 @@ struct roce_create_qp_resp_ramrod_data {
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
__le16 low_latency_phy_queue;
- u8 reserved2[6];
+ u8 reserved2[2];
__le32 cq_cid;
__le16 regular_latency_phy_queue;
__le16 dpi;
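
The flags widening from __le16 to __le32 above is what makes room for the XRC bit at shift 16; a hedged sketch of raising it while keeping the little-endian wire layout (the helper name is illustrative):

/* Hypothetical snippet: set the responder XRC flag (bit 16), which
 * only fits now that 'flags' is 32 bits wide.
 */
static void resp_ramrod_set_xrc(struct roce_create_qp_resp_ramrod_data *p)
{
	u32 flags = le32_to_cpu(p->flags);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG, 1);
	p->flags = cpu_to_le32(flags);
}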
@@ -8248,6 +7990,270 @@ enum roce_ramrod_cmd_id {
MAX_ROCE_RAMROD_CMD_ID
};
+struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+ u8 flags1;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+ u8 flags7;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+ u8 flags10;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+ u8 flags11;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+};
+
+struct e4_mstorm_roce_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
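These connection "aggregation context" structures pack many 1- and 2-bit fields into u8 flags bytes, each described by a paired _MASK/_SHIFT define. A minimal sketch of how such a field is read and written, assuming GET_FIELD/SET_FIELD helpers of the token-pasting form qed uses elsewhere in this patch (see the SET_FIELD calls in qed_gft_config() further down; the exact kernel macro bodies may differ slightly):

	#define GET_FIELD(value, name) \
		(((value) >> name##_SHIFT) & name##_MASK)
	#define SET_FIELD(value, name, flag)					\
		do {								\
			(value) &= ~((u64)name##_MASK << name##_SHIFT);		\
			(value) |= ((u64)(flag) & (u64)name##_MASK) << name##_SHIFT; \
		} while (0)

	/* Read the 2-bit CF0 field of flags0; enable the RULE0EN bit of flags1 */
	static void example_flags_access(struct e4_mstorm_roce_conn_ag_ctx *ctx)
	{
		u8 cf0 = GET_FIELD(ctx->flags0, E4_MSTORM_ROCE_CONN_AG_CTX_CF0);

		if (cf0)
			SET_FIELD(ctx->flags1, E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN, 1);
	}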
struct e4_mstorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
@@ -8341,8 +8347,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
@@ -8350,8 +8356,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
@@ -8365,8 +8371,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
@@ -8374,8 +8380,8 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
u8 flags4;
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
@@ -8421,7 +8427,7 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
u8 byte5;
__le16 snd_sq_cons;
__le16 conn_dpi;
- __le16 word3;
+ __le16 force_comp_cons;
__le32 reg9;
__le32 reg10;
};
@@ -8445,8 +8451,8 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
@@ -8454,8 +8460,8 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
@@ -8469,8 +8475,8 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
@@ -8478,8 +8484,8 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
u8 flags4;
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
@@ -8724,10 +8730,10 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
@@ -8774,10 +8780,10 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7
u8 flags9;
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
@@ -8882,9 +8888,9 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
__le16 sq_cmp_cons;
__le16 sq_cons;
__le16 sq_prod;
- __le16 word5;
+ __le16 dif_error_first_sq_cons;
__le16 conn_dpi;
- u8 byte3;
+ u8 dif_error_sge_index;
u8 byte4;
u8 byte5;
u8 byte6;
@@ -8892,7 +8898,7 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
__le32 ssn;
__le32 snd_una_psn;
__le32 snd_nxt_psn;
- __le32 reg4;
+ __le32 dif_error_offset;
__le32 orq_cons_th;
__le32 orq_cons;
};
@@ -9128,6 +9134,50 @@ struct e4_xstorm_roce_resp_conn_ag_ctx {
__le32 msn_and_syndrome;
};
+struct e4_ystorm_roce_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
struct e4_ystorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
@@ -9236,7 +9286,7 @@ struct pstorm_iwarp_conn_st_ctx {
/* The iwarp storm context of Xstorm */
struct xstorm_iwarp_conn_st_ctx {
- __le32 reserved[44];
+ __le32 reserved[48];
};
struct e4_xstorm_iwarp_conn_ag_ctx {
@@ -9377,8 +9427,8 @@ struct e4_xstorm_iwarp_conn_ag_ctx {
#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5
#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
@@ -9447,8 +9497,8 @@ struct e4_xstorm_iwarp_conn_ag_ctx {
#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -9466,7 +9516,7 @@ struct e4_xstorm_iwarp_conn_ag_ctx {
__le32 reg2;
__le32 more_to_send_seq;
__le32 reg4;
- __le32 rewinded_snd_max;
+ __le32 rewinded_snd_max_or_term_opcode;
__le32 rd_msn;
__le16 irq_prod_via_msdm;
__le16 irq_cons;
@@ -9476,8 +9526,8 @@ struct e4_xstorm_iwarp_conn_ag_ctx {
__le32 orq_cons;
__le32 orq_cons_th;
u8 byte7;
- u8 max_ord;
u8 wqe_data_pad_bytes;
+ u8 max_ord;
u8 former_hq_prod;
u8 irq_prod_via_msem;
u8 byte12;
@@ -9506,8 +9556,8 @@ struct e4_tstorm_iwarp_conn_ag_ctx {
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
@@ -9622,7 +9672,6 @@ struct e4_iwarp_conn_context {
struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
- struct regpair xstorm_st_padding[2];
struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
@@ -9648,8 +9697,10 @@ struct iwarp_create_qp_ramrod_data {
#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6
+#define IWARP_CREATE_QP_RAMROD_DATA_LOW_LATENCY_QUEUE_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_LOW_LATENCY_QUEUE_EN_SHIFT 6
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 7
u8 reserved1;
__le16 pd;
__le16 sq_num_pages;
@@ -9698,6 +9749,7 @@ enum iwarp_eqe_sync_opcode {
IWARP_EVENT_TYPE_QUERY_QP,
IWARP_EVENT_TYPE_MODIFY_QP,
IWARP_EVENT_TYPE_DESTROY_QP,
+ IWARP_EVENT_TYPE_ABORT_TCP_OFFLOAD,
MAX_IWARP_EQE_SYNC_OPCODE
};
@@ -9722,6 +9774,8 @@ enum iwarp_fw_return_code {
IWARP_EXCEPTION_DETECTED_LLP_RESET,
IWARP_EXCEPTION_DETECTED_IRQ_FULL,
IWARP_EXCEPTION_DETECTED_RQ_EMPTY,
+ IWARP_EXCEPTION_DETECTED_SRQ_EMPTY,
+ IWARP_EXCEPTION_DETECTED_SRQ_LIMIT,
IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT,
IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR,
IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW,
@@ -9766,10 +9820,13 @@ struct iwarp_modify_qp_ramrod_data {
#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5
- __le32 reserved3[3];
- __le32 reserved4[8];
+#define IWARP_MODIFY_QP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 5
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x3FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 6
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le32 reserved1[10];
};
/* MPA params for Enhanced mode */
@@ -9853,6 +9910,7 @@ enum iwarp_ramrod_cmd_id {
IWARP_RAMROD_CMD_ID_QUERY_QP,
IWARP_RAMROD_CMD_ID_MODIFY_QP,
IWARP_RAMROD_CMD_ID_DESTROY_QP,
+ IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD,
MAX_IWARP_RAMROD_CMD_ID
};
@@ -11205,7 +11263,7 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
- __le32 reg2;
+ __le32 rx_tcp_checksum_err_cnt;
__le32 reg3;
__le32 reg4;
__le32 reg5;
@@ -12210,8 +12268,11 @@ struct public_drv_mb {
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
#define DRV_MSG_CODE_MCP_RESET 0x00090000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
#define DRV_MSG_CODE_MCP_HALT 0x00100000
@@ -12265,7 +12326,6 @@ struct public_drv_mb {
#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000
#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000
-
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
@@ -12377,7 +12437,10 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
#define FW_MSG_CODE_NVM_OK 0x00010000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
+#define FW_MSG_CODE_PHY_OK 0x00110000
#define FW_MSG_CODE_OK 0x00160000
+#define FW_MSG_CODE_ERROR 0x00170000
#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 18fb5062a83d..1365da7c8900 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -467,12 +467,11 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
u16 *p_first_tx_pq_id;
ext_voq = qed_get_ext_voq(p_hwfn,
- p_params->port_id,
+ pq_params[i].port_id,
tc_id,
p_params->max_phys_tcs_per_port);
is_vf_pq = (i >= p_params->num_pf_pqs);
- rl_valid = pq_params[i].rl_valid &&
- pq_params[i].vport_id < max_qm_global_rls;
+ rl_valid = pq_params[i].rl_valid > 0;
/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
@@ -494,10 +493,11 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
}
/* Check RL ID */
- if (pq_params[i].rl_valid && pq_params[i].vport_id >=
- max_qm_global_rls)
+ if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration\n");
+ rl_valid = false;
+ }
/* Prepare PQ map entry */
QM_INIT_TX_PQ_MAP(p_hwfn,
@@ -528,7 +528,7 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
p_params->pf_id,
tc_id,
- p_params->port_id,
+ pq_params[i].port_id,
rl_valid ? 1 : 0,
rl_valid ?
pq_params[i].vport_id : 0);
@@ -603,6 +603,7 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
* Return -1 on error.
*/
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_pf_rt_init_params *p_params)
{
u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
@@ -619,7 +620,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
for (i = 0; i < num_tx_pqs; i++) {
ext_voq = qed_get_ext_voq(p_hwfn,
- p_params->port_id,
+ pq_params[i].port_id,
pq_params[i].tc_id,
p_params->max_phys_tcs_per_port);
crd_reg_offset =
@@ -1020,7 +1021,8 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
*__p_var = (*__p_var & ~BIT(__offset)) | \
((enable) ? BIT(__offset) : 0); \
} while (0)
-#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
+#define PRS_ETH_OUTPUT_FORMAT -46832
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
@@ -1046,11 +1048,15 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val)
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) {
+ reg_val =
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks are not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ }
/* Update NIG register */
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
@@ -1077,11 +1083,15 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val)
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) {
+ reg_val =
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks are not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ }
/* Update NIG register */
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
@@ -1126,11 +1136,15 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val)
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) {
+ reg_val =
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks are not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ }
/* Update NIG register */
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
@@ -1152,6 +1166,38 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+
+void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool enable)
+{
+ u32 reg_val, cfg_mask;
+
+ /* read PRS config register */
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
+
+ /* set VXLAN_NO_L2_ENABLE mask */
+ cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
+
+ if (enable) {
+ /* set VXLAN_NO_L2_ENABLE flag */
+ reg_val |= cfg_mask;
+
+ /* update PRS FIC register */
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
+ } else {
+ /* clear VXLAN_NO_L2_ENABLE flag */
+ reg_val &= ~cfg_mask;
+ }
+
+ /* write PRS config register */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
+}
+
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
@@ -1268,6 +1314,10 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
ram_line_lo = 0;
ram_line_hi = 0;
+ /* Tunnel type */
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+
if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
@@ -1279,9 +1329,14 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
- } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
}
qed_wr(p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 69051e98aff9..2a2b1018ed1d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -2375,13 +2375,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
memset(&tx_pkt, 0, sizeof(tx_pkt));
tx_pkt.num_of_bds = 1;
- tx_pkt.vlan = data->vlan;
-
- if (GET_FIELD(data->parse_flags,
- PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
- SET_FIELD(tx_pkt.bd_flags,
- CORE_TX_BD_DATA_VLAN_INSERTION, 1);
-
tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
tx_pkt.first_frag = buf->data_phys_addr +
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 893ef08a4b39..e874504e8b28 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1974,7 +1974,7 @@ qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
return GFT_PROFILE_TYPE_4_TUPLE;
if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
- return GFT_PROFILE_TYPE_IP_DST_PORT;
+ return GFT_PROFILE_TYPE_IP_DST_ADDR;
return GFT_PROFILE_TYPE_L4_DST_PORT;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c4f14fdc4e77..74fc626b1ec1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -591,16 +591,6 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
}
}
-static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
-{
- u8 bd_flags = 0;
-
- if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
- SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
-
- return bd_flags;
-}
-
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
@@ -744,7 +734,6 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
struct qed_ooo_buffer *p_buffer;
u16 l4_hdr_offset_w;
dma_addr_t first_frag;
- u16 parse_flags;
u8 bd_flags;
int rc;
@@ -756,8 +745,6 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
first_frag = p_buffer->rx_buffer_phys_addr +
p_buffer->placement_offset;
- parse_flags = p_buffer->parse_flags;
- bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 27832885a87f..9854aa9139af 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -45,6 +45,7 @@
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
+#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
@@ -1553,6 +1554,342 @@ static int qed_drain(struct qed_dev *cdev)
return 0;
}
+static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
+ struct qed_nvm_image_att *nvm_image,
+ u32 *crc)
+{
+ u8 *buf = NULL;
+ int rc, j;
+ u32 val;
+
+ /* Allocate a buffer for holding the nvram image */
+ buf = kzalloc(nvm_image->length, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Read image into buffer */
+ rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
+ buf, nvm_image->length);
+ if (rc) {
+ DP_ERR(cdev, "Failed reading image from nvm\n");
+ goto out;
+ }
+
+ /* Convert the buffer into big-endian format (excluding the
+ * closing 4 bytes of CRC).
+ */
+ for (j = 0; j < nvm_image->length - 4; j += 4) {
+ val = cpu_to_be32(*(u32 *)&buf[j]);
+ *(u32 *)&buf[j] = val;
+ }
+
+ /* Calculate the CRC for the "actual" image buffer, i.e. not including
+ * the last 4 CRC bytes.
+ */
+ *crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));
+
+out:
+ kfree(buf);
+
+ return rc;
+}
+
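The CRC convention above is: byte-swap the image to big-endian word-by-word (excluding the trailing CRC word), CRC it, then complement and byte-swap the result. A minimal user-space model, assuming the kernel's crc32() here is the reflected CRC-32 (crc32_le, polynomial 0xEDB88320) with a caller-supplied seed and no final inversion; htonl() stands in for cpu_to_be32():

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	/* Bitwise reflected CRC-32; caller supplies the seed, no final XOR */
	static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
	{
		while (len--) {
			crc ^= *p++;
			for (int i = 0; i < 8; i++)
				crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
		}
		return crc;
	}

	/* Model of qed_nvm_flash_image_access_crc(); len includes the CRC
	 * word and, as in the driver, is assumed >= 8 and 4-byte aligned.
	 */
	static uint32_t nvm_image_crc(uint8_t *buf, size_t len)
	{
		uint32_t val;
		size_t j;

		for (j = 0; j < len - 4; j += 4) {	/* swap all but the CRC word */
			memcpy(&val, &buf[j], 4);
			val = htonl(val);
			memcpy(&buf[j], &val, 4);
		}
		return ~htonl(crc32_le(0xffffffff, buf, len - 4));
	}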
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x4 [command index] |
+ * 4B | image_type | Options | Number of register settings |
+ * 8B | Value |
+ * 12B | Mask |
+ * 16B | Offset |
+ * \----------------------------------------------------------------------/
+ * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
+ * Options - b'0: Calculate & Update CRC for image
+ */
+static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
+ bool *check_resp)
+{
+ struct qed_nvm_image_att nvm_image;
+ struct qed_hwfn *p_hwfn;
+ bool is_crc = false;
+ u32 image_type;
+ int rc = 0, i;
+ u16 len;
+
+ *data += 4;
+ image_type = **data;
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
+ if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
+ break;
+ if (i == p_hwfn->nvm_info.num_images) {
+ DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
+ image_type);
+ return -ENOENT;
+ }
+
+ nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
+ nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
+ **data, image_type, nvm_image.start_addr,
+ nvm_image.start_addr + nvm_image.length - 1);
+ (*data)++;
+ is_crc = !!(**data & BIT(0));
+ (*data)++;
+ len = *((u16 *)*data);
+ *data += 2;
+ if (is_crc) {
+ u32 crc = 0;
+
+ rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
+ if (rc) {
+ DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
+ goto exit;
+ }
+
+ rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
+ (nvm_image.start_addr +
+ nvm_image.length - 4), (u8 *)&crc, 4);
+ if (rc)
+ DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
+ nvm_image.start_addr + nvm_image.length - 4, rc);
+ goto exit;
+ }
+
+ /* Iterate over the values for setting */
+ while (len) {
+ u32 offset, mask, value, cur_value;
+ u8 buf[4];
+
+ value = *((u32 *)*data);
+ *data += 4;
+ mask = *((u32 *)*data);
+ *data += 4;
+ offset = *((u32 *)*data);
+ *data += 4;
+
+ rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
+ 4);
+ if (rc) {
+ DP_ERR(cdev, "Failed reading from %08x\n",
+ nvm_image.start_addr + offset);
+ goto exit;
+ }
+
+ cur_value = le32_to_cpu(*((__le32 *)buf));
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
+ nvm_image.start_addr + offset, cur_value,
+ (cur_value & ~mask) | (value & mask), value, mask);
+ value = (value & mask) | (cur_value & ~mask);
+ rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
+ nvm_image.start_addr + offset,
+ (u8 *)&value, 4);
+ if (rc) {
+ DP_ERR(cdev, "Failed writing to %08x\n",
+ nvm_image.start_addr + offset);
+ goto exit;
+ }
+
+ len--;
+ }
+exit:
+ return rc;
+}
+
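Read back-to-back, the parser above implies the following on-disk layout for the 0x4 (nvm-change) command. The struct is purely illustrative; the driver walks raw bytes and no such type exists in the source:

	struct qed_nvm_change_cmd {		/* hypothetical */
		__le32 cmd_index;	/* QED_NVM_FLASH_CMD_NVM_CHANGE (0x4) */
		u8 image_type;		/* matched against nvm_info.image_att[] */
		u8 options;		/* bit 0: calculate & update image CRC */
		__le16 num_entries;	/* value/mask/offset sets that follow */
		struct {
			__le32 value;
			__le32 mask;
			__le32 offset;	/* relative to the image start */
		} entry[];		/* ignored when the CRC option is set */
	};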
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x3 [command index] |
+ * 4B | b'0: check_response? | b'1-31 reserved |
+ * 8B | File-type | reserved |
+ * \----------------------------------------------------------------------/
+ * Start a new file of the provided type
+ */
+static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
+ const u8 **data, bool *check_resp)
+{
+ int rc;
+
+ *data += 4;
+ *check_resp = !!(**data & BIT(0));
+ *data += 4;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "About to start a new file of type %02x\n", **data);
+ rc = qed_mcp_nvm_put_file_begin(cdev, **data);
+ *data += 4;
+
+ return rc;
+}
+
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x2 [command index] |
+ * 4B | Length in bytes |
+ * 8B | b'0: check_response? | b'1-31 reserved |
+ * 12B | Offset in bytes |
+ * 16B | Data ... |
+ * \----------------------------------------------------------------------/
+ * Write data as part of a file that was previously started. Data should be
+ * of length equal to that provided in the message
+ */
+static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
+ const u8 **data, bool *check_resp)
+{
+ u32 offset, len;
+ int rc;
+
+ *data += 4;
+ len = *((u32 *)(*data));
+ *data += 4;
+ *check_resp = !!(**data & BIT(0));
+ *data += 4;
+ offset = *((u32 *)(*data));
+ *data += 4;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "About to write File-data: %08x bytes to offset %08x\n",
+ len, offset);
+
+ rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
+ (char *)(*data), len);
+ *data += len;
+
+ return rc;
+}
+
+/* Binary file format [General header] -
+ * /----------------------------------------------------------------------\
+ * 0B | QED_NVM_SIGNATURE |
+ * 4B | Length in bytes |
+ * 8B | Highest command in this batchfile | Reserved |
+ * \----------------------------------------------------------------------/
+ */
+static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
+ const struct firmware *image,
+ const u8 **data)
+{
+ u32 signature, len;
+
+ /* Check minimum size */
+ if (image->size < 12) {
+ DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
+ return -EINVAL;
+ }
+
+ /* Check signature */
+ signature = *((u32 *)(*data));
+ if (signature != QED_NVM_SIGNATURE) {
+ DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
+ return -EINVAL;
+ }
+
+ *data += 4;
+ /* Validate internal size equals the image-size */
+ len = *((u32 *)(*data));
+ if (len != image->size) {
+ DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
+ len, (u32)image->size);
+ return -EINVAL;
+ }
+
+ *data += 4;
+ /* Make sure the driver is familiar with all commands needed by this file */
+ if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
+ DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
+ *((u16 *)(*data)));
+ return -EINVAL;
+ }
+
+ *data += 4;
+
+ return 0;
+}
+
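Correspondingly, a hypothetical C view of the general header validated above; again only a sketch, since the driver parses raw bytes:

	struct qed_nvm_flash_hdr {		/* hypothetical */
		__le32 signature;	/* must equal QED_NVM_SIGNATURE */
		__le32 length;		/* must equal the firmware blob size */
		__le16 highest_cmd;	/* must be < QED_NVM_FLASH_CMD_NVM_MAX */
		__le16 reserved;
	};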
+static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
+{
+ const struct firmware *image;
+ const u8 *data, *data_end;
+ u32 cmd_type;
+ int rc;
+
+ rc = request_firmware(&image, name, &cdev->pdev->dev);
+ if (rc) {
+ DP_ERR(cdev, "Failed to find '%s'\n", name);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Flashing '%s' - firmware's data at %p, size is %08x\n",
+ name, image->data, (u32)image->size);
+ data = image->data;
+ data_end = data + image->size;
+
+ rc = qed_nvm_flash_image_validate(cdev, image, &data);
+ if (rc)
+ goto exit;
+
+ while (data < data_end) {
+ bool check_resp = false;
+
+ /* Parse the actual command */
+ cmd_type = *((u32 *)data);
+ switch (cmd_type) {
+ case QED_NVM_FLASH_CMD_FILE_DATA:
+ rc = qed_nvm_flash_image_file_data(cdev, &data,
+ &check_resp);
+ break;
+ case QED_NVM_FLASH_CMD_FILE_START:
+ rc = qed_nvm_flash_image_file_start(cdev, &data,
+ &check_resp);
+ break;
+ case QED_NVM_FLASH_CMD_NVM_CHANGE:
+ rc = qed_nvm_flash_image_access(cdev, &data,
+ &check_resp);
+ break;
+ default:
+ DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (rc) {
+ DP_ERR(cdev, "Command %08x failed\n", cmd_type);
+ goto exit;
+ }
+
+ /* Check response if needed */
+ if (check_resp) {
+ u32 mcp_response = 0;
+
+ if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
+ DP_ERR(cdev, "Failed getting MCP response\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ switch (mcp_response & FW_MSG_CODE_MASK) {
+ case FW_MSG_CODE_OK:
+ case FW_MSG_CODE_NVM_OK:
+ case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
+ case FW_MSG_CODE_PHY_OK:
+ break;
+ default:
+ DP_ERR(cdev, "MFW returns error: %08x\n",
+ mcp_response);
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ release_firmware(image);
+
+ return rc;
+}
+
static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
u8 *buf, u16 len)
{
@@ -1719,6 +2056,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
+ .nvm_flash = &qed_nvm_flash,
.nvm_get_image = &qed_nvm_get_image,
.set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 6f46cb11f349..ec0d425766a7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -569,6 +569,31 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_src = i_buf;
+ mb_params.data_src_size = (u8)i_txn_size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ return 0;
+}
+
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 cmd,
@@ -2261,6 +2286,102 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
return rc;
}
+int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ u32 resp, param;
+ int rc;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
+ &resp, &param);
+ cdev->mcp_nvm_resp = resp;
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+int qed_mcp_nvm_write(struct qed_dev *cdev,
+ u32 cmd, u32 addr, u8 *p_buf, u32 len)
+{
+ u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = -EINVAL;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ switch (cmd) {
+ case QED_PUT_FILE_DATA:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+ break;
+ case QED_NVM_WRITE_NVRAM:
+ nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ while (buf_idx < len) {
+ buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
+ nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
+ addr) + buf_idx;
+ rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
+ &resp, &param, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc) {
+ DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_OK &&
+ resp != FW_MSG_CODE_NVM_OK &&
+ resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
+ DP_NOTICE(cdev,
+ "nvm write failed, resp = 0x%08x\n", resp);
+ rc = -EINVAL;
+ break;
+ }
+
+ /* This can be a lengthy process, and the scheduler may not be
+ * preemptible. Sleep briefly whenever the write crosses a 4 KB
+ * boundary to avoid hogging the CPU.
+ */
+ if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
+ usleep_range(1000, 2000);
+
+ buf_idx += buf_size;
+ }
+
+ cdev->mcp_nvm_resp = resp;
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 drv_mb_param = 0, rsp, param;
@@ -2303,9 +2424,9 @@ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return rc;
}
-int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *num_images)
+int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *num_images)
{
u32 drv_mb_param = 0, rsp;
int rc = 0;
@@ -2324,10 +2445,10 @@ int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct bist_nvm_image_att *p_image_att,
- u32 image_index)
+int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att,
+ u32 image_index)
{
u32 buf_size = 0, param, resp = 0, resp_param = 0;
int rc;
@@ -2351,16 +2472,71 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
return rc;
}
+int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
+{
+ struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info;
+ struct qed_ptt *p_ptt;
+ int rc;
+ u32 i;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+
+ /* Acquire from MFW the amount of available images */
+ nvm_info->num_images = 0;
+ rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
+ p_ptt, &nvm_info->num_images);
+ if (rc == -EOPNOTSUPP) {
+ DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
+ goto out;
+ } else if (rc || !nvm_info->num_images) {
+ DP_ERR(p_hwfn, "Failed getting number of images\n");
+ goto err0;
+ }
+
+ nvm_info->image_att = kmalloc_array(nvm_info->num_images,
+ sizeof(struct bist_nvm_image_att),
+ GFP_KERNEL);
+ if (!nvm_info->image_att) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ /* Iterate over images and get their attributes */
+ for (i = 0; i < nvm_info->num_images; i++) {
+ rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
+ &nvm_info->image_att[i], i);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed getting image index %d attributes\n", i);
+ goto err1;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
+ nvm_info->image_att[i].len);
+ }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+
+err1:
+ kfree(nvm_info->image_att);
+err0:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
static int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_nvm_images image_id,
struct qed_nvm_image_att *p_image_att)
{
- struct bist_nvm_image_att mfw_image_att;
enum nvm_image_type type;
- u32 num_images, i;
- int rc;
+ u32 i;
/* Translate image_id into MFW definitions */
switch (image_id) {
@@ -2376,29 +2552,18 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- /* Learn number of images, then traverse and see if one fits */
- rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
- if (rc || !num_images)
- return -EINVAL;
-
- for (i = 0; i < num_images; i++) {
- rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
- &mfw_image_att, i);
- if (rc)
- return rc;
-
- if (type == mfw_image_att.image_type)
+ for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
+ if (type == p_hwfn->nvm_info.image_att[i].image_type)
break;
- }
- if (i == num_images) {
+ if (i == p_hwfn->nvm_info.num_images) {
DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
"Failed to find nvram image of type %08x\n",
image_id);
- return -EINVAL;
+ return -ENOENT;
}
- p_image_att->start_addr = mfw_image_att.nvm_start_addr;
- p_image_att->length = mfw_image_att.len;
+ p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
+ p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index c7ec2395d1ce..8a5c988d0c3c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -443,6 +443,40 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
*/
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
+/**
+ * @brief Write to nvm
+ *
+ * @param cdev
+ * @param cmd - nvm command
+ * @param addr - nvm offset
+ * @param p_buf - nvm write buffer
+ * @param len - buffer len
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_nvm_write(struct qed_dev *cdev,
+ u32 cmd, u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Put file begin
+ *
+ * @param cdev
+ * @param addr - nvm offset
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr);
+
+/**
+ * @brief Check latest response
+ *
+ * @param cdev
+ * @param p_buf - nvm write buffer
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);
+
struct qed_nvm_image_att {
u32 start_addr;
u32 length;
@@ -496,9 +530,9 @@ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
*
* @return int - 0 - operation was successful.
*/
-int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *num_images);
+int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *num_images);
/**
* @brief Bist nvm test - get image attributes by index
@@ -510,10 +544,10 @@ int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
*
* @return int - 0 - operation was successful.
*/
-int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct bist_nvm_image_att *p_image_att,
- u32 image_index);
+int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att,
+ u32 image_index);
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
@@ -957,4 +991,12 @@ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
* @param p_ptt
*/
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief Populate the nvm info shadow in the given hardware function
+ *
+ * @param p_hwfn
+ */
+int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
index 1bafc05db2b8..cf1d4476f9d8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -125,10 +125,11 @@ int qed_selftest_nvram(struct qed_dev *cdev)
}
/* Acquire from MFW the amount of available images */
- rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
+ rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, p_ptt, &num_images);
if (rc || !num_images) {
DP_ERR(p_hwfn, "Failed getting number of images\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto err0;
}
/* Iterate over images and validate CRC */
@@ -136,8 +137,8 @@ int qed_selftest_nvram(struct qed_dev *cdev)
/* This mailbox returns information about the image required for
* reading it.
*/
- rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
- &image_att, i);
+ rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
+ &image_att, i);
if (rc) {
DP_ERR(p_hwfn,
"Failed getting image index %d attributes\n",
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 4ca3847fffd4..ecbf1ded7a39 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -699,6 +699,14 @@ static u32 qede_get_link(struct net_device *dev)
return current_link.link_up;
}
+static int qede_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return edev->ops->common->nvm_flash(edev->cdev, flash->data);
+}
+
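This hooks the new qed nvm_flash operation into ethtool's standard flash_device path, so the whole flow is reachable through the stock ETHTOOL_FLASHDEV ioctl (what `ethtool -f` issues). A minimal user-space sketch; the interface and firmware names are placeholders:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int flash_nvm(const char *ifname, const char *fw_name)
	{
		struct ethtool_flash efl = { .cmd = ETHTOOL_FLASHDEV };
		struct ifreq ifr = {0};
		int fd, rc;

		/* fw_name must be a file reachable by request_firmware() */
		strncpy(efl.data, fw_name, sizeof(efl.data) - 1);
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&efl;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;
		rc = ioctl(fd, SIOCETHTOOL, &ifr);
		close(fd);
		return rc;
	}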
static int qede_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
@@ -1806,6 +1814,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_tunable = qede_get_tunable,
.set_tunable = qede_set_tunable,
+ .flash_device = qede_flash_device,
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 9e5264d8773b..b48f76182049 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1858,8 +1858,9 @@ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
qdev->small_buf_release_cnt -= 8;
}
wmb();
- writel(qdev->small_buf_q_producer_index,
- &port_regs->CommonRegs.rxSmallQProducerIndex);
+ writel_relaxed(qdev->small_buf_q_producer_index,
+ &port_regs->CommonRegs.rxSmallQProducerIndex);
+ mmiowb();
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 46b0372dd032..97c146e7698a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -478,7 +478,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
wmb();
/* clear the interrupt trigger control register */
- writel(0, adapter->isr_int_vec);
+ writel_relaxed(0, adapter->isr_int_vec);
intr_val = readl(adapter->isr_int_vec);
do {
intr_val = readl(adapter->tgt_status_reg);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 287d89dd086f..891f03a7a33d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1175,81 +1175,81 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
}
static const struct device_attribute dev_attr_bridged_mode = {
- .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
- .show = qlcnic_show_bridged_mode,
- .store = qlcnic_store_bridged_mode,
+ .attr = { .name = "bridged_mode", .mode = 0644 },
+ .show = qlcnic_show_bridged_mode,
+ .store = qlcnic_store_bridged_mode,
};
static const struct device_attribute dev_attr_diag_mode = {
- .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "diag_mode", .mode = 0644 },
.show = qlcnic_show_diag_mode,
.store = qlcnic_store_diag_mode,
};
static const struct device_attribute dev_attr_beacon = {
- .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "beacon", .mode = 0644 },
.show = qlcnic_show_beacon,
.store = qlcnic_store_beacon,
};
static const struct bin_attribute bin_attr_crb = {
- .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "crb", .mode = 0644 },
.size = 0,
.read = qlcnic_sysfs_read_crb,
.write = qlcnic_sysfs_write_crb,
};
static const struct bin_attribute bin_attr_mem = {
- .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "mem", .mode = 0644 },
.size = 0,
.read = qlcnic_sysfs_read_mem,
.write = qlcnic_sysfs_write_mem,
};
static const struct bin_attribute bin_attr_npar_config = {
- .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "npar_config", .mode = 0644 },
.size = 0,
.read = qlcnic_sysfs_read_npar_config,
.write = qlcnic_sysfs_write_npar_config,
};
static const struct bin_attribute bin_attr_pci_config = {
- .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "pci_config", .mode = 0644 },
.size = 0,
.read = qlcnic_sysfs_read_pci_config,
.write = NULL,
};
static const struct bin_attribute bin_attr_port_stats = {
- .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "port_stats", .mode = 0644 },
.size = 0,
.read = qlcnic_sysfs_get_port_stats,
.write = qlcnic_sysfs_clear_port_stats,
};
static const struct bin_attribute bin_attr_esw_stats = {
- .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "esw_stats", .mode = 0644 },
.size = 0,
.read = qlcnic_sysfs_get_esw_stats,
.write = qlcnic_sysfs_clear_esw_stats,
};
static const struct bin_attribute bin_attr_esw_config = {
- .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "esw_config", .mode = 0644 },
.size = 0,
.read = qlcnic_sysfs_read_esw_config,
.write = qlcnic_sysfs_write_esw_config,
};
static const struct bin_attribute bin_attr_pm_config = {
- .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "pm_config", .mode = 0644 },
.size = 0,
.read = qlcnic_sysfs_read_pm_config,
.write = qlcnic_sysfs_write_pm_config,
};
static const struct bin_attribute bin_attr_flash = {
- .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
+ .attr = { .name = "flash", .mode = 0644 },
.size = 0,
.read = qlcnic_83xx_sysfs_flash_read_handler,
.write = qlcnic_83xx_sysfs_flash_write_handler,
@@ -1276,7 +1276,7 @@ static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
}
/* hwmon-sysfs attributes */
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+static SENSOR_DEVICE_ATTR(temp1_input, 0444,
qlcnic_hwmon_show_temp, NULL, 1);
static struct attribute *qlcnic_hwmon_attrs[] = {
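The mode changes in this file are mechanical: S_IRUGO | S_IWUSR is 0644 and S_IRUGO alone is 0444, and checkpatch prefers the octal literals because they are easier to audit than the symbolic forms. The same conversion shows up in qca_debug.c and in sxgbe's module_param() calls below. For reference (standard VFS mode bits; ATTR_RO/ATTR_RW are hypothetical names used only to show the equivalence):

	#define ATTR_RO	0444	/* S_IRUGO           : r--r--r-- */
	#define ATTR_RW	0644	/* S_IRUGO | S_IWUSR : rw-r--r-- */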
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 84ac50f92c9c..3e71b65a9546 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2185,6 +2185,22 @@ static inline void ql_write_db_reg(u32 val, void __iomem *addr)
}
/*
+ * Doorbell Registers:
+ * Doorbell registers are virtual registers in the PCI memory space.
+ * The space is allocated by the chip during PCI initialization. The
+ * device driver finds the doorbell address in BAR 3 in PCI config space.
+ * The registers are used to control outbound and inbound queues, for
+ * example the producer index for an outbound queue. Each queue uses
+ * one 4 KB chunk of memory. The lower half of the space is for
+ * outbound queues, the upper half for inbound queues.
+ * The write itself is relaxed, so the caller has to guarantee ordering.
+ */
+static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr)
+{
+ writel_relaxed(val, addr);
+}
+
+/*
* Shadow Registers:
* Outbound queues have a consumer index that is maintained by the chip.
* Inbound queues have a producer index that is maintained by the chip.
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 50038d946ced..8293c2028002 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2700,7 +2700,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
tx_ring->prod_idx = 0;
wmb();
- ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+ ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
+ mmiowb();
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
"tx queued, slot %d, len %d\n",
tx_ring->prod_idx, skb->len);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 92b6be9c4429..51d89c86e60f 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -151,7 +151,7 @@ qcaspi_init_device_debugfs(struct qcaspi *qca)
dev_name(&qca->net_dev->dev));
return;
}
- debugfs_create_file("info", S_IFREG | S_IRUGO, device_root, qca,
+ debugfs_create_file("info", S_IFREG | 0444, device_root, qca,
&qcaspi_info_ops);
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 38d9356ebcc4..d33988570217 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -312,6 +312,8 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
ep = rmnet_get_endpoint(port, priv->mux_id);
+ if (!ep)
+ return -ENODEV;
hlist_del_init_rcu(&ep->hlnode);
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 3557fe3f2bb5..306558ef36b5 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -450,16 +450,6 @@ static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
}
-static bool sh_eth_is_gether(struct sh_eth_private *mdp)
-{
- return mdp->reg_offset == sh_eth_offset_gigabit;
-}
-
-static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
-{
- return mdp->reg_offset == sh_eth_offset_fast_rz;
-}
-
static void sh_eth_select_mii(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -501,6 +491,62 @@ static void sh_eth_chip_reset(struct net_device *ndev)
mdelay(1);
}
+static int sh_eth_soft_reset(struct net_device *ndev)
+{
+ sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
+ mdelay(3);
+ sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
+
+ return 0;
+}
+
+static int sh_eth_check_soft_reset(struct net_device *ndev)
+{
+ int cnt;
+
+ for (cnt = 100; cnt > 0; cnt--) {
+ if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
+ return 0;
+ mdelay(1);
+ }
+
+ netdev_err(ndev, "Device reset failed\n");
+ return -ETIMEDOUT;
+}
+
+static int sh_eth_soft_reset_gether(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ret;
+
+ sh_eth_write(ndev, EDSR_ENALL, EDSR);
+ sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
+
+ ret = sh_eth_check_soft_reset(ndev);
+ if (ret)
+ return ret;
+
+ /* Table Init */
+ sh_eth_write(ndev, 0, TDLAR);
+ sh_eth_write(ndev, 0, TDFAR);
+ sh_eth_write(ndev, 0, TDFXR);
+ sh_eth_write(ndev, 0, TDFFR);
+ sh_eth_write(ndev, 0, RDLAR);
+ sh_eth_write(ndev, 0, RDFAR);
+ sh_eth_write(ndev, 0, RDFXR);
+ sh_eth_write(ndev, 0, RDFFR);
+
+ /* Reset HW CRC register */
+ if (mdp->cd->hw_checksum)
+ sh_eth_write(ndev, 0, CSMR);
+
+ /* Select MII mode */
+ if (mdp->cd->select_mii)
+ sh_eth_select_mii(ndev);
+
+ return ret;
+}
+
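This block replaces the monolithic sh_eth_reset() (deleted further down) with per-SoC operations: GEther-class parts point soft_reset at sh_eth_soft_reset_gether(), which performs the EDSR/EDMR sequence with a timeout and reinitialises the descriptor-table and CSMR registers, while simpler parts use the plain EDMR toggle in sh_eth_soft_reset(). Every former call site then collapses to an indirect call, as a later hunk shows:

	/* Soft Reset */
	ret = mdp->cd->soft_reset(ndev);	/* was: sh_eth_reset(ndev) */

Binding the choice in the static sh_eth_cpu_data descriptor removes the runtime sh_eth_is_gether()/sh_eth_is_rz_fast_ether() predicates entirely.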
static void sh_eth_set_rate_gether(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -521,11 +567,14 @@ static void sh_eth_set_rate_gether(struct net_device *ndev)
#ifdef CONFIG_OF
/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
+ .soft_reset = sh_eth_soft_reset_gether,
+
.chip_reset = sh_eth_chip_reset,
.set_duplex = sh_eth_set_duplex,
.register_type = SH_ETH_REG_FAST_RZ,
+ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD,
.ecsipr_value = ECSIPR_ICDIP,
.eesipr_value = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
@@ -552,8 +601,10 @@ static struct sh_eth_cpu_data r7s72100_data = {
.rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
+ .xdfar_rw = 1,
.hw_checksum = 1,
.tsu = 1,
+ .no_tx_cntrs = 1,
};
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
@@ -565,12 +616,15 @@ static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
+ .soft_reset = sh_eth_soft_reset_gether,
+
.chip_reset = sh_eth_chip_reset_r8a7740,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
.register_type = SH_ETH_REG_GIGABIT,
+ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
@@ -597,10 +651,12 @@ static struct sh_eth_cpu_data r8a7740_data = {
.rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
+ .xdfar_rw = 1,
.hw_checksum = 1,
.tsu = 1,
.select_mii = 1,
.magic = 1,
+ .cexcr = 1,
};
/* There is CPU dependent code */
@@ -620,11 +676,14 @@ static void sh_eth_set_rate_rcar(struct net_device *ndev)
/* R-Car Gen1 */
static struct sh_eth_cpu_data rcar_gen1_data = {
+ .soft_reset = sh_eth_soft_reset,
+
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_rcar,
.register_type = SH_ETH_REG_FAST_RCAR,
+ .edtrr_trns = EDTRR_TRNS_ETHER,
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
@@ -647,11 +706,14 @@ static struct sh_eth_cpu_data rcar_gen1_data = {
/* R-Car Gen2 and RZ/G1 */
static struct sh_eth_cpu_data rcar_gen2_data = {
+ .soft_reset = sh_eth_soft_reset,
+
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_rcar,
.register_type = SH_ETH_REG_FAST_RCAR,
+ .edtrr_trns = EDTRR_TRNS_ETHER,
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
ECSIPR_MPDIP,
@@ -694,11 +756,14 @@ static void sh_eth_set_rate_sh7724(struct net_device *ndev)
/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
+ .soft_reset = sh_eth_soft_reset,
+
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_sh7724,
.register_type = SH_ETH_REG_FAST_SH4,
+ .edtrr_trns = EDTRR_TRNS_ETHER,
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
@@ -736,11 +801,14 @@ static void sh_eth_set_rate_sh7757(struct net_device *ndev)
/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
+ .soft_reset = sh_eth_soft_reset,
+
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_sh7757,
.register_type = SH_ETH_REG_FAST_SH4,
+ .edtrr_trns = EDTRR_TRNS_ETHER,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
@@ -808,12 +876,15 @@ static void sh_eth_set_rate_giga(struct net_device *ndev)
/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
+ .soft_reset = sh_eth_soft_reset_gether,
+
.chip_reset = sh_eth_chip_reset_giga,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_giga,
.register_type = SH_ETH_REG_GIGABIT,
+ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
@@ -841,18 +912,23 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
+ .xdfar_rw = 1,
.tsu = 1,
+ .cexcr = 1,
.dual_port = 1,
};
/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
+ .soft_reset = sh_eth_soft_reset_gether,
+
.chip_reset = sh_eth_chip_reset,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
.register_type = SH_ETH_REG_GIGABIT,
+ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
@@ -875,20 +951,25 @@ static struct sh_eth_cpu_data sh7734_data = {
.hw_swap = 1,
.no_trimd = 1,
.no_ade = 1,
+ .xdfar_rw = 1,
.tsu = 1,
.hw_checksum = 1,
.select_mii = 1,
.magic = 1,
+ .cexcr = 1,
};
/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
+ .soft_reset = sh_eth_soft_reset_gether,
+
.chip_reset = sh_eth_chip_reset,
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
.register_type = SH_ETH_REG_GIGABIT,
+ .edtrr_trns = EDTRR_TRNS_GETHER,
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
@@ -910,15 +991,20 @@ static struct sh_eth_cpu_data sh7763_data = {
.hw_swap = 1,
.no_trimd = 1,
.no_ade = 1,
+ .xdfar_rw = 1,
.tsu = 1,
.irq_flags = IRQF_SHARED,
.magic = 1,
+ .cexcr = 1,
.dual_port = 1,
};
static struct sh_eth_cpu_data sh7619_data = {
+ .soft_reset = sh_eth_soft_reset,
+
.register_type = SH_ETH_REG_FAST_SH3_SH2,
+ .edtrr_trns = EDTRR_TRNS_ETHER,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
@@ -935,8 +1021,11 @@ static struct sh_eth_cpu_data sh7619_data = {
};
static struct sh_eth_cpu_data sh771x_data = {
+ .soft_reset = sh_eth_soft_reset,
+
.register_type = SH_ETH_REG_FAST_SH3_SH2,
+ .edtrr_trns = EDTRR_TRNS_ETHER,
.eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
@@ -974,59 +1063,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
-static int sh_eth_check_reset(struct net_device *ndev)
-{
- int cnt;
-
- for (cnt = 100; cnt > 0; cnt--) {
- if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
- return 0;
- mdelay(1);
- }
-
- netdev_err(ndev, "Device reset failed\n");
- return -ETIMEDOUT;
-}
-
-static int sh_eth_reset(struct net_device *ndev)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- int ret = 0;
-
- if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
- sh_eth_write(ndev, EDSR_ENALL, EDSR);
- sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
-
- ret = sh_eth_check_reset(ndev);
- if (ret)
- return ret;
-
- /* Table Init */
- sh_eth_write(ndev, 0x0, TDLAR);
- sh_eth_write(ndev, 0x0, TDFAR);
- sh_eth_write(ndev, 0x0, TDFXR);
- sh_eth_write(ndev, 0x0, TDFFR);
- sh_eth_write(ndev, 0x0, RDLAR);
- sh_eth_write(ndev, 0x0, RDFAR);
- sh_eth_write(ndev, 0x0, RDFXR);
- sh_eth_write(ndev, 0x0, RDFFR);
-
- /* Reset HW CRC register */
- if (mdp->cd->hw_checksum)
- sh_eth_write(ndev, 0x0, CSMR);
-
- /* Select MII mode */
- if (mdp->cd->select_mii)
- sh_eth_select_mii(ndev);
- } else {
- sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
- mdelay(3);
- sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
- }
-
- return ret;
-}
-
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
@@ -1069,14 +1105,6 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
}
}
-static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
-{
- if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
- return EDTRR_TRNS_GETHER;
- else
- return EDTRR_TRNS_ETHER;
-}
-
struct bb_info {
void (*set_gate)(void *addr);
struct mdiobb_ctrl ctrl;
@@ -1273,8 +1301,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* Rx descriptor address set */
if (i == 0) {
sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
- if (sh_eth_is_gether(mdp) ||
- sh_eth_is_rz_fast_ether(mdp))
+ if (mdp->cd->xdfar_rw)
sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
}
}
@@ -1296,8 +1323,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
if (i == 0) {
/* Tx descriptor address set */
sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
- if (sh_eth_is_gether(mdp) ||
- sh_eth_is_rz_fast_ether(mdp))
+ if (mdp->cd->xdfar_rw)
sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
}
}
@@ -1362,7 +1388,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
int ret;
/* Soft Reset */
- ret = sh_eth_reset(ndev);
+ ret = mdp->cd->soft_reset(ndev);
if (ret)
return ret;
@@ -1463,7 +1489,7 @@ static void sh_eth_dev_exit(struct net_device *ndev)
*/
msleep(2); /* max frame time at 10 Mbps < 1250 us */
sh_eth_get_stats(ndev);
- sh_eth_reset(ndev);
+ mdp->cd->soft_reset(ndev);
/* Set MAC address again */
update_mac_address(ndev);
@@ -1716,9 +1742,9 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
sh_eth_tx_free(ndev, true);
/* SH7712 BUG */
- if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
+ if (edtrr ^ mdp->cd->edtrr_trns) {
/* tx dma start */
- sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
+ sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
}
/* wakeup */
netif_wake_queue(ndev);
@@ -2477,8 +2503,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
mdp->cur_tx++;
- if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
- sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
+ if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
+ sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
return NETDEV_TX_OK;
}
@@ -2503,7 +2529,7 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- if (sh_eth_is_rz_fast_ether(mdp))
+ if (mdp->cd->no_tx_cntrs)
return &ndev->stats;
if (!mdp->is_opened)
@@ -2513,7 +2539,7 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
- if (sh_eth_is_gether(mdp)) {
+ if (mdp->cd->cexcr) {
sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
CERCR);
sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 21047d58a93f..a0416e04306a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -469,6 +469,9 @@ struct sh_eth_rxdesc {
/* This structure is used by each CPU dependency handling. */
struct sh_eth_cpu_data {
+ /* mandatory functions */
+ int (*soft_reset)(struct net_device *ndev);
+
/* optional functions */
void (*chip_reset)(struct net_device *ndev);
void (*set_duplex)(struct net_device *ndev);
@@ -476,6 +479,7 @@ struct sh_eth_cpu_data {
/* mandatory initialize value */
int register_type;
+ u32 edtrr_trns;
u32 eesipr_value;
/* optional initialize value */
@@ -504,11 +508,14 @@ struct sh_eth_cpu_data {
unsigned rpadir:1; /* E-DMAC has RPADIR */
unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
+ unsigned xdfar_rw:1; /* E-DMAC has writeable RDFAR/TDFAR */
unsigned hw_checksum:1; /* E-DMAC has CSMR */
unsigned select_mii:1; /* EtherC has RMII_MII (MII select register) */
unsigned rmiimode:1; /* EtherC has RMIIMODE register */
unsigned rtrate:1; /* EtherC has RTRATE register */
unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */
+ unsigned no_tx_cntrs:1; /* EtherC DOES NOT have TX error counters */
+ unsigned cexcr:1; /* EtherC has CERCR/CEECR */
unsigned dual_port:1; /* Dual EtherC/E-DMAC */
};
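The new fields follow one rule: every behavioural difference that used to be inferred from the register layout becomes explicit data in sh_eth_cpu_data. edtrr_trns carries the per-family TX-start value, while xdfar_rw, no_tx_cntrs and cexcr are capability bits. The before/after shape, taken from the sh_eth.c hunks above:

	/* before: behaviour inferred from the register layout */
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
		sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);

	/* after: behaviour named by a capability bit */
	if (mdp->cd->xdfar_rw)
		sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);

A new SoC is then supported by filling in its descriptor rather than by extending predicate helpers.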
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index fd35d8004a78..a9da1ad4b4f2 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -57,9 +57,9 @@
static int debug = -1;
static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;
-module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+module_param(eee_timer, int, 0644);
-module_param(debug, int, S_IRUGO | S_IWUSR);
+module_param(debug, int, 0644);
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e100273b623d..c4c45c94da77 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -96,17 +96,15 @@ struct efx_ef10_filter_table {
MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
unsigned int rx_match_count;
+ struct rw_semaphore lock; /* Protects entries */
struct {
unsigned long spec; /* pointer to spec plus flag bits */
-/* BUSY flag indicates that an update is in progress. AUTO_OLD is
- * used to mark and sweep MAC filters for the device address lists.
- */
-#define EFX_EF10_FILTER_FLAG_BUSY 1UL
+/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
+/* unused flag 1UL */
#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
#define EFX_EF10_FILTER_FLAGS 3UL
u64 handle; /* firmware handle */
} *entry;
- wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
@@ -1501,6 +1499,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
/* All our allocations have been reset */
nic_data->must_realloc_vis = true;
+ nic_data->must_restore_rss_contexts = true;
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
@@ -2901,6 +2900,8 @@ static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx,
{
int rc;
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL);
if (rc)
@@ -2931,6 +2932,8 @@ static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx,
size_t outlen;
int rc, i;
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
@@ -2974,14 +2977,25 @@ static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx,
static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
{
- return efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context);
+ int rc;
+
+ mutex_lock(&efx->rss_lock);
+ rc = efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context);
+ mutex_unlock(&efx->rss_lock);
+ return rc;
}
static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx)
{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct efx_rss_context *ctx;
int rc;
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+ if (!nic_data->must_restore_rss_contexts)
+ return;
+
list_for_each_entry(ctx, &efx->rss_context.list, list) {
/* previous NIC RSS context is gone */
ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
@@ -2995,6 +3009,7 @@ static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx)
"; RSS filters may fail to be applied\n",
ctx->user_id, rc);
}
+ nic_data->must_restore_rss_contexts = false;
}
static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
@@ -4302,26 +4317,35 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace_equal)
{
- struct efx_ef10_filter_table *table = efx->filter_state;
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_filter_table *table;
struct efx_filter_spec *saved_spec;
struct efx_rss_context *ctx = NULL;
unsigned int match_pri, hash;
unsigned int priv_flags;
+ bool rss_locked = false;
bool replacing = false;
+ unsigned int depth, i;
int ins_index = -1;
DEFINE_WAIT(wait);
bool is_mc_recip;
s32 rc;
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+
/* For now, only support RX filters */
if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
- EFX_FILTER_FLAG_RX)
- return -EINVAL;
+ EFX_FILTER_FLAG_RX) {
+ rc = -EINVAL;
+ goto out_unlock;
+ }
rc = efx_ef10_filter_pri(table, spec);
if (rc < 0)
- return rc;
+ goto out_unlock;
match_pri = rc;
hash = efx_ef10_filter_hash(spec);
@@ -4330,91 +4354,70 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
+ mutex_lock(&efx->rss_lock);
+ rss_locked = true;
if (spec->rss_context)
- ctx = efx_find_rss_context_entry(spec->rss_context,
- &efx->rss_context.list);
+ ctx = efx_find_rss_context_entry(efx, spec->rss_context);
else
ctx = &efx->rss_context;
- if (!ctx)
- return -ENOENT;
- if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
- return -EOPNOTSUPP;
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
+ rc = -EOPNOTSUPP;
+ goto out_unlock;
+ }
}
/* Find any existing filters with the same match tuple or
- * else a free slot to insert at. If any of them are busy,
- * we have to wait and retry.
+ * else a free slot to insert at.
*/
- for (;;) {
- unsigned int depth = 1;
- unsigned int i;
-
- spin_lock_bh(&efx->filter_lock);
-
- for (;;) {
- i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
- saved_spec = efx_ef10_filter_entry_spec(table, i);
+ for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
- if (!saved_spec) {
- if (ins_index < 0)
- ins_index = i;
- } else if (efx_ef10_filter_equal(spec, saved_spec)) {
- if (table->entry[i].spec &
- EFX_EF10_FILTER_FLAG_BUSY)
- break;
- if (spec->priority < saved_spec->priority &&
- spec->priority != EFX_FILTER_PRI_AUTO) {
- rc = -EPERM;
- goto out_unlock;
- }
- if (!is_mc_recip) {
- /* This is the only one */
- if (spec->priority ==
- saved_spec->priority &&
- !replace_equal) {
- rc = -EEXIST;
- goto out_unlock;
- }
- ins_index = i;
- goto found;
- } else if (spec->priority >
- saved_spec->priority ||
- (spec->priority ==
- saved_spec->priority &&
- replace_equal)) {
- if (ins_index < 0)
- ins_index = i;
- else
- __set_bit(depth, mc_rem_map);
- }
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (spec->priority < saved_spec->priority &&
+ spec->priority != EFX_FILTER_PRI_AUTO) {
+ rc = -EPERM;
+ goto out_unlock;
}
-
- /* Once we reach the maximum search depth, use
- * the first suitable slot or return -EBUSY if
- * there was none
- */
- if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
- if (ins_index < 0) {
- rc = -EBUSY;
+ if (!is_mc_recip) {
+ /* This is the only one */
+ if (spec->priority ==
+ saved_spec->priority &&
+ !replace_equal) {
+ rc = -EEXIST;
goto out_unlock;
}
- goto found;
+ ins_index = i;
+ break;
+ } else if (spec->priority >
+ saved_spec->priority ||
+ (spec->priority ==
+ saved_spec->priority &&
+ replace_equal)) {
+ if (ins_index < 0)
+ ins_index = i;
+ else
+ __set_bit(depth, mc_rem_map);
}
-
- ++depth;
}
-
- prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_bh(&efx->filter_lock);
- schedule();
}
-found:
- /* Create a software table entry if necessary, and mark it
- * busy. We might yet fail to insert, but any attempt to
- * insert a conflicting filter while we're waiting for the
- * firmware must find the busy entry.
+ /* Once we reach the maximum search depth, use the first suitable
+ * slot, or return -EBUSY if there was none
*/
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
+ /* Create a software table entry if necessary. */
saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
if (saved_spec) {
if (spec->priority == EFX_FILTER_PRI_AUTO &&
@@ -4438,28 +4441,19 @@ found:
*saved_spec = *spec;
priv_flags = 0;
}
- efx_ef10_filter_set_entry(table, ins_index, saved_spec,
- priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
-
- /* Mark lower-priority multicast recipients busy prior to removal */
- if (is_mc_recip) {
- unsigned int depth, i;
-
- for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
- i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
- if (test_bit(depth, mc_rem_map))
- table->entry[i].spec |=
- EFX_EF10_FILTER_FLAG_BUSY;
- }
- }
-
- spin_unlock_bh(&efx->filter_lock);
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+ /* Actually insert the filter on the HW */
rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
ctx, replacing);
+ if (rc == -EINVAL && nic_data->must_realloc_vis)
+ /* The MC rebooted under us, causing it to reject our filter
+ * insertion as pointing to an invalid VI (spec->dmaq_id).
+ */
+ rc = -EAGAIN;
+
/* Finalise the software table entry */
- spin_lock_bh(&efx->filter_lock);
if (rc == 0) {
if (replacing) {
/* Update the fields that may differ */
@@ -4475,6 +4469,12 @@ found:
} else if (!replacing) {
kfree(saved_spec);
saved_spec = NULL;
+ } else {
+ /* We failed to replace, so the old filter is still present.
+ * Roll back the software table to reflect this. In fact the
+ * efx_ef10_filter_set_entry() call below will do the right
+ * thing, so nothing extra is needed here.
+ */
}
efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
@@ -4496,7 +4496,6 @@ found:
priv_flags = efx_ef10_filter_entry_flags(table, i);
if (rc == 0) {
- spin_unlock_bh(&efx->filter_lock);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
@@ -4504,15 +4503,12 @@ found:
rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
inbuf, sizeof(inbuf),
NULL, 0, NULL);
- spin_lock_bh(&efx->filter_lock);
}
if (rc == 0) {
kfree(saved_spec);
saved_spec = NULL;
priv_flags = 0;
- } else {
- priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
}
efx_ef10_filter_set_entry(table, i, saved_spec,
priv_flags);
@@ -4523,10 +4519,11 @@ found:
if (rc == 0)
rc = efx_ef10_make_filter_id(match_pri, ins_index);
- wake_up_all(&table->waitq);
out_unlock:
- spin_unlock_bh(&efx->filter_lock);
- finish_wait(&table->waitq, &wait);
+ if (rss_locked)
+ mutex_unlock(&efx->rss_lock);
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
return rc;
}
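After this rewrite the insert path takes its locks in a fixed order and can sleep while holding them, which is what lets the BUSY-flag/waitqueue dance disappear: efx->filter_sem (read) keeps the filter table itself alive, table->lock (write) serialises updates to the entries, and efx->rss_lock is taken only for RSS specs. A condensed sketch of the new shape (declarations and most error paths elided):

	down_read(&efx->filter_sem);		/* table pointer stays valid */
	table = efx->filter_state;
	down_write(&table->lock);		/* exclusive access to entries */
	if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
		mutex_lock(&efx->rss_lock);	/* RSS context list stable */
		rss_locked = true;
	}

	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
				  ctx, replacing);	/* may sleep in MCDI */

	if (rss_locked)
		mutex_unlock(&efx->rss_lock);
	up_write(&table->lock);
	up_read(&efx->filter_sem);

The -EAGAIN translation of -EINVAL under must_realloc_vis covers the window where the MC has rebooted and our VI numbers are stale.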
@@ -4539,6 +4536,8 @@ static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
* If !by_index, remove by ID
* If by_index, remove by index
* Filter ID may come from userland and must be range-checked.
+ * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
+ * for write.
*/
static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
unsigned int priority_mask,
@@ -4553,45 +4552,23 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
DEFINE_WAIT(wait);
int rc;
- /* Find the software table entry and mark it busy. Don't
- * remove it yet; any attempt to update while we're waiting
- * for the firmware must find the busy entry.
- */
- for (;;) {
- spin_lock_bh(&efx->filter_lock);
- if (!(table->entry[filter_idx].spec &
- EFX_EF10_FILTER_FLAG_BUSY))
- break;
- prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_bh(&efx->filter_lock);
- schedule();
- }
-
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (!spec ||
(!by_index &&
efx_ef10_filter_pri(table, spec) !=
- efx_ef10_filter_get_unsafe_pri(filter_id))) {
- rc = -ENOENT;
- goto out_unlock;
- }
+ efx_ef10_filter_get_unsafe_pri(filter_id)))
+ return -ENOENT;
if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
/* Just remove flags */
spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
- rc = 0;
- goto out_unlock;
- }
-
- if (!(priority_mask & (1U << spec->priority))) {
- rc = -ENOENT;
- goto out_unlock;
+ return 0;
}
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
- spin_unlock_bh(&efx->filter_lock);
+ if (!(priority_mask & (1U << spec->priority)))
+ return -ENOENT;
if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
/* Reset to an automatic filter */
@@ -4609,7 +4586,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
&efx->rss_context,
true);
- spin_lock_bh(&efx->filter_lock);
if (rc == 0)
*spec = new_spec;
} else {
@@ -4624,7 +4600,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
inbuf, sizeof(inbuf), NULL, 0, NULL);
- spin_lock_bh(&efx->filter_lock);
if ((rc == 0) || (rc == -ENOENT)) {
/* Filter removed OK or didn't actually exist */
kfree(spec);
@@ -4636,11 +4611,6 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
}
}
- table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
- wake_up_all(&table->waitq);
-out_unlock:
- spin_unlock_bh(&efx->filter_lock);
- finish_wait(&table->waitq, &wait);
return rc;
}
@@ -4648,17 +4618,33 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
- return efx_ef10_filter_remove_internal(efx, 1U << priority,
- filter_id, false);
+ struct efx_ef10_filter_table *table;
+ int rc;
+
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
+ false);
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
}
+/* Caller must hold efx->filter_sem for read */
static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+
if (filter_id == EFX_EF10_FILTER_ID_INVALID)
return;
- efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
+
+ down_write(&table->lock);
+ efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
+ true);
+ up_write(&table->lock);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -4666,11 +4652,13 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
u32 filter_id, struct efx_filter_spec *spec)
{
unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
- struct efx_ef10_filter_table *table = efx->filter_state;
const struct efx_filter_spec *saved_spec;
+ struct efx_ef10_filter_table *table;
int rc;
- spin_lock_bh(&efx->filter_lock);
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (saved_spec && saved_spec->priority == priority &&
efx_ef10_filter_pri(table, saved_spec) ==
@@ -4680,13 +4668,15 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
} else {
rc = -ENOENT;
}
- spin_unlock_bh(&efx->filter_lock);
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
return rc;
}
static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority)
+ enum efx_filter_priority priority)
{
+ struct efx_ef10_filter_table *table;
unsigned int priority_mask;
unsigned int i;
int rc;
@@ -4694,31 +4684,40 @@ static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
priority_mask = (((1U << (priority + 1)) - 1) &
~(1U << EFX_FILTER_PRI_AUTO));
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
rc = efx_ef10_filter_remove_internal(efx, priority_mask,
i, true);
if (rc && rc != -ENOENT)
- return rc;
+ break;
+ rc = 0;
}
- return 0;
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return rc;
}
static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
- struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_table *table;
unsigned int filter_idx;
s32 count = 0;
- spin_lock_bh(&efx->filter_lock);
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
if (table->entry[filter_idx].spec &&
efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
priority)
++count;
}
- spin_unlock_bh(&efx->filter_lock);
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
return count;
}
@@ -4733,12 +4732,15 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size)
{
- struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_table *table;
struct efx_filter_spec *spec;
unsigned int filter_idx;
s32 count = 0;
- spin_lock_bh(&efx->filter_lock);
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_read(&table->lock);
+
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (spec && spec->priority == priority) {
@@ -4752,202 +4754,42 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
filter_idx);
}
}
- spin_unlock_bh(&efx->filter_lock);
+ up_read(&table->lock);
+ up_read(&efx->filter_sem);
return count;
}
#ifdef CONFIG_RFS_ACCEL
-static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
-
-static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
- struct efx_filter_spec *saved_spec;
- unsigned int hash, i, depth = 1;
- bool replacing = false;
- int ins_index = -1;
- u64 cookie;
- s32 rc;
-
- /* Must be an RX filter without RSS and not for a multicast
- * destination address (RFS only works for connected sockets).
- * These restrictions allow us to pass only a tiny amount of
- * data through to the completion function.
- */
- EFX_WARN_ON_PARANOID(spec->flags !=
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
- EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
- EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
-
- hash = efx_ef10_filter_hash(spec);
-
- spin_lock_bh(&efx->filter_lock);
-
- /* Find any existing filter with the same match tuple or else
- * a free slot to insert at. If an existing filter is busy,
- * we have to give up.
- */
- for (;;) {
- i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
- saved_spec = efx_ef10_filter_entry_spec(table, i);
-
- if (!saved_spec) {
- if (ins_index < 0)
- ins_index = i;
- } else if (efx_ef10_filter_equal(spec, saved_spec)) {
- if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
- rc = -EBUSY;
- goto fail_unlock;
- }
- if (spec->priority < saved_spec->priority) {
- rc = -EPERM;
- goto fail_unlock;
- }
- ins_index = i;
- break;
- }
-
- /* Once we reach the maximum search depth, use the
- * first suitable slot or return -EBUSY if there was
- * none
- */
- if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
- if (ins_index < 0) {
- rc = -EBUSY;
- goto fail_unlock;
- }
- break;
- }
-
- ++depth;
- }
-
- /* Create a software table entry if necessary, and mark it
- * busy. We might yet fail to insert, but any attempt to
- * insert a conflicting filter while we're waiting for the
- * firmware must find the busy entry.
- */
- saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
- if (saved_spec) {
- replacing = true;
- } else {
- saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
- if (!saved_spec) {
- rc = -ENOMEM;
- goto fail_unlock;
- }
- *saved_spec = *spec;
- }
- efx_ef10_filter_set_entry(table, ins_index, saved_spec,
- EFX_EF10_FILTER_FLAG_BUSY);
-
- spin_unlock_bh(&efx->filter_lock);
-
- /* Pack up the variables needed on completion */
- cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
-
- efx_ef10_filter_push_prep(efx, spec, inbuf,
- table->entry[ins_index].handle, NULL,
- replacing);
- efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
- MC_CMD_FILTER_OP_OUT_LEN,
- efx_ef10_filter_rfs_insert_complete, cookie);
-
- return ins_index;
-
-fail_unlock:
- spin_unlock_bh(&efx->filter_lock);
- return rc;
-}
-
-static void
-efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
- int rc, efx_dword_t *outbuf,
- size_t outlen_actual)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- unsigned int ins_index, dmaq_id;
- struct efx_filter_spec *spec;
- bool replacing;
-
- /* Unpack the cookie */
- replacing = cookie >> 31;
- ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
- dmaq_id = cookie & 0xffff;
-
- spin_lock_bh(&efx->filter_lock);
- spec = efx_ef10_filter_entry_spec(table, ins_index);
- if (rc == 0) {
- table->entry[ins_index].handle =
- MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
- if (replacing)
- spec->dmaq_id = dmaq_id;
- } else if (!replacing) {
- kfree(spec);
- spec = NULL;
- }
- efx_ef10_filter_set_entry(table, ins_index, spec, 0);
- spin_unlock_bh(&efx->filter_lock);
-
- wake_up_all(&table->waitq);
-}
-
-static void
-efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
- unsigned long filter_idx,
- int rc, efx_dword_t *outbuf,
- size_t outlen_actual);
-
static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int filter_idx)
{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_filter_spec *spec =
- efx_ef10_filter_entry_spec(table, filter_idx);
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_FILTER_OP_IN_HANDLE_OFST +
- MC_CMD_FILTER_OP_IN_HANDLE_LEN);
-
- if (!spec ||
- (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
- spec->priority != EFX_FILTER_PRI_HINT ||
- !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
- flow_id, filter_idx))
- return false;
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- MC_CMD_FILTER_OP_IN_OP_REMOVE);
- MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
- table->entry[filter_idx].handle);
- if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
- efx_ef10_filter_rfs_expire_complete, filter_idx))
- return false;
+ struct efx_ef10_filter_table *table;
+ struct efx_filter_spec *spec;
+ bool ret;
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
- return true;
-}
+ down_read(&efx->filter_sem);
+ table = efx->filter_state;
+ down_write(&table->lock);
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
-static void
-efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
- unsigned long filter_idx,
- int rc, efx_dword_t *outbuf,
- size_t outlen_actual)
-{
- struct efx_ef10_filter_table *table = efx->filter_state;
- struct efx_filter_spec *spec =
- efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec || spec->priority != EFX_FILTER_PRI_HINT) {
+ ret = true;
+ goto out_unlock;
+ }
- spin_lock_bh(&efx->filter_lock);
- if (rc == 0) {
- kfree(spec);
- efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
+ flow_id, filter_idx)) {
+ ret = false;
+ goto out_unlock;
}
- table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
- wake_up_all(&table->waitq);
- spin_unlock_bh(&efx->filter_lock);
+
+ ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
+ filter_idx, true) == 0;
+out_unlock:
+ up_write(&table->lock);
+ up_read(&efx->filter_sem);
+ return ret;
}
#endif /* CONFIG_RFS_ACCEL */
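With the table protected by a sleeping rwsem, the asynchronous ARFS machinery above (efx_ef10_filter_rfs_insert() plus the insert and expire MCDI completions) has no reason to exist: ARFS insertion now goes through the ordinary efx_ef10_filter_insert() path, and expiry is a synchronous remove under the write lock, as the surviving function shows:

	/* expire == a plain synchronous remove under table->lock */
	ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
					      filter_idx, true) == 0;

This is also why .filter_rfs_insert vanishes from both efx_nic_type tables at the end of this file.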
@@ -5142,9 +4984,9 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
table->vlan_filter =
!!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
INIT_LIST_HEAD(&table->vlan_list);
+ init_rwsem(&table->lock);
efx->filter_state = table;
- init_waitqueue_head(&table->waitq);
list_for_each_entry(vlan, &nic_data->vlan_list, list) {
rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
@@ -5186,7 +5028,8 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
if (!table)
return;
- spin_lock_bh(&efx->filter_lock);
+ down_write(&table->lock);
+ mutex_lock(&efx->rss_lock);
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_ef10_filter_entry_spec(table, filter_idx);
@@ -5203,8 +5046,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
goto not_restored;
}
if (spec->rss_context)
- ctx = efx_find_rss_context_entry(spec->rss_context,
- &efx->rss_context.list);
+ ctx = efx_find_rss_context_entry(efx, spec->rss_context);
else
ctx = &efx->rss_context;
if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
@@ -5224,15 +5066,11 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
}
}
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
- spin_unlock_bh(&efx->filter_lock);
-
rc = efx_ef10_filter_push(efx, spec,
&table->entry[filter_idx].handle,
ctx, false);
if (rc)
failed++;
- spin_lock_bh(&efx->filter_lock);
if (rc) {
not_restored:
@@ -5244,13 +5082,11 @@ not_restored:
kfree(spec);
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
- } else {
- table->entry[filter_idx].spec &=
- ~EFX_EF10_FILTER_FLAG_BUSY;
}
}
- spin_unlock_bh(&efx->filter_lock);
+ mutex_unlock(&efx->rss_lock);
+ up_write(&table->lock);
/* This can happen validly if the MC's capabilities have changed, so
* it is not an error.
@@ -5318,6 +5154,8 @@ static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
struct efx_ef10_filter_table *table = efx->filter_state;
unsigned int filter_idx;
+ efx_rwsem_assert_write_locked(&table->lock);
+
if (*id != EFX_EF10_FILTER_ID_INVALID) {
filter_idx = efx_ef10_filter_get_unsafe_id(*id);
if (!table->entry[filter_idx].spec)
@@ -5353,10 +5191,10 @@ static void efx_ef10_filter_mark_old(struct efx_nic *efx)
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_filter_vlan *vlan;
- spin_lock_bh(&efx->filter_lock);
+ down_write(&table->lock);
list_for_each_entry(vlan, &table->vlan_list, list)
_efx_ef10_filter_vlan_mark_old(efx, vlan);
- spin_unlock_bh(&efx->filter_lock);
+ up_write(&table->lock);
}
static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
@@ -5633,10 +5471,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
return rc;
}
-/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
- * flag or removes these filters, we don't need to hold the filter_lock while
- * scanning for these filters.
- */
+/* Remove filters that weren't renewed. */
static void efx_ef10_filter_remove_old(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -5645,6 +5480,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
int rc;
int i;
+ down_write(&table->lock);
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
if (READ_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
@@ -5656,6 +5492,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
remove_failed++;
}
}
+ up_write(&table->lock);
if (remove_failed)
netif_info(efx, drv, efx->net_dev,
@@ -6784,7 +6621,6 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_insert = efx_ef10_filter_rfs_insert,
.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
@@ -6897,7 +6733,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_insert = efx_ef10_filter_rfs_insert,
.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 7321a4cf6f4d..692dd729ee2a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -340,7 +340,10 @@ static int efx_poll(struct napi_struct *napi, int budget)
efx_update_irq_mod(efx, channel);
}
- efx_filter_rfs_expire(channel);
+#ifdef CONFIG_RFS_ACCEL
+ /* Perhaps expire some ARFS filters */
+ schedule_work(&channel->filter_work);
+#endif
/* There is no race here; although napi_disable() will
* only wait for napi_complete(), this isn't a problem
@@ -470,6 +473,10 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
tx_queue->channel = channel;
}
+#ifdef CONFIG_RFS_ACCEL
+ INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
rx_queue = &channel->rx_queue;
rx_queue->efx = efx;
timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
@@ -512,6 +519,9 @@ efx_copy_channel(const struct efx_channel *old_channel)
rx_queue->buffer = NULL;
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+#ifdef CONFIG_RFS_ACCEL
+ INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
return channel;
}
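Expiring ARFS filters can now block on filter_sem and table->lock, so it may no longer run from efx_poll()'s NAPI (softirq) context. Each channel therefore gains a work item, initialised in both efx_alloc_channel() and efx_copy_channel(), and NAPI only schedules it; the handler itself (see the efx.h hunk below) runs in process context:

	/* NAPI context: must not sleep, so just kick the work item */
	schedule_work(&channel->filter_work);

	/* process context: the handler may block on the semaphores */
	static inline void efx_filter_rfs_expire(struct work_struct *data)
	{
		struct efx_channel *channel =
			container_of(data, struct efx_channel, filter_work);

		if (channel->rfs_filters_added >= 60 &&
		    __efx_filter_rfs_expire(channel->efx, 100))
			channel->rfs_filters_added -= 60;
	}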
@@ -1773,7 +1783,6 @@ static int efx_probe_filters(struct efx_nic *efx)
{
int rc;
- spin_lock_init(&efx->filter_lock);
init_rwsem(&efx->filter_sem);
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
@@ -2648,6 +2657,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
+ mutex_lock(&efx->rss_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
method != RESET_TYPE_DATAPATH)
efx->phy_op->fini(efx);
@@ -2703,6 +2713,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (efx->type->rx_restore_rss_contexts)
efx->type->rx_restore_rss_contexts(efx);
+ mutex_unlock(&efx->rss_lock);
down_read(&efx->filter_sem);
efx_restore_filters(efx);
up_read(&efx->filter_sem);
@@ -2721,6 +2732,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
fail:
efx->port_initialized = false;
+ mutex_unlock(&efx->rss_lock);
mutex_unlock(&efx->mac_lock);
return rc;
@@ -3007,11 +3019,15 @@ static int efx_init_struct(struct efx_nic *efx,
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
INIT_LIST_HEAD(&efx->rss_context.list);
+ mutex_init(&efx->rss_lock);
spin_lock_init(&efx->stats_lock);
efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
efx->num_mac_stats = MC_CMD_MAC_NSTATS;
BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
mutex_init(&efx->mac_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mutex_init(&efx->rps_mutex);
+#endif
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
@@ -3079,11 +3095,14 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
* (a) this is an infrequent control-plane operation and (b) n is small (max 64)
*/
-struct efx_rss_context *efx_alloc_rss_context_entry(struct list_head *head)
+struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
+ struct list_head *head = &efx->rss_context.list;
struct efx_rss_context *ctx, *new;
u32 id = 1; /* Don't use zero, that refers to the master RSS context */
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
/* Search for first gap in the numbering */
list_for_each_entry(ctx, head, list) {
if (ctx->user_id != id)
@@ -3109,10 +3128,13 @@ struct efx_rss_context *efx_alloc_rss_context_entry(struct list_head *head)
return new;
}
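efx_alloc_rss_context_entry() keeps the context list sorted by user_id and hands out the first unused ID by walking until the numbering has a gap; the added WARN_ON documents that efx->rss_lock must be held, since both the walk and the insertion assume the list cannot change underneath them. The same first-gap algorithm in a self-contained userspace sketch (hypothetical helper, illustration only):

	#include <stddef.h>

	/* ids[] is sorted ascending; 0 is reserved for the master context */
	static unsigned int first_free_id(const unsigned int *ids, size_t n)
	{
		unsigned int id = 1;
		size_t i;

		for (i = 0; i < n; i++) {
			if (ids[i] != id)
				break;		/* found a gap */
			id++;
		}
		return id;
	}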
-struct efx_rss_context *efx_find_rss_context_entry(u32 id, struct list_head *head)
+struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
+ struct list_head *head = &efx->rss_context.list;
struct efx_rss_context *ctx;
+ WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
list_for_each_entry(ctx, head, list)
if (ctx->user_id == id)
return ctx;
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 3429ae3f3b08..a3140e16fcef 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -170,22 +170,25 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
-static inline void efx_filter_rfs_expire(struct efx_channel *channel)
+static inline void efx_filter_rfs_expire(struct work_struct *data)
{
+ struct efx_channel *channel = container_of(data, struct efx_channel,
+ filter_work);
+
if (channel->rfs_filters_added >= 60 &&
__efx_filter_rfs_expire(channel->efx, 100))
channel->rfs_filters_added -= 60;
}
#define efx_filter_rfs_enabled() 1
#else
-static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
+static inline void efx_filter_rfs_expire(struct work_struct *data) {}
#define efx_filter_rfs_enabled() 0
#endif
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
/* RSS contexts */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct list_head *list);
-struct efx_rss_context *efx_find_rss_context_entry(u32 id, struct list_head *list);
+struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
+struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
void efx_free_rss_context_entry(struct efx_rss_context *ctx);
static inline bool efx_rss_active(struct efx_rss_context *ctx)
{
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index bb1c80d48d12..3143588ffd77 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -979,7 +979,7 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
u32 rss_context = 0;
- s32 rc;
+ s32 rc = 0;
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
@@ -989,15 +989,17 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
case ETHTOOL_GRXFH: {
struct efx_rss_context *ctx = &efx->rss_context;
+ mutex_lock(&efx->rss_lock);
if (info->flow_type & FLOW_RSS && info->rss_context) {
- ctx = efx_find_rss_context_entry(info->rss_context,
- &efx->rss_context.list);
- if (!ctx)
- return -ENOENT;
+ ctx = efx_find_rss_context_entry(efx, info->rss_context);
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
}
info->data = 0;
if (!efx_rss_active(ctx)) /* No RSS */
- return 0;
+ goto out_unlock;
switch (info->flow_type & ~FLOW_RSS) {
case UDP_V4_FLOW:
if (ctx->rx_hash_udp_4tuple)
@@ -1024,7 +1026,9 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
default:
break;
}
- return 0;
+out_unlock:
+ mutex_unlock(&efx->rss_lock);
+ return rc;
}
case ETHTOOL_GRXCLSRLCNT:
@@ -1084,6 +1088,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
+ u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS);
struct ethhdr *mac_entry = &rule->h_u.ether_spec;
struct ethhdr *mac_mask = &rule->m_u.ether_spec;
enum efx_filter_flags flags = 0;
@@ -1117,14 +1122,14 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
if (rule->flow_type & FLOW_RSS)
spec.rss_context = rss_context;
- switch (rule->flow_type & ~(FLOW_EXT | FLOW_RSS)) {
+ switch (flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_IP_PROTO);
spec.ether_type = htons(ETH_P_IP);
- spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
- IPPROTO_TCP : IPPROTO_UDP);
+ spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP
+ : IPPROTO_UDP;
if (ip_mask->ip4dst) {
if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
return -EINVAL;
@@ -1158,8 +1163,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_IP_PROTO);
spec.ether_type = htons(ETH_P_IPV6);
- spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ?
- IPPROTO_TCP : IPPROTO_UDP);
+ spec.ip_proto = flow_type == TCP_V6_FLOW ? IPPROTO_TCP
+ : IPPROTO_UDP;
if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
if (!ip6_mask_is_full(ip6_mask->ip6dst))
return -EINVAL;
@@ -1366,16 +1371,20 @@ static int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_rss_context *ctx;
- int rc;
+ int rc = 0;
if (!efx->type->rx_pull_rss_context_config)
return -EOPNOTSUPP;
- ctx = efx_find_rss_context_entry(rss_context, &efx->rss_context.list);
- if (!ctx)
- return -ENOENT;
+
+ mutex_lock(&efx->rss_lock);
+ ctx = efx_find_rss_context_entry(efx, rss_context);
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
rc = efx->type->rx_pull_rss_context_config(efx, ctx);
if (rc)
- return rc;
+ goto out_unlock;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
@@ -1383,7 +1392,9 @@ static int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
if (key)
memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
- return 0;
+out_unlock:
+ mutex_unlock(&efx->rss_lock);
+ return rc;
}
static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
@@ -1401,23 +1412,31 @@ static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
/* Hash function is Toeplitz, cannot be changed */
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+
+ mutex_lock(&efx->rss_lock);
+
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (delete)
+ if (delete) {
/* alloc + delete == Nothing to do */
- return -EINVAL;
- ctx = efx_alloc_rss_context_entry(&efx->rss_context.list);
- if (!ctx)
- return -ENOMEM;
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ ctx = efx_alloc_rss_context_entry(efx);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
/* Initialise indir table and key to defaults */
efx_set_default_rx_indir_table(efx, ctx);
netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
allocated = true;
} else {
- ctx = efx_find_rss_context_entry(*rss_context,
- &efx->rss_context.list);
- if (!ctx)
- return -ENOENT;
+ ctx = efx_find_rss_context_entry(efx, *rss_context);
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
}
if (delete) {
@@ -1425,7 +1444,7 @@ static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
if (!rc)
efx_free_rss_context_entry(ctx);
- return rc;
+ goto out_unlock;
}
if (!key)
@@ -1438,6 +1457,8 @@ static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
efx_free_rss_context_entry(ctx);
else
*rss_context = ctx->user_id;
+out_unlock:
+ mutex_unlock(&efx->rss_lock);
return rc;
}
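
All of the rxnfc/rxfh paths above settle on the same locking shape: take
efx->rss_lock around any walk of the RSS context list and route every exit
through a single unlock label. A minimal sketch of that pattern, assuming the
efx_find_rss_context_entry(efx, id) signature introduced in this series (the
function name here is illustrative):

static int example_get_rss_context(struct efx_nic *efx, u32 rss_context)
{
	struct efx_rss_context *ctx;
	int rc = 0;

	mutex_lock(&efx->rss_lock);
	ctx = efx_find_rss_context_entry(efx, rss_context);
	if (!ctx) {
		rc = -ENOENT;
		goto out_unlock;
	}
	/* ... read or update ctx while the lock is held ... */
out_unlock:
	mutex_unlock(&efx->rss_lock);
	return rc;
}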
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index ad001e77d554..4a19c7efdf8d 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1878,6 +1878,7 @@ struct efx_farch_filter_table {
};
struct efx_farch_filter_state {
+ struct rw_semaphore lock; /* Protects table contents */
struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
};
@@ -2397,9 +2398,13 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
if (rc)
return rc;
+ down_write(&state->lock);
+
table = &state->table[efx_farch_filter_spec_table_id(&spec)];
- if (table->size == 0)
- return -EINVAL;
+ if (table->size == 0) {
+ rc = -EINVAL;
+ goto out_unlock;
+ }
netif_vdbg(efx, hw, efx->net_dev,
"%s: type %d search_limit=%d", __func__, spec.type,
@@ -2412,8 +2417,6 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
ins_index = rep_index;
-
- spin_lock_bh(&efx->filter_lock);
} else {
/* Search concurrently for
* (1) a filter to be replaced (rep_index): any filter
@@ -2443,8 +2446,6 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
ins_index = -1;
depth = 1;
- spin_lock_bh(&efx->filter_lock);
-
for (;;) {
if (!test_bit(i, table->used_bitmap)) {
if (ins_index < 0)
@@ -2463,7 +2464,7 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
/* Case (b) */
if (ins_index < 0) {
rc = -EBUSY;
- goto out;
+ goto out_unlock;
}
rep_index = -1;
break;
@@ -2483,11 +2484,11 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
if (spec.priority == saved_spec->priority && !replace_equal) {
rc = -EEXIST;
- goto out;
+ goto out_unlock;
}
if (spec.priority < saved_spec->priority) {
rc = -EPERM;
- goto out;
+ goto out_unlock;
}
if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
@@ -2528,8 +2529,8 @@ s32 efx_farch_filter_insert(struct efx_nic *efx,
__func__, spec.type, ins_index, spec.dmaq_id);
rc = efx_farch_filter_make_id(&spec, ins_index);
-out:
- spin_unlock_bh(&efx->filter_lock);
+out_unlock:
+ up_write(&state->lock);
return rc;
}
@@ -2604,11 +2605,11 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
filter_idx = efx_farch_filter_id_index(filter_id);
if (filter_idx >= table->size)
return -ENOENT;
+ down_write(&state->lock);
spec = &table->spec[filter_idx];
- spin_lock_bh(&efx->filter_lock);
rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
- spin_unlock_bh(&efx->filter_lock);
+ up_write(&state->lock);
return rc;
}
@@ -2622,30 +2623,28 @@ int efx_farch_filter_get_safe(struct efx_nic *efx,
struct efx_farch_filter_table *table;
struct efx_farch_filter_spec *spec;
unsigned int filter_idx;
- int rc;
+ int rc = -ENOENT;
+
+ down_read(&state->lock);
table_id = efx_farch_filter_id_table_id(filter_id);
if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
- return -ENOENT;
+ goto out_unlock;
table = &state->table[table_id];
filter_idx = efx_farch_filter_id_index(filter_id);
if (filter_idx >= table->size)
- return -ENOENT;
+ goto out_unlock;
spec = &table->spec[filter_idx];
- spin_lock_bh(&efx->filter_lock);
-
if (test_bit(filter_idx, table->used_bitmap) &&
spec->priority == priority) {
efx_farch_filter_to_gen_spec(spec_buf, spec);
rc = 0;
- } else {
- rc = -ENOENT;
}
- spin_unlock_bh(&efx->filter_lock);
-
+out_unlock:
+ up_read(&state->lock);
return rc;
}
@@ -2658,13 +2657,13 @@ efx_farch_filter_table_clear(struct efx_nic *efx,
struct efx_farch_filter_table *table = &state->table[table_id];
unsigned int filter_idx;
- spin_lock_bh(&efx->filter_lock);
+ down_write(&state->lock);
for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
efx_farch_filter_remove(efx, table,
filter_idx, priority);
}
- spin_unlock_bh(&efx->filter_lock);
+ up_write(&state->lock);
}
int efx_farch_filter_clear_rx(struct efx_nic *efx,
@@ -2688,7 +2687,7 @@ u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
unsigned int filter_idx;
u32 count = 0;
- spin_lock_bh(&efx->filter_lock);
+ down_read(&state->lock);
for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
@@ -2701,7 +2700,7 @@ u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
}
}
- spin_unlock_bh(&efx->filter_lock);
+ up_read(&state->lock);
return count;
}
@@ -2716,7 +2715,7 @@ s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
unsigned int filter_idx;
s32 count = 0;
- spin_lock_bh(&efx->filter_lock);
+ down_read(&state->lock);
for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
@@ -2735,7 +2734,7 @@ s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
}
}
out:
- spin_unlock_bh(&efx->filter_lock);
+ up_read(&state->lock);
return count;
}
@@ -2749,7 +2748,7 @@ void efx_farch_filter_table_restore(struct efx_nic *efx)
efx_oword_t filter;
unsigned int filter_idx;
- spin_lock_bh(&efx->filter_lock);
+ down_write(&state->lock);
for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
table = &state->table[table_id];
@@ -2770,7 +2769,7 @@ void efx_farch_filter_table_restore(struct efx_nic *efx)
efx_farch_filter_push_rx_config(efx);
efx_farch_filter_push_tx_limits(efx);
- spin_unlock_bh(&efx->filter_lock);
+ up_write(&state->lock);
}
void efx_farch_filter_table_remove(struct efx_nic *efx)
@@ -2864,7 +2863,7 @@ void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
efx_oword_t filter;
unsigned int filter_idx;
- spin_lock_bh(&efx->filter_lock);
+ down_write(&state->lock);
for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
@@ -2896,33 +2895,30 @@ void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
efx_farch_filter_push_rx_config(efx);
- spin_unlock_bh(&efx->filter_lock);
+ up_write(&state->lock);
}
#ifdef CONFIG_RFS_ACCEL
-s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
- struct efx_filter_spec *gen_spec)
-{
- return efx_farch_filter_insert(efx, gen_spec, true);
-}
-
bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int index)
{
struct efx_farch_filter_state *state = efx->filter_state;
- struct efx_farch_filter_table *table =
- &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ struct efx_farch_filter_table *table;
+ bool ret = false;
+ down_write(&state->lock);
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
if (test_bit(index, table->used_bitmap) &&
table->spec[index].priority == EFX_FILTER_PRI_HINT &&
rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
flow_id, index)) {
efx_farch_filter_table_clear_entry(efx, table, index);
- return true;
+ ret = true;
}
- return false;
+ up_write(&state->lock);
+ return ret;
}
#endif /* CONFIG_RFS_ACCEL */
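
The conversion above replaces the bh-disabling spinlock with a rw_semaphore
owned by the filter state itself: lookups and counts take it shared, while
inserts, removals and restores take it exclusive. A sleeping lock is only
possible here because the rx.c changes later in this patch move RFS insertion
out of atomic context. A sketch of the split, with illustrative bodies:

static u32 example_count_filters(struct efx_farch_filter_state *state)
{
	u32 count = 0;

	down_read(&state->lock);	/* shared: read-only table walk */
	/* ... count set bits in each table's used_bitmap ... */
	up_read(&state->lock);
	return count;
}

static int example_insert_filter(struct efx_farch_filter_state *state)
{
	int rc = 0;

	down_write(&state->lock);	/* exclusive: mutate table contents */
	/* ... pick a slot, write the spec, push it to hardware ... */
	up_write(&state->lock);
	return rc;
}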
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index f97da05952c7..f17751559ccc 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -298,7 +298,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
attr->limit_value = limit_value;
sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attr->name;
- attr->dev_attr.attr.mode = S_IRUGO;
+ attr->dev_attr.attr.mode = 0444;
attr->dev_attr.show = reader;
hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 2453f3849e72..5e379a83c729 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -430,6 +430,7 @@ enum efx_sync_events_state {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
+ * @filter_work: Work item for efx_filter_rfs_expire()
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
@@ -475,6 +476,7 @@ struct efx_channel {
unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
unsigned int rfs_filters_added;
+ struct work_struct filter_work;
#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
u32 *rps_flow_id;
#endif
@@ -794,6 +796,7 @@ struct efx_rss_context {
* @rx_scatter: Scatter mode enabled for receives
* @rss_context: Main RSS context. Its @list member is the head of the list of
* RSS contexts created by user requests
+ * @rss_lock: Protects custom RSS context software state in @rss_context.list
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
* @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
@@ -841,9 +844,9 @@ struct efx_rss_context {
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
- * @filter_sem: Filter table rw_semaphore, for freeing the table
- * @filter_lock: Filter table lock, for mere content changes
+ * @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
* @filter_state: Architecture-dependent filter table state
+ * @rps_mutex: Protects RPS state of all channels
* @rps_expire_channel: Next channel to check for expiry
* @rps_expire_index: Next index to check for expiry in
* @rps_expire_channel's @rps_flow_id
@@ -938,6 +941,7 @@ struct efx_nic {
int rx_packet_ts_offset;
bool rx_scatter;
struct efx_rss_context rss_context;
+ struct mutex rss_lock;
unsigned int_error_count;
unsigned long int_error_expire;
@@ -995,9 +999,9 @@ struct efx_nic {
void *loopback_selftest;
struct rw_semaphore filter_sem;
- spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
+ struct mutex rps_mutex;
unsigned int rps_expire_channel;
unsigned int rps_expire_index;
#endif
@@ -1152,10 +1156,6 @@ struct efx_udp_tunnel {
* @filter_count_rx_used: Get the number of filters in use at a given priority
* @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
* @filter_get_rx_ids: Get list of RX filters at a given priority
- * @filter_rfs_insert: Add or replace a filter for RFS. This must be
- * atomic. The hardware change may be asynchronous but should
- * not be delayed for long. It may fail if this can't be done
- * atomically.
* @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
* This must check whether the specified table entry is used by RFS
* and that rps_may_expire_flow() returns true for it.
@@ -1306,8 +1306,6 @@ struct efx_nic_type {
enum efx_filter_priority priority,
u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
- s32 (*filter_rfs_insert)(struct efx_nic *efx,
- struct efx_filter_spec *spec);
bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
unsigned int index);
#endif
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index d080a414e8f2..5640034bda10 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -365,6 +365,8 @@ enum {
* @vi_base: Absolute index of first VI in this function
* @n_allocated_vis: Number of VIs allocated to this function
* @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
+ * @must_restore_rss_contexts: Flag: RSS contexts have yet to be restored after
+ * MC reboot
* @must_restore_filters: Flag: filters have yet to be restored after MC reboot
* @n_piobufs: Number of PIO buffers allocated to this function
* @wc_membase: Base address of write-combining mapping of the memory BAR
@@ -407,6 +409,7 @@ struct efx_ef10_nic_data {
unsigned int vi_base;
unsigned int n_allocated_vis;
bool must_realloc_vis;
+ bool must_restore_rss_contexts;
bool must_restore_filters;
unsigned int n_piobufs;
void __iomem *wc_membase, *pio_write_base;
@@ -601,8 +604,6 @@ s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority, u32 *buf,
u32 size);
#ifdef CONFIG_RFS_ACCEL
-s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec);
bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int index);
#endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index cfe76aad79ee..95682831484e 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -827,14 +827,67 @@ MODULE_PARM_DESC(rx_refill_threshold,
#ifdef CONFIG_RFS_ACCEL
+/**
+ * struct efx_async_filter_insertion - Request to asynchronously insert a filter
+ * @net_dev: Reference to the netdevice
+ * @spec: The filter to insert
+ * @work: Workitem for this request
+ * @rxq_index: Identifies the channel for which this request was made
+ * @flow_id: Identifies the kernel-side flow for which this request was made
+ */
+struct efx_async_filter_insertion {
+ struct net_device *net_dev;
+ struct efx_filter_spec spec;
+ struct work_struct work;
+ u16 rxq_index;
+ u32 flow_id;
+};
+
+static void efx_filter_rfs_work(struct work_struct *data)
+{
+ struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
+ work);
+ struct efx_nic *efx = netdev_priv(req->net_dev);
+ struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
+ int rc;
+
+ rc = efx->type->filter_insert(efx, &req->spec, false);
+ if (rc >= 0) {
+ /* Remember this so we can check whether to expire the filter
+ * later.
+ */
+ mutex_lock(&efx->rps_mutex);
+ channel->rps_flow_id[rc] = req->flow_id;
+ ++channel->rfs_filters_added;
+ mutex_unlock(&efx->rps_mutex);
+
+ if (req->spec.ether_type == htons(ETH_P_IP))
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ req->spec.rem_host, ntohs(req->spec.rem_port),
+ req->spec.loc_host, ntohs(req->spec.loc_port),
+ req->rxq_index, req->flow_id, rc);
+ else
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+ (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ req->spec.rem_host, ntohs(req->spec.rem_port),
+ req->spec.loc_host, ntohs(req->spec.loc_port),
+ req->rxq_index, req->flow_id, rc);
+ }
+
+ /* Release references */
+ dev_put(req->net_dev);
+ kfree(req);
+}
+
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
- struct efx_filter_spec spec;
+ struct efx_async_filter_insertion *req;
struct flow_keys fk;
- int rc;
if (flow_id == RPS_FLOW_ID_INVALID)
return -EINVAL;
@@ -847,50 +900,39 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
return -EPROTONOSUPPORT;
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
+ req = kmalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
- spec.match_flags =
+ req->spec.match_flags =
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
- spec.ether_type = fk.basic.n_proto;
- spec.ip_proto = fk.basic.ip_proto;
+ req->spec.ether_type = fk.basic.n_proto;
+ req->spec.ip_proto = fk.basic.ip_proto;
if (fk.basic.n_proto == htons(ETH_P_IP)) {
- spec.rem_host[0] = fk.addrs.v4addrs.src;
- spec.loc_host[0] = fk.addrs.v4addrs.dst;
+ req->spec.rem_host[0] = fk.addrs.v4addrs.src;
+ req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
} else {
- memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
- memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
+ memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
+ sizeof(struct in6_addr));
+ memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
}
- spec.rem_port = fk.ports.src;
- spec.loc_port = fk.ports.dst;
-
- rc = efx->type->filter_rfs_insert(efx, &spec);
- if (rc < 0)
- return rc;
+ req->spec.rem_port = fk.ports.src;
+ req->spec.loc_port = fk.ports.dst;
- /* Remember this so we can check whether to expire the filter later */
- channel = efx_get_channel(efx, rxq_index);
- channel->rps_flow_id[rc] = flow_id;
- ++channel->rfs_filters_added;
-
- if (spec.ether_type == htons(ETH_P_IP))
- netif_info(efx, rx_status, efx->net_dev,
- "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
- (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
- ntohs(spec.loc_port), rxq_index, flow_id, rc);
- else
- netif_info(efx, rx_status, efx->net_dev,
- "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
- (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
- ntohs(spec.loc_port), rxq_index, flow_id, rc);
-
- return rc;
+ req->net_dev = net_dev;
+ dev_hold(req->net_dev);
+ INIT_WORK(&req->work, efx_filter_rfs_work);
+ req->rxq_index = rxq_index;
+ req->flow_id = flow_id;
+ schedule_work(&req->work);
+ return 0;
}
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
@@ -899,9 +941,8 @@ bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
unsigned int channel_idx, index, size;
u32 flow_id;
- if (!spin_trylock_bh(&efx->filter_lock))
+ if (!mutex_trylock(&efx->rps_mutex))
return false;
-
expire_one = efx->type->filter_rfs_expire_one;
channel_idx = efx->rps_expire_channel;
index = efx->rps_expire_index;
@@ -926,7 +967,7 @@ bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index;
- spin_unlock_bh(&efx->filter_lock);
+ mutex_unlock(&efx->rps_mutex);
return true;
}
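
The rx.c rewrite above is the standard deferred-work recipe:
ndo_rx_flow_steer() runs in atomic context, so the request is captured with a
GFP_ATOMIC allocation, the netdevice is pinned with dev_hold(), and the
insertion (which may now sleep on the new mutexes and semaphores) happens from
a workqueue. Stripped to its skeleton, with illustrative function names:

static void example_rfs_work(struct work_struct *work)
{
	struct efx_async_filter_insertion *req =
		container_of(work, struct efx_async_filter_insertion, work);

	/* process context: taking rss_lock or state->lock is fine here */
	dev_put(req->net_dev);		/* drop the reference taken below */
	kfree(req);
}

static int example_rx_flow_steer(struct net_device *net_dev)
{
	struct efx_async_filter_insertion *req;

	req = kmalloc(sizeof(*req), GFP_ATOMIC);	/* atomic caller */
	if (!req)
		return -ENOMEM;

	req->net_dev = net_dev;
	dev_hold(req->net_dev);		/* keep the device alive for the work */
	INIT_WORK(&req->work, example_rfs_work);
	schedule_work(&req->work);
	return 0;
}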
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 18aab25234ba..65161f68265a 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1035,7 +1035,6 @@ const struct efx_nic_type siena_a0_nic_type = {
.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
- .filter_rfs_insert = efx_farch_filter_rfs_insert,
.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index f5c5984afefb..0b3b7a460641 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1701,6 +1701,10 @@ static const struct ave_soc_data ave_ld20_data = {
.is_desc_64bit = true,
};
+static const struct ave_soc_data ave_pxs3_data = {
+ .is_desc_64bit = false,
+};
+
static const struct of_device_id of_ave_match[] = {
{
.compatible = "socionext,uniphier-pro4-ave4",
@@ -1718,6 +1722,10 @@ static const struct of_device_id of_ave_match[] = {
.compatible = "socionext,uniphier-ld20-ave4",
.data = &ave_ld20_data,
},
+ {
+ .compatible = "socionext,uniphier-pxs3-ave4",
+ .data = &ave_pxs3_data,
+ },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_ave_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a9856a8bf8ad..9f983dd069d5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -57,36 +57,36 @@
/* Module parameters */
#define TX_TIMEO 5000
static int watchdog = TX_TIMEO;
-module_param(watchdog, int, S_IRUGO | S_IWUSR);
+module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
static int debug = -1;
-module_param(debug, int, S_IRUGO | S_IWUSR);
+module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
static int phyaddr = -1;
-module_param(phyaddr, int, S_IRUGO);
+module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
static int flow_ctrl = FLOW_OFF;
-module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
+module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
static int pause = PAUSE_TIME;
-module_param(pause, int, S_IRUGO | S_IWUSR);
+module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");
#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
-module_param(tc, int, S_IRUGO | S_IWUSR);
+module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");
#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
-module_param(buf_sz, int, S_IRUGO | S_IWUSR);
+module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
#define STMMAC_RX_COPYBREAK 256
@@ -97,7 +97,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
#define STMMAC_DEFAULT_LPI_TIMER 1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
-module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
@@ -105,7 +105,7 @@ MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
* but allow user to force to use the chain instead of the ring
*/
static unsigned int chain_mode;
-module_param(chain_mode, int, S_IRUGO);
+module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
@@ -4001,7 +4001,7 @@ static int stmmac_init_fs(struct net_device *dev)
/* Entry to report DMA RX/TX rings */
priv->dbgfs_rings_status =
- debugfs_create_file("descriptors_status", S_IRUGO,
+ debugfs_create_file("descriptors_status", 0444,
priv->dbgfs_dir, dev,
&stmmac_rings_status_fops);
@@ -4013,9 +4013,9 @@ static int stmmac_init_fs(struct net_device *dev)
}
/* Entry to report the DMA HW features */
- priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
- priv->dbgfs_dir,
- dev, &stmmac_dma_cap_fops);
+ priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
+ priv->dbgfs_dir,
+ dev, &stmmac_dma_cap_fops);
if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8dd545fed30d..f081de4f38d7 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9437,11 +9437,11 @@ static ssize_t show_num_ports(struct device *dev,
}
static struct device_attribute niu_parent_attributes[] = {
- __ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
- __ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
- __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
- __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
- __ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
+ __ATTR(port_phy, 0444, show_port_phy, NULL),
+ __ATTR(plat_type, 0444, show_plat_type, NULL),
+ __ATTR(rxchan_per_port, 0444, show_rxchan_per_port, NULL),
+ __ATTR(txchan_per_port, 0444, show_txchan_per_port, NULL),
+ __ATTR(num_ports, 0444, show_num_ports, NULL),
{}
};
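
The S_I* to octal churn in this and the surrounding hunks is purely
mechanical; the mapping can be confirmed with a small userspace program
(S_IRUGO is a kernel-only macro, so it is reconstructed here from the
standard sys/stat.h bits):

#include <stdio.h>
#include <sys/stat.h>

#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)

int main(void)
{
	printf("S_IRUGO           = %04o\n", S_IRUGO);           /* 0444 */
	printf("S_IRUGO | S_IWUSR = %04o\n", S_IRUGO | S_IWUSR); /* 0644 */
	printf("S_IRUSR | S_IWUSR = %04o\n", S_IRUSR | S_IWUSR); /* 0600 */
	printf("S_IRUSR           = %04o\n", S_IRUSR);           /* 0400 */
	return 0;
}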
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 516dd59249d7..b919e89a9b93 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1694,7 +1694,6 @@ static struct pernet_operations geneve_net_ops = {
.exit_batch = geneve_exit_batch_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
- .async = true,
};
static int __init geneve_init_module(void)
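
This and the following hunks drop the now-removed .async member from
pernet_operations; after its removal a per-netns driver registers nothing but
init/exit callbacks plus an id/size pair for its net_generic() storage. A
minimal sketch with illustrative names:

static unsigned int example_net_id;

struct example_net {
	bool registered;
};

static __net_init int example_net_init(struct net *net)
{
	struct example_net *en = net_generic(net, example_net_id);

	en->registered = false;
	return 0;
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};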
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 127edd23018f..f38e32a7ec9c 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1325,7 +1325,6 @@ static struct pernet_operations gtp_net_ops = {
.exit = gtp_net_exit,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
- .async = true,
};
static int __init gtp_init(void)
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 78a6414c5fd9..dfabbae72efd 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -590,8 +590,7 @@ static int bpq_device_event(struct notifier_block *this,
static int __init bpq_init_driver(void)
{
#ifdef CONFIG_PROC_FS
- if (!proc_create("bpqether", S_IRUGO, init_net.proc_net,
- &bpq_info_fops)) {
+ if (!proc_create("bpqether", 0444, init_net.proc_net, &bpq_info_fops)) {
printk(KERN_ERR
"bpq: cannot create /proc/net/bpqether entry.\n");
return -ENOENT;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 14c3632b8cde..83034eb7ed4f 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1168,7 +1168,7 @@ static int __init yam_init_driver(void)
yam_timer.expires = jiffies + HZ / 100;
add_timer(&yam_timer);
- proc_create("yam", S_IRUGO, init_net.proc_net, &yam_info_fops);
+ proc_create("yam", 0444, init_net.proc_net, &yam_info_fops);
return 0;
error:
while (--i >= 0) {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5d716750ae22..ecc84954c511 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -55,7 +55,7 @@
#define VF_TAKEOVER_INT (HZ / 10)
static unsigned int ring_size __ro_after_init = 128;
-module_param(ring_size, uint, S_IRUGO);
+module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
@@ -66,7 +66,7 @@ static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_TX_ERR;
static int debug = -1;
-module_param(debug, int, S_IRUGO);
+module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void netvsc_change_rx_flags(struct net_device *net, int change)
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 548d9d026a85..77abedf0b524 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1661,7 +1661,7 @@ static int at86rf230_debugfs_init(struct at86rf230_local *lp)
if (!at86rf230_debugfs_root)
return -ENOMEM;
- stats = debugfs_create_file("trac_stats", S_IRUGO,
+ stats = debugfs_create_file("trac_stats", 0444,
at86rf230_debugfs_root, lp,
&at86rf230_stats_fops);
if (!stats)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 743d37fb034a..450eec264a5e 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -1040,7 +1040,6 @@ static struct pernet_operations ipvlan_net_ops = {
.id = &ipvlan_netid,
.size = sizeof(struct ipvlan_netns),
.exit = ipvlan_ns_exit,
- .async = true,
};
static int __init ipvlan_init_module(void)
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b97a907ea5aa..30612497643c 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -230,5 +230,4 @@ out:
/* Registered in net/core/dev.c */
struct pernet_operations __net_initdata loopback_net_ops = {
.init = loopback_net_init,
- .async = true,
};
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 09388c06171d..449b2a1a1800 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -9,3 +9,7 @@ ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
bpf.o
endif
+
+ifneq ($(CONFIG_NET_DEVLINK),)
+netdevsim-objs += devlink.o fib.o
+endif
diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c
new file mode 100644
index 000000000000..bbdcf064ba10
--- /dev/null
+++ b/drivers/net/netdevsim/devlink.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2018 Cumulus Networks. All rights reserved.
+ * Copyright (c) 2018 David Ahern <[email protected]>
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/device.h>
+#include <net/devlink.h>
+#include <net/netns/generic.h>
+
+#include "netdevsim.h"
+
+static unsigned int nsim_devlink_id;
+
+/* placeholder until devlink and namespaces are sorted out */
+static struct net *nsim_devlink_net(struct devlink *devlink)
+{
+ return &init_net;
+}
+
+/* IPv4
+ */
+static u64 nsim_ipv4_fib_resource_occ_get(struct devlink *devlink)
+{
+ struct net *net = nsim_devlink_net(devlink);
+
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
+}
+
+static struct devlink_resource_ops nsim_ipv4_fib_res_ops = {
+ .occ_get = nsim_ipv4_fib_resource_occ_get,
+};
+
+static u64 nsim_ipv4_fib_rules_res_occ_get(struct devlink *devlink)
+{
+ struct net *net = nsim_devlink_net(devlink);
+
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
+}
+
+static struct devlink_resource_ops nsim_ipv4_fib_rules_res_ops = {
+ .occ_get = nsim_ipv4_fib_rules_res_occ_get,
+};
+
+/* IPv6
+ */
+static u64 nsim_ipv6_fib_resource_occ_get(struct devlink *devlink)
+{
+ struct net *net = nsim_devlink_net(devlink);
+
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
+}
+
+static struct devlink_resource_ops nsim_ipv6_fib_res_ops = {
+ .occ_get = nsim_ipv6_fib_resource_occ_get,
+};
+
+static u64 nsim_ipv6_fib_rules_res_occ_get(struct devlink *devlink)
+{
+ struct net *net = nsim_devlink_net(devlink);
+
+ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
+}
+
+static struct devlink_resource_ops nsim_ipv6_fib_rules_res_ops = {
+ .occ_get = nsim_ipv6_fib_rules_res_occ_get,
+};
+
+static int devlink_resources_register(struct devlink *devlink)
+{
+ struct devlink_resource_size_params params = {
+ .size_max = (u64)-1,
+ .size_granularity = 1,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY
+ };
+ struct net *net = nsim_devlink_net(devlink);
+ int err;
+ u64 n;
+
+ /* Resources for IPv4 */
+ err = devlink_resource_register(devlink, "IPv4", (u64)-1,
+ NSIM_RESOURCE_IPV4,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params, NULL);
+ if (err) {
+ pr_err("Failed to register IPv4 top resource\n");
+ goto out;
+ }
+
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
+ err = devlink_resource_register(devlink, "fib", n,
+ NSIM_RESOURCE_IPV4_FIB,
+ NSIM_RESOURCE_IPV4,
+ &params, &nsim_ipv4_fib_res_ops);
+ if (err) {
+ pr_err("Failed to register IPv4 FIB resource\n");
+ return err;
+ }
+
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
+ err = devlink_resource_register(devlink, "fib-rules", n,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV4,
+ &params, &nsim_ipv4_fib_rules_res_ops);
+ if (err) {
+ pr_err("Failed to register IPv4 FIB rules resource\n");
+ return err;
+ }
+
+ /* Resources for IPv6 */
+ err = devlink_resource_register(devlink, "IPv6", (u64)-1,
+ NSIM_RESOURCE_IPV6,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params, NULL);
+ if (err) {
+ pr_err("Failed to register IPv6 top resource\n");
+ goto out;
+ }
+
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
+ err = devlink_resource_register(devlink, "fib", n,
+ NSIM_RESOURCE_IPV6_FIB,
+ NSIM_RESOURCE_IPV6,
+ &params, &nsim_ipv6_fib_res_ops);
+ if (err) {
+ pr_err("Failed to register IPv6 FIB resource\n");
+ return err;
+ }
+
+ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
+ err = devlink_resource_register(devlink, "fib-rules", n,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ NSIM_RESOURCE_IPV6,
+ &params, &nsim_ipv6_fib_rules_res_ops);
+ if (err) {
+ pr_err("Failed to register IPv6 FIB rules resource\n");
+ return err;
+ }
+out:
+ return err;
+}
+
+static int nsim_devlink_reload(struct devlink *devlink)
+{
+ enum nsim_resource_id res_ids[] = {
+ NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ };
+ struct net *net = nsim_devlink_net(devlink);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
+ int err;
+ u64 val;
+
+ err = devlink_resource_size_get(devlink, res_ids[i], &val);
+ if (!err) {
+ err = nsim_fib_set_max(net, res_ids[i], val);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void nsim_devlink_net_reset(struct net *net)
+{
+ enum nsim_resource_id res_ids[] = {
+ NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
+ if (nsim_fib_set_max(net, res_ids[i], (u64)-1)) {
+ pr_err("Failed to reset limit for resource %u\n",
+ res_ids[i]);
+ }
+ }
+}
+
+static const struct devlink_ops nsim_devlink_ops = {
+ .reload = nsim_devlink_reload,
+};
+
+/* Once the devlink / namespace issues are sorted out, this needs to be
+ * the net in which a devlink instance is to be created, e.g.
+ * dev_net(ns->netdev).
+ */
+static struct net *nsim_to_net(struct netdevsim *ns)
+{
+ return &init_net;
+}
+
+void nsim_devlink_teardown(struct netdevsim *ns)
+{
+ if (ns->devlink) {
+ struct net *net = nsim_to_net(ns);
+ bool *reg_devlink = net_generic(net, nsim_devlink_id);
+
+ devlink_unregister(ns->devlink);
+ devlink_free(ns->devlink);
+ ns->devlink = NULL;
+
+ nsim_devlink_net_reset(net);
+ *reg_devlink = true;
+ }
+}
+
+void nsim_devlink_setup(struct netdevsim *ns)
+{
+ struct net *net = nsim_to_net(ns);
+ bool *reg_devlink = net_generic(net, nsim_devlink_id);
+ struct devlink *devlink;
+ int err = -ENOMEM;
+
+ /* only one device per namespace controls devlink */
+ if (!*reg_devlink) {
+ ns->devlink = NULL;
+ return;
+ }
+
+ devlink = devlink_alloc(&nsim_devlink_ops, 0);
+ if (!devlink)
+ return;
+
+ err = devlink_register(devlink, &ns->dev);
+ if (err)
+ goto err_devlink_free;
+
+ err = devlink_resources_register(devlink);
+ if (err)
+ goto err_dl_unregister;
+
+ ns->devlink = devlink;
+
+ *reg_devlink = false;
+
+ return;
+
+err_dl_unregister:
+ devlink_unregister(devlink);
+err_devlink_free:
+ devlink_free(devlink);
+}
+
+/* Initialize per network namespace state */
+static int __net_init nsim_devlink_netns_init(struct net *net)
+{
+ bool *reg_devlink = net_generic(net, nsim_devlink_id);
+
+ *reg_devlink = true;
+
+ return 0;
+}
+
+static struct pernet_operations nsim_devlink_net_ops __net_initdata = {
+ .init = nsim_devlink_netns_init,
+ .id = &nsim_devlink_id,
+ .size = sizeof(bool),
+};
+
+void nsim_devlink_exit(void)
+{
+ unregister_pernet_subsys(&nsim_devlink_net_ops);
+ nsim_fib_exit();
+}
+
+int nsim_devlink_init(void)
+{
+ int err;
+
+ err = nsim_fib_init();
+ if (err)
+ goto err_out;
+
+ err = register_pernet_subsys(&nsim_devlink_net_ops);
+ if (err)
+ nsim_fib_exit();
+
+err_out:
+ return err;
+}
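
The resource registration above is what the iproute2 devlink tool talks to:
occ_get feeds "devlink resource show", and sizes set from userspace only take
effect once nsim_devlink_reload() re-reads them. A plausible session (the
instance name is illustrative; the command forms are iproute2's):

# devlink resource show netdevsim/netdevsim0
# devlink resource set netdevsim/netdevsim0 path /IPv4/fib size 96
# devlink dev reload netdevsim/netdevsim0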
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
new file mode 100644
index 000000000000..0d105bafa261
--- /dev/null
+++ b/drivers/net/netdevsim/fib.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2018 Cumulus Networks. All rights reserved.
+ * Copyright (c) 2018 David Ahern <[email protected]>
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <net/fib_notifier.h>
+#include <net/ip_fib.h>
+#include <net/ip6_fib.h>
+#include <net/fib_rules.h>
+#include <net/netns/generic.h>
+
+#include "netdevsim.h"
+
+struct nsim_fib_entry {
+ u64 max;
+ u64 num;
+};
+
+struct nsim_per_fib_data {
+ struct nsim_fib_entry fib;
+ struct nsim_fib_entry rules;
+};
+
+struct nsim_fib_data {
+ struct nsim_per_fib_data ipv4;
+ struct nsim_per_fib_data ipv6;
+};
+
+static unsigned int nsim_fib_net_id;
+
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
+{
+ struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
+ struct nsim_fib_entry *entry;
+
+ switch (res_id) {
+ case NSIM_RESOURCE_IPV4_FIB:
+ entry = &fib_data->ipv4.fib;
+ break;
+ case NSIM_RESOURCE_IPV4_FIB_RULES:
+ entry = &fib_data->ipv4.rules;
+ break;
+ case NSIM_RESOURCE_IPV6_FIB:
+ entry = &fib_data->ipv6.fib;
+ break;
+ case NSIM_RESOURCE_IPV6_FIB_RULES:
+ entry = &fib_data->ipv6.rules;
+ break;
+ default:
+ return 0;
+ }
+
+ return max ? entry->max : entry->num;
+}
+
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val)
+{
+ struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
+ struct nsim_fib_entry *entry;
+ int err = 0;
+
+ switch (res_id) {
+ case NSIM_RESOURCE_IPV4_FIB:
+ entry = &fib_data->ipv4.fib;
+ break;
+ case NSIM_RESOURCE_IPV4_FIB_RULES:
+ entry = &fib_data->ipv4.rules;
+ break;
+ case NSIM_RESOURCE_IPV6_FIB:
+ entry = &fib_data->ipv6.fib;
+ break;
+ case NSIM_RESOURCE_IPV6_FIB_RULES:
+ entry = &fib_data->ipv6.rules;
+ break;
+ default:
+ return 0;
+ }
+
+ /* not allowing a new max to be less than current occupancy
+ * --> no means of evicting entries
+ */
+ if (val < entry->num)
+ err = -EINVAL;
+ else
+ entry->max = val;
+
+ return err;
+}
+
+static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ if (add) {
+ if (entry->num < entry->max) {
+ entry->num++;
+ } else {
+ err = -ENOSPC;
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib rule entries");
+ }
+ } else {
+ entry->num--;
+ }
+
+ return err;
+}
+
+static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
+{
+ struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
+ struct netlink_ext_ack *extack = info->extack;
+ int err = 0;
+
+ switch (info->family) {
+ case AF_INET:
+ err = nsim_fib_rule_account(&data->ipv4.rules, add, extack);
+ break;
+ case AF_INET6:
+ err = nsim_fib_rule_account(&data->ipv6.rules, add, extack);
+ break;
+ }
+
+ return err;
+}
+
+static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ if (add) {
+ if (entry->num < entry->max) {
+ entry->num++;
+ } else {
+ err = -ENOSPC;
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib entries");
+ }
+ } else {
+ entry->num--;
+ }
+
+ return err;
+}
+
+static int nsim_fib_event(struct fib_notifier_info *info, bool add)
+{
+ struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
+ struct netlink_ext_ack *extack = info->extack;
+ int err = 0;
+
+ switch (info->family) {
+ case AF_INET:
+ err = nsim_fib_account(&data->ipv4.fib, add, extack);
+ break;
+ case AF_INET6:
+ err = nsim_fib_account(&data->ipv6.fib, add, extack);
+ break;
+ }
+
+ return err;
+}
+
+static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct fib_notifier_info *info = ptr;
+ int err = 0;
+
+ switch (event) {
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
+ break;
+
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_DEL:
+ err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
+ break;
+ }
+
+ return notifier_from_errno(err);
+}
+
+/* the fib dump was inconsistent: reset the counters before it is retried */
+static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
+{
+ struct nsim_fib_data *data;
+ struct net *net;
+
+ rcu_read_lock();
+ for_each_net_rcu(net) {
+ data = net_generic(net, nsim_fib_net_id);
+
+ data->ipv4.fib.num = 0ULL;
+ data->ipv4.rules.num = 0ULL;
+
+ data->ipv6.fib.num = 0ULL;
+ data->ipv6.rules.num = 0ULL;
+ }
+ rcu_read_unlock();
+}
+
+static struct notifier_block nsim_fib_nb = {
+ .notifier_call = nsim_fib_event_nb,
+};
+
+/* Initialize per network namespace state */
+static int __net_init nsim_fib_netns_init(struct net *net)
+{
+ struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
+
+ data->ipv4.fib.max = (u64)-1;
+ data->ipv4.rules.max = (u64)-1;
+
+ data->ipv6.fib.max = (u64)-1;
+ data->ipv6.rules.max = (u64)-1;
+
+ return 0;
+}
+
+static struct pernet_operations nsim_fib_net_ops __net_initdata = {
+ .init = nsim_fib_netns_init,
+ .id = &nsim_fib_net_id,
+ .size = sizeof(struct nsim_fib_data),
+};
+
+void nsim_fib_exit(void)
+{
+ unregister_pernet_subsys(&nsim_fib_net_ops);
+ unregister_fib_notifier(&nsim_fib_nb);
+}
+
+int nsim_fib_init(void)
+{
+ int err;
+
+ err = register_pernet_subsys(&nsim_fib_net_ops);
+ if (err < 0) {
+ pr_err("Failed to register pernet subsystem\n");
+ goto err_out;
+ }
+
+ err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
+ if (err < 0) {
+ pr_err("Failed to register fib notifier\n");
+ goto err_out;
+ }
+
+err_out:
+ return err;
+}
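
The accounting helpers above implement a simple bounded counter: adds succeed
only while num < max, deletes always decrement. The behaviour is easy to model
in a standalone program (names and values here are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct entry { uint64_t max, num; };

static int account(struct entry *e, int add)
{
	if (add) {
		if (e->num >= e->max)
			return -ENOSPC;	/* table full: reject the route */
		e->num++;
	} else {
		e->num--;
	}
	return 0;
}

int main(void)
{
	struct entry fib = { .max = 2, .num = 0 };
	int i;

	for (i = 0; i < 3; i++)
		printf("add #%d -> %d\n", i + 1, account(&fib, 1));
	/* prints 0, 0, then -ENOSPC: the third add is rejected */
	return 0;
}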
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 3fd567928f3d..8b30ab3ea2c2 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -167,6 +167,8 @@ static int nsim_init(struct net_device *dev)
SET_NETDEV_DEV(dev, &ns->dev);
+ nsim_devlink_setup(ns);
+
return 0;
err_bpf_uninit:
@@ -180,6 +182,7 @@ static void nsim_uninit(struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ nsim_devlink_teardown(ns);
debugfs_remove_recursive(ns->ddir);
nsim_bpf_uninit(ns);
}
@@ -478,12 +481,18 @@ static int __init nsim_module_init(void)
if (err)
goto err_debugfs_destroy;
- err = rtnl_link_register(&nsim_link_ops);
+ err = nsim_devlink_init();
if (err)
goto err_unreg_bus;
+ err = rtnl_link_register(&nsim_link_ops);
+ if (err)
+ goto err_dl_fini;
+
return 0;
+err_dl_fini:
+ nsim_devlink_exit();
err_unreg_bus:
bus_unregister(&nsim_bus);
err_debugfs_destroy:
@@ -494,6 +503,7 @@ err_debugfs_destroy:
static void __exit nsim_module_exit(void)
{
rtnl_link_unregister(&nsim_link_ops);
+ nsim_devlink_exit();
bus_unregister(&nsim_bus);
debugfs_remove_recursive(nsim_ddir);
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index ea081c10efb8..afb8cf90c0fd 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -64,6 +64,9 @@ struct netdevsim {
bool bpf_map_accept;
struct list_head bpf_bound_maps;
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+ struct devlink *devlink;
+#endif
};
extern struct dentry *nsim_ddir;
@@ -103,6 +106,46 @@ nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
#endif
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+enum nsim_resource_id {
+ NSIM_RESOURCE_NONE, /* DEVLINK_RESOURCE_ID_PARENT_TOP */
+ NSIM_RESOURCE_IPV4,
+ NSIM_RESOURCE_IPV4_FIB,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6,
+ NSIM_RESOURCE_IPV6_FIB,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+};
+
+void nsim_devlink_setup(struct netdevsim *ns);
+void nsim_devlink_teardown(struct netdevsim *ns);
+
+int nsim_devlink_init(void);
+void nsim_devlink_exit(void);
+
+int nsim_fib_init(void);
+void nsim_fib_exit(void);
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val);
+#else
+static inline void nsim_devlink_setup(struct netdevsim *ns)
+{
+}
+
+static inline void nsim_devlink_teardown(struct netdevsim *ns)
+{
+}
+
+static inline int nsim_devlink_init(void)
+{
+ return 0;
+}
+
+static inline void nsim_devlink_exit(void)
+{
+}
+#endif
+
static inline struct netdevsim *to_nsim(struct device *ptr)
{
return container_of(ptr, struct netdevsim, dev);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 83bf4959b043..4ab6e9a50bbe 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -560,6 +560,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
{
/* SFP module inserted - read I2C data */
struct sfp_eeprom_id id;
+ bool cotsworks;
u8 check;
int ret;
@@ -574,23 +575,43 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
return -EAGAIN;
}
+ /* Cotsworks do not seem to update the checksums when they
+ * do the final programming with the final module part number,
+ * serial number and date code.
+ */
+ cotsworks = !memcmp(id.base.vendor_name, "COTSWORKS ", 16);
+
/* Validate the checksum over the base structure */
check = sfp_check(&id.base, sizeof(id.base) - 1);
if (check != id.base.cc_base) {
- dev_err(sfp->dev,
- "EEPROM base structure checksum failure: 0x%02x\n",
- check);
- print_hex_dump(KERN_ERR, "sfp EE: ", DUMP_PREFIX_OFFSET,
- 16, 1, &id, sizeof(id.base) - 1, true);
- return -EINVAL;
+ if (cotsworks) {
+ dev_warn(sfp->dev,
+ "EEPROM base structure checksum failure (0x%02x != 0x%02x)\n",
+ check, id.base.cc_base);
+ } else {
+ dev_err(sfp->dev,
+ "EEPROM base structure checksum failure: 0x%02x != 0x%02x\n",
+ check, id.base.cc_base);
+ print_hex_dump(KERN_ERR, "sfp EE: ", DUMP_PREFIX_OFFSET,
+ 16, 1, &id, sizeof(id), true);
+ return -EINVAL;
+ }
}
check = sfp_check(&id.ext, sizeof(id.ext) - 1);
if (check != id.ext.cc_ext) {
- dev_err(sfp->dev,
- "EEPROM extended structure checksum failure: 0x%02x\n",
- check);
- memset(&id.ext, 0, sizeof(id.ext));
+ if (cotsworks) {
+ dev_warn(sfp->dev,
+ "EEPROM extended structure checksum failure (0x%02x != 0x%02x)\n",
+ check, id.ext.cc_ext);
+ } else {
+ dev_err(sfp->dev,
+ "EEPROM extended structure checksum failure: 0x%02x != 0x%02x\n",
+ check, id.ext.cc_ext);
+ print_hex_dump(KERN_ERR, "sfp EE: ", DUMP_PREFIX_OFFSET,
+ 16, 1, &id, sizeof(id), true);
+ memset(&id.ext, 0, sizeof(id.ext));
+ }
}
sfp->id = id;
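
The checksum being validated here is, per SFF-8472, just the low eight bits of
the byte sum over the preceding structure. A standalone model of the check
(not the driver's actual sfp_check(); the sample bytes are made up):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint8_t checksum_model(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint8_t sum = 0;

	while (len--)
		sum += *p++;	/* uint8_t arithmetic wraps mod 256 */
	return sum;
}

int main(void)
{
	uint8_t base[4] = { 0x03, 0x04, 0x07, 0x10 };

	printf("cc = 0x%02x\n", checksum_model(base, sizeof(base))); /* 0x1e */
	return 0;
}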
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 1e2d4f1179da..f17b3441779b 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -417,7 +417,7 @@ static void ks8995_parse_dt(struct ks8995_switch *ks)
static const struct bin_attribute ks8995_registers_attr = {
.attr = {
.name = "registers",
- .mode = S_IRUSR | S_IWUSR,
+ .mode = 0600,
},
.size = KS8995_REGS_SIZE,
.read = ks8995_registers_read,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 926c2c322d43..dc7c7ec43202 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -970,7 +970,6 @@ static struct pernet_operations ppp_net_ops = {
.exit = ppp_exit_net,
.id = &ppp_net_id,
.size = sizeof(struct ppp_net),
- .async = true,
};
static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
@@ -1687,7 +1686,7 @@ ppp_push(struct ppp *ppp)
#ifdef CONFIG_PPP_MULTILINK
static bool mp_protocol_compress __read_mostly = true;
-module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
+module_param(mp_protocol_compress, bool, 0644);
MODULE_PARM_DESC(mp_protocol_compress,
"compress protocol id in multilink fragments");
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index c10e6181a2f0..1483bc7b01e1 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1142,7 +1142,7 @@ static __net_init int pppoe_init_net(struct net *net)
rwlock_init(&pn->hash_lock);
- pde = proc_create("pppoe", S_IRUGO, net->proc_net, &pppoe_seq_fops);
+ pde = proc_create("pppoe", 0444, net->proc_net, &pppoe_seq_fops);
#ifdef CONFIG_PROC_FS
if (!pde)
return -ENOMEM;
@@ -1161,7 +1161,6 @@ static struct pernet_operations pppoe_net_ops = {
.exit = pppoe_exit_net,
.id = &pppoe_net_id,
.size = sizeof(struct pppoe_net),
- .async = true,
};
static int __init pppoe_init(void)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 9e1b74590682..90d07ed224d5 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -58,7 +58,7 @@ static bool prefer_mbim = true;
#else
static bool prefer_mbim;
#endif
-module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR);
+module_param(prefer_mbim, bool, 0644);
MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions");
static void cdc_ncm_txpath_bh(unsigned long param);
@@ -281,10 +281,10 @@ static ssize_t cdc_ncm_store_tx_timer_usecs(struct device *d, struct device_att
return len;
}
-static DEVICE_ATTR(min_tx_pkt, S_IRUGO | S_IWUSR, cdc_ncm_show_min_tx_pkt, cdc_ncm_store_min_tx_pkt);
-static DEVICE_ATTR(rx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_rx_max, cdc_ncm_store_rx_max);
-static DEVICE_ATTR(tx_max, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_max, cdc_ncm_store_tx_max);
-static DEVICE_ATTR(tx_timer_usecs, S_IRUGO | S_IWUSR, cdc_ncm_show_tx_timer_usecs, cdc_ncm_store_tx_timer_usecs);
+static DEVICE_ATTR(min_tx_pkt, 0644, cdc_ncm_show_min_tx_pkt, cdc_ncm_store_min_tx_pkt);
+static DEVICE_ATTR(rx_max, 0644, cdc_ncm_show_rx_max, cdc_ncm_store_rx_max);
+static DEVICE_ATTR(tx_max, 0644, cdc_ncm_show_tx_max, cdc_ncm_store_tx_max);
+static DEVICE_ATTR(tx_timer_usecs, 0644, cdc_ncm_show_tx_timer_usecs, cdc_ncm_store_tx_timer_usecs);
static ssize_t ndp_to_end_show(struct device *d, struct device_attribute *attr, char *buf)
{
@@ -335,7 +335,7 @@ static ssize_t cdc_ncm_show_##name(struct device *d, struct device_attribute *at
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; \
return sprintf(buf, format "\n", tocpu(ctx->ncm_parm.name)); \
} \
-static DEVICE_ATTR(name, S_IRUGO, cdc_ncm_show_##name, NULL)
+static DEVICE_ATTR(name, 0444, cdc_ncm_show_##name, NULL)
NCM_PARM_ATTR(bmNtbFormatsSupported, "0x%04x", le16_to_cpu);
NCM_PARM_ATTR(dwNtbInMaxSize, "%u", le32_to_cpu);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 981c931a7a1f..e53883ad6107 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -519,7 +519,7 @@ static ssize_t hso_sysfs_show_porttype(struct device *dev,
return sprintf(buf, "%s\n", port_name);
}
-static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL);
+static DEVICE_ATTR(hsotype, 0444, hso_sysfs_show_porttype, NULL);
static struct attribute *hso_serial_dev_attrs[] = {
&dev_attr_hsotype.attr,
@@ -3289,12 +3289,12 @@ MODULE_LICENSE("GPL");
/* change the debug level (eg: insmod hso.ko debug=0x04) */
MODULE_PARM_DESC(debug, "debug level mask [0x01 | 0x02 | 0x04 | 0x08 | 0x10]");
-module_param(debug, int, S_IRUGO | S_IWUSR);
+module_param(debug, int, 0644);
/* set the major tty number (eg: insmod hso.ko tty_major=245) */
MODULE_PARM_DESC(tty_major, "Set the major tty number");
-module_param(tty_major, int, S_IRUGO | S_IWUSR);
+module_param(tty_major, int, 0644);
/* disable network interface (eg: insmod hso.ko disable_net=1) */
MODULE_PARM_DESC(disable_net, "Disable the network interface");
-module_param(disable_net, int, S_IRUGO | S_IWUSR);
+module_param(disable_net, int, 0644);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index c6be49d3a9eb..102582459bef 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1435,7 +1435,6 @@ static struct pernet_operations vrf_net_ops __net_initdata = {
.init = vrf_netns_init,
.id = &vrf_net_id,
.size = sizeof(bool),
- .async = true,
};
static int __init vrf_init_module(void)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index aa5f034d6ad1..fab7a4db249e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3752,7 +3752,6 @@ static struct pernet_operations vxlan_net_ops = {
.exit_batch = vxlan_exit_batch_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
- .async = true,
};
static int __init vxlan_init_module(void)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 100cf42db65d..6afe896e5cb8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2584,8 +2584,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
addr[4] = idx;
memcpy(data->addresses[0].addr, addr, ETH_ALEN);
/* Why is a second address needed here? */
- data->addresses[1].addr[0] |= 0x40;
memcpy(data->addresses[1].addr, addr, ETH_ALEN);
+ data->addresses[1].addr[0] |= 0x40;
hw->wiphy->n_addresses = 2;
hw->wiphy->addresses = data->addresses;
/* possible address clash is checked at hash table insertion */
@@ -3529,8 +3529,12 @@ static void __net_exit hwsim_exit_net(struct net *net)
list_del(&data->list);
rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
hwsim_rht_params);
- INIT_WORK(&data->destroy_work, destroy_radio);
- queue_work(hwsim_wq, &data->destroy_work);
+ hwsim_radios_generation++;
+ spin_unlock_bh(&hwsim_radio_lock);
+ mac80211_hwsim_del_radio(data,
+ wiphy_name(data->hw->wiphy),
+ NULL);
+ spin_lock_bh(&hwsim_radio_lock);
}
spin_unlock_bh(&hwsim_radio_lock);
@@ -3542,7 +3546,6 @@ static struct pernet_operations hwsim_net_ops = {
.exit = hwsim_exit_net,
.id = &hwsim_net_id,
.size = sizeof(struct hwsim_net),
- .async = true,
};
static void hwsim_exit_netlink(void)
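
The two-line reorder in mac80211_hwsim_new_radio() above fixes an ordering
bug: the locally-administered bit (0x40) was set before memcpy() copied the
template address over it, so the bit was always lost. A userspace
demonstration of both orderings (the buffers are hypothetical):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint8_t addr[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	uint8_t second[6];

	second[0] = addr[0] | 0x40;		/* old order */
	memcpy(second, addr, sizeof(second));	/* ...overwrites the bit */
	printf("broken: %02x\n", second[0]);	/* 02 */

	memcpy(second, addr, sizeof(second));	/* fixed order */
	second[0] |= 0x40;
	printf("fixed:  %02x\n", second[0]);	/* 42 */
	return 0;
}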
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index a56d3eab35dd..e1aef253601e 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -224,7 +224,7 @@ static void xenvif_debugfs_addif(struct xenvif *vif)
snprintf(filename, sizeof(filename), "io_ring_q%d", i);
pfile = debugfs_create_file(filename,
- S_IRUSR | S_IWUSR,
+ 0600,
vif->xenvif_dbg_root,
&vif->queues[i],
&xenvif_dbg_io_ring_ops_fops);
@@ -235,7 +235,7 @@ static void xenvif_debugfs_addif(struct xenvif *vif)
if (vif->ctrl_irq) {
pfile = debugfs_create_file("ctrl",
- S_IRUSR,
+ 0400,
vif->xenvif_dbg_root,
vif,
&xenvif_dbg_ctrl_ops_fops);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 3127bc8633ca..4dd0668003e7 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2113,9 +2113,9 @@ static ssize_t store_rxbuf(struct device *dev,
return len;
}
-static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
-static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
-static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
+static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
+static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
+static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
static struct attribute *xennet_dev_attrs[] = {
&dev_attr_rxbuf_min.attr,
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index f38d6a561a84..72217170b155 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -118,6 +118,7 @@ struct afs_call {
bool ret_reply0; /* T if should return reply[0] on success */
bool upgrade; /* T to request service upgrade */
u16 service_id; /* Actual service ID (after upgrade) */
+ unsigned int debug_id; /* Trace ID */
u32 operation_ID; /* operation ID for an incoming call */
u32 count; /* count for use in unmarshalling */
__be32 tmp; /* place to extract temporary data */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index e1126659f043..b819900916e6 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -131,6 +131,7 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
call->type = type;
call->net = net;
+ call->debug_id = atomic_inc_return(&rxrpc_debug_id);
atomic_set(&call->usage, 1);
INIT_WORK(&call->async_work, afs_process_async_call);
init_waitqueue_head(&call->waitq);
@@ -169,11 +170,12 @@ void afs_put_call(struct afs_call *call)
afs_put_server(call->net, call->cm_server);
afs_put_cb_interest(call->net, call->cbi);
kfree(call->request);
- kfree(call);
- o = atomic_dec_return(&net->nr_outstanding_calls);
trace_afs_call(call, afs_call_trace_free, 0, o,
__builtin_return_address(0));
+ kfree(call);
+
+ o = atomic_dec_return(&net->nr_outstanding_calls);
if (o == 0)
wake_up_atomic_t(&net->nr_outstanding_calls);
}
@@ -378,7 +380,8 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
(async ?
afs_wake_up_async_call :
afs_wake_up_call_waiter),
- call->upgrade);
+ call->upgrade,
+ call->debug_id);
if (IS_ERR(rxcall)) {
ret = PTR_ERR(rxcall);
goto error_kill_call;
@@ -727,7 +730,8 @@ void afs_charge_preallocation(struct work_struct *work)
afs_wake_up_async_call,
afs_rx_attach,
(unsigned long)call,
- GFP_KERNEL) < 0)
+ GFP_KERNEL,
+ call->debug_id) < 0)
break;
call = NULL;
}
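
The debug_id plumbed through afs_make_call() and the preallocation path above
follows the usual trace-ID pattern: a global atomic counter hands out unique,
ordered IDs so trace lines belonging to one call can be correlated. A sketch
(the counter and struct names are illustrative; afs itself reuses rxrpc's
rxrpc_debug_id, as seen in the hunk above):

static atomic_t example_debug_id;

struct example_call {
	unsigned int debug_id;	/* stable ID for tracepoints */
};

static void example_call_init(struct example_call *call)
{
	/* atomic_inc_return() never hands out the same ID twice */
	call->debug_id = atomic_inc_return(&example_debug_id);
}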
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 2dee4e03ff1c..9c36d614bf89 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -709,7 +709,6 @@ static struct pernet_operations lockd_net_ops = {
.exit = lockd_exit_net,
.id = &lockd_net_id,
.size = sizeof(struct lockd_net),
- .async = true,
};
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 6c3083c992e5..7d893543cf3b 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -2122,7 +2122,6 @@ static struct pernet_operations nfs_net_ops = {
.exit = nfs_net_exit,
.id = &nfs_net_id,
.size = sizeof(struct nfs_net),
- .async = true,
};
/*
diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
index 8c743a405df6..5be08f02a76b 100644
--- a/fs/nfs_common/grace.c
+++ b/fs/nfs_common/grace.c
@@ -118,7 +118,6 @@ static struct pernet_operations grace_net_ops = {
.exit = grace_exit_net,
.id = &grace_net_id,
.size = sizeof(struct list_head),
- .async = true,
};
static int __init
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index da6f8733c9c5..68c06ae7888c 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -237,7 +237,6 @@ static __net_exit void proc_net_ns_exit(struct net *net)
static struct pernet_operations __net_initdata proc_net_ns_ops = {
.init = proc_net_ns_init,
.exit = proc_net_ns_exit,
- .async = true,
};
int __init proc_net_init(void)
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 445ad194e0fe..0ef6138eca49 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -193,6 +193,12 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
struct mlx5_core_cq *cq, u16 cq_period,
u16 cq_max_count);
+static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev,
+ struct mlx5_err_cqe *err_cqe)
+{
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+}
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
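The new mlx5_dump_err_cqe() is a thin wrapper that hex-dumps the whole 64-byte error CQE at KERN_WARNING level. A hypothetical caller on a CQE error path might look like this; only the helper and the mlx5 types are from this patch, the handler itself is illustrative:

/* Illustrative error path: report the syndrome, then dump the raw CQE. */
static void handle_err_cqe(struct mlx5_core_dev *dev, struct mlx5_cqe64 *cqe64)
{
	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64;

	mlx5_core_warn(dev, "error cqe, syndrome 0x%x\n", err_cqe->syndrome);
	mlx5_dump_err_cqe(dev, err_cqe);
}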
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index e5258ee4e38b..4b5939c78cdd 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1013,6 +1013,7 @@ enum mlx5_cap_type {
MLX5_CAP_RESERVED,
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
+ MLX5_CAP_DEBUG,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1140,6 +1141,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_QOS(mdev, cap)\
MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+#define MLX5_CAP_DEBUG(mdev, cap)\
+ MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
+
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b957e52434f8..47aecc4fa8c2 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -142,6 +142,12 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_fs_vlan {
+ u16 ethtype;
+ u16 vid;
+ u8 prio;
+};
+
struct mlx5_flow_act {
u32 action;
bool has_flow_tag;
@@ -149,6 +155,7 @@ struct mlx5_flow_act {
u32 encap_id;
u32 modify_id;
uintptr_t esp_id;
+ struct mlx5_fs_vlan vlan;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
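The new mlx5_fs_vlan member of mlx5_flow_act carries the 802.1Q header fields for the VLAN push action added to mlx5_ifc.h later in this patch. A hedged sketch of populating it, with the VLAN values purely illustrative:

#include <linux/if_ether.h>

/* Build a flow_act that pushes VLAN 100 at priority 3. The
 * MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH flag is defined further down
 * in the include/linux/mlx5/mlx5_ifc.h hunks.
 */
static void fill_vlan_push(struct mlx5_flow_act *flow_act)
{
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
	flow_act->vlan.ethtype = ETH_P_8021Q;	/* 0x8100 */
	flow_act->vlan.vid = 100;
	flow_act->vlan.prio = 3;
}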
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 14ad84afe8ba..c19e611d2782 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -143,6 +143,7 @@ enum {
MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
+ MLX5_CMD_OP_QUERY_VNIC_ENV = 0x76f,
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
@@ -313,7 +314,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 flow_table_modify[0x1];
u8 encap[0x1];
u8 decap[0x1];
- u8 reserved_at_9[0x17];
+ u8 reserved_at_9[0x1];
+ u8 pop_vlan[0x1];
+ u8 push_vlan[0x1];
+ u8 reserved_at_c[0x14];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
@@ -593,6 +597,16 @@ struct mlx5_ifc_qos_cap_bits {
u8 reserved_at_100[0x700];
};
+struct mlx5_ifc_debug_cap_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x2];
+ u8 stall_detect[0x1];
+ u8 reserved_at_23[0x1d];
+
+ u8 reserved_at_40[0x7c0];
+};
+
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 csum_cap[0x1];
u8 vlan_cap[0x1];
@@ -855,7 +869,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
u8 retransmission_q_counters[0x1];
- u8 reserved_at_183[0x1];
+ u8 debug[0x1];
u8 modify_rq_counter_set_id[0x1];
u8 rq_delay_drop[0x1];
u8 max_qp_cnt[0xa];
@@ -865,7 +879,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vhca_group_manager[0x1];
u8 ib_virt[0x1];
u8 eth_virt[0x1];
- u8 reserved_at_1a4[0x1];
+ u8 vnic_env_queue_counters[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
@@ -997,7 +1011,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_330[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_at_340[0x8];
+ u8 nic_receive_steering_discard[0x1];
+ u8 receive_discard_vport_down[0x1];
+ u8 transmit_discard_vport_down[0x1];
+ u8 reserved_at_343[0x5];
u8 log_max_flow_counter_bulk[0x8];
u8 max_flow_counter_15_0[0x10];
@@ -1572,7 +1589,17 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
u8 rx_pause_transition_low[0x20];
- u8 reserved_at_3c0[0x400];
+ u8 reserved_at_3c0[0x40];
+
+ u8 device_stall_minor_watermark_cnt_high[0x20];
+
+ u8 device_stall_minor_watermark_cnt_low[0x20];
+
+ u8 device_stall_critical_watermark_cnt_high[0x20];
+
+ u8 device_stall_critical_watermark_cnt_low[0x20];
+
+ u8 reserved_at_480[0x340];
};
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
@@ -2287,10 +2314,19 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
+};
+
+struct mlx5_ifc_vlan_bits {
+ u8 ethtype[0x10];
+ u8 prio[0x3];
+ u8 cfi[0x1];
+ u8 vid[0xc];
};
struct mlx5_ifc_flow_context_bits {
- u8 reserved_at_0[0x20];
+ struct mlx5_ifc_vlan_bits push_vlan;
u8 group_id[0x20];
@@ -2366,6 +2402,24 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 reserved_at_180[0x80];
};
+struct mlx5_ifc_vnic_diagnostic_statistics_bits {
+ u8 counter_error_queues[0x20];
+
+ u8 total_error_queues[0x20];
+
+ u8 send_queue_priority_update_flow[0x20];
+
+ u8 reserved_at_60[0x20];
+
+ u8 nic_receive_steering_discard[0x40];
+
+ u8 receive_discard_vport_down[0x40];
+
+ u8 transmit_discard_vport_down[0x40];
+
+ u8 reserved_at_140[0xec0];
+};
+
struct mlx5_ifc_traffic_counter_bits {
u8 packets[0x40];
@@ -3641,6 +3695,35 @@ struct mlx5_ifc_query_vport_state_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_vnic_env_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_vnic_diagnostic_statistics_bits vport_env;
+};
+
+enum {
+ MLX5_QUERY_VNIC_ENV_IN_OP_MOD_VPORT_DIAG_STATISTICS = 0x0,
+};
+
+struct mlx5_ifc_query_vnic_env_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_query_vport_counter_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7813,7 +7896,11 @@ struct mlx5_ifc_pifr_reg_bits {
struct mlx5_ifc_pfcc_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_at_10[0x10];
+ u8 reserved_at_10[0xb];
+ u8 ppan_mask_n[0x1];
+ u8 minor_stall_mask[0x1];
+ u8 critical_stall_mask[0x1];
+ u8 reserved_at_1e[0x2];
u8 ppan[0x4];
u8 reserved_at_24[0x4];
@@ -7823,17 +7910,22 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 pptx[0x1];
u8 aptx[0x1];
- u8 reserved_at_42[0x6];
+ u8 pptx_mask_n[0x1];
+ u8 reserved_at_43[0x5];
u8 pfctx[0x8];
u8 reserved_at_50[0x10];
u8 pprx[0x1];
u8 aprx[0x1];
- u8 reserved_at_62[0x6];
+ u8 pprx_mask_n[0x1];
+ u8 reserved_at_63[0x5];
u8 pfcrx[0x8];
u8 reserved_at_70[0x10];
- u8 reserved_at_80[0x80];
+ u8 device_stall_minor_watermark[0x10];
+ u8 device_stall_critical_watermark[0x10];
+
+ u8 reserved_at_a0[0x60];
};
struct mlx5_ifc_pelc_reg_bits {
@@ -7874,8 +7966,10 @@ struct mlx5_ifc_peir_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x7b];
+ u8 reserved_at_0[0x76];
+ u8 pfcc_mask[0x1];
+ u8 reserved_at_77[0x4];
u8 rx_buffer_fullness_counters[0x1];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 035f0d4dc9fe..34aed6032f86 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -151,6 +151,12 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
u8 *pfc_en_rx);
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark, u16 *stall_minor_watermark);
+
int mlx5_max_tc(struct mlx5_core_dev *mdev);
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 7e8f281f8c00..80d7aa8b2831 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -47,6 +47,7 @@ int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
+int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 64e193e87394..9208cb8809ac 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -107,6 +107,9 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
+ u64 *rx_discard_vport_down,
+ u64 *tx_discard_vport_down);
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
int vf, u8 port_num, void *out,
size_t out_sz);
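mlx5_query_vport_down_stats() surfaces the two vport-down discard counters added to the QUERY_VNIC_ENV command earlier in this patch. An illustrative caller; the vport number and reporting format are assumptions, not anything from the patch:

/* Fetch and print the vport-down discard counters for vport 0. */
static void report_vport_down_discards(struct mlx5_core_dev *mdev)
{
	u64 rx_discard, tx_discard;

	if (mlx5_query_vport_down_stats(mdev, 0, &rx_discard, &tx_discard))
		return;

	pr_info("rx_discard_vport_down=%llu tx_discard_vport_down=%llu\n",
		(unsigned long long)rx_discard,
		(unsigned long long)tx_discard);
}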
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 7ed82e4f11b3..9a36fad9e068 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -55,14 +55,6 @@ static inline bool ipmr_rule_default(const struct fib_rule *rule)
}
#endif
-struct vif_entry_notifier_info {
- struct fib_notifier_info info;
- struct net_device *dev;
- vifi_t vif_index;
- unsigned short vif_flags;
- u32 tb_id;
-};
-
#define VIFF_STATIC 0x8000
struct mfc_cache_cmp_arg {
@@ -88,33 +80,8 @@ struct mfc_cache {
};
};
-struct mfc_entry_notifier_info {
- struct fib_notifier_info info;
- struct mfc_cache *mfc;
- u32 tb_id;
-};
-
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
struct rtmsg *rtm, u32 portid);
-
-#ifdef CONFIG_IP_MROUTE
-void ipmr_cache_free(struct mfc_cache *mfc_cache);
-#else
-static inline void ipmr_cache_free(struct mfc_cache *mfc_cache)
-{
-}
-#endif
-
-static inline void ipmr_cache_put(struct mfc_cache *c)
-{
- if (refcount_dec_and_test(&c->_c.mfc_un.res.refcount))
- ipmr_cache_free(c);
-}
-static inline void ipmr_cache_hold(struct mfc_cache *c)
-{
- refcount_inc(&c->_c.mfc_un.res.refcount);
-}
-
#endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 1ac38e6819f5..c4a45859f586 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -8,6 +8,7 @@
#include <net/net_namespace.h>
#include <uapi/linux/mroute6.h>
#include <linux/mroute_base.h>
+#include <net/fib_rules.h>
#ifdef CONFIG_IPV6_MROUTE
static inline int ip6_mroute_opt(int opt)
@@ -63,6 +64,15 @@ static inline void ip6_mr_cleanup(void)
}
#endif
+#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+bool ip6mr_rule_default(const struct fib_rule *rule);
+#else
+static inline bool ip6mr_rule_default(const struct fib_rule *rule)
+{
+ return true;
+}
+#endif
+
#define VIFF_STATIC 0x8000
struct mfc6_cache_cmp_arg {
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index c2560cb50f1d..d617fe45543e 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -6,6 +6,7 @@
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/sock.h>
+#include <net/fib_notifier.h>
/**
* struct vif_device - interface representor for multicast routing
@@ -36,6 +37,58 @@ struct vif_device {
__be32 local, remote;
};
+struct vif_entry_notifier_info {
+ struct fib_notifier_info info;
+ struct net_device *dev;
+ unsigned short vif_index;
+ unsigned short vif_flags;
+ u32 tb_id;
+};
+
+static inline int mr_call_vif_notifier(struct notifier_block *nb,
+ struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct vif_device *vif,
+ unsigned short vif_index, u32 tb_id)
+{
+ struct vif_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .dev = vif->dev,
+ .vif_index = vif_index,
+ .vif_flags = vif->flags,
+ .tb_id = tb_id,
+ };
+
+ return call_fib_notifier(nb, net, event_type, &info.info);
+}
+
+static inline int mr_call_vif_notifiers(struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct vif_device *vif,
+ unsigned short vif_index, u32 tb_id,
+ unsigned int *ipmr_seq)
+{
+ struct vif_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .dev = vif->dev,
+ .vif_index = vif_index,
+ .vif_flags = vif->flags,
+ .tb_id = tb_id,
+ };
+
+ ASSERT_RTNL();
+ (*ipmr_seq)++;
+ return call_fib_notifiers(net, event_type, &info.info);
+}
+
#ifndef MAXVIFS
/* This one is nasty; the value is defined in uapi using different symbols for
 * mroute and mroute6 but both map into the same 32 bits.
@@ -72,6 +125,7 @@ enum {
* @refcount: reference count for this entry
* @list: global entry list
* @rcu: used for entry destruction
+ * @free: Operation used for freeing an entry under RCU
*/
struct mr_mfc {
struct rhlist_head mnode;
@@ -97,8 +151,64 @@ struct mr_mfc {
} mfc_un;
struct list_head list;
struct rcu_head rcu;
+ void (*free)(struct rcu_head *head);
+};
+
+static inline void mr_cache_put(struct mr_mfc *c)
+{
+ if (refcount_dec_and_test(&c->mfc_un.res.refcount))
+ call_rcu(&c->rcu, c->free);
+}
+
+static inline void mr_cache_hold(struct mr_mfc *c)
+{
+ refcount_inc(&c->mfc_un.res.refcount);
+}
+
+struct mfc_entry_notifier_info {
+ struct fib_notifier_info info;
+ struct mr_mfc *mfc;
+ u32 tb_id;
};
+static inline int mr_call_mfc_notifier(struct notifier_block *nb,
+ struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct mr_mfc *mfc, u32 tb_id)
+{
+ struct mfc_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .mfc = mfc,
+ .tb_id = tb_id
+ };
+
+ return call_fib_notifier(nb, net, event_type, &info.info);
+}
+
+static inline int mr_call_mfc_notifiers(struct net *net,
+ unsigned short family,
+ enum fib_event_type event_type,
+ struct mr_mfc *mfc, u32 tb_id,
+ unsigned int *ipmr_seq)
+{
+ struct mfc_entry_notifier_info info = {
+ .info = {
+ .family = family,
+ .net = net,
+ },
+ .mfc = mfc,
+ .tb_id = tb_id
+ };
+
+ ASSERT_RTNL();
+ (*ipmr_seq)++;
+ return call_fib_notifiers(net, event_type, &info.info);
+}
+
struct mr_table;
/**
@@ -180,6 +290,13 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
spinlock_t *lock);
+
+int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
+ int (*rules_dump)(struct net *net,
+ struct notifier_block *nb),
+ struct mr_table *(*mr_iter)(struct net *net,
+ struct mr_table *mrt),
+ rwlock_t *mrt_lock);
#else
static inline void vif_device_init(struct vif_device *v,
struct net_device *dev,
@@ -236,6 +353,17 @@ mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
{
return -EINVAL;
}
+
+static inline int mr_dump(struct net *net, struct notifier_block *nb,
+ unsigned short family,
+ int (*rules_dump)(struct net *net,
+ struct notifier_block *nb),
+ struct mr_table *(*mr_iter)(struct net *net,
+ struct mr_table *mrt),
+ rwlock_t *mrt_lock)
+{
+ return -EINVAL;
+}
#endif
static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg)
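With the refcount and the new ->free callback hoisted into the generic mr_mfc, the per-family ipmr_cache_hold()/ipmr_cache_put() helpers deleted from mroute.h above collapse into mr_cache_hold()/mr_cache_put(). A sketch of the intended usage, assuming ->free was set to the family's RCU free routine when the entry was allocated:

/* Take a temporary reference on a multicast route entry. When the
 * last reference drops, mr_cache_put() defers freeing to call_rcu()
 * through the family-specific c->free callback.
 */
static void example_mfc_ref(struct mr_mfc *c)
{
	mr_cache_hold(c);
	/* ... use the entry outside the table lock ... */
	mr_cache_put(c);
}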
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 913b1cc882cf..2a2d9cf50aa2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2312,43 +2312,45 @@ struct netdev_lag_lower_state_info {
#include <linux/notifier.h>
-/* netdevice notifier chain. Please remember to update the rtnetlink
- * notification exclusion list in rtnetlink_event() when adding new
- * types.
+/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
+ * and the rtnetlink notification exclusion list in rtnetlink_event() when
+ * adding new types.
*/
-#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
-#define NETDEV_DOWN 0x0002
-#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+enum netdev_cmd {
+ NETDEV_UP = 1, /* For now you can't veto a device up/down */
+ NETDEV_DOWN,
+ NETDEV_REBOOT, /* Tell a protocol stack a network interface
detected a hardware crash and restarted
- we can use this eg to kick tcp sessions
once done */
-#define NETDEV_CHANGE 0x0004 /* Notify device state change */
-#define NETDEV_REGISTER 0x0005
-#define NETDEV_UNREGISTER 0x0006
-#define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
-#define NETDEV_CHANGEADDR 0x0008
-#define NETDEV_GOING_DOWN 0x0009
-#define NETDEV_CHANGENAME 0x000A
-#define NETDEV_FEAT_CHANGE 0x000B
-#define NETDEV_BONDING_FAILOVER 0x000C
-#define NETDEV_PRE_UP 0x000D
-#define NETDEV_PRE_TYPE_CHANGE 0x000E
-#define NETDEV_POST_TYPE_CHANGE 0x000F
-#define NETDEV_POST_INIT 0x0010
-#define NETDEV_UNREGISTER_FINAL 0x0011
-#define NETDEV_RELEASE 0x0012
-#define NETDEV_NOTIFY_PEERS 0x0013
-#define NETDEV_JOIN 0x0014
-#define NETDEV_CHANGEUPPER 0x0015
-#define NETDEV_RESEND_IGMP 0x0016
-#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
-#define NETDEV_CHANGEINFODATA 0x0018
-#define NETDEV_BONDING_INFO 0x0019
-#define NETDEV_PRECHANGEUPPER 0x001A
-#define NETDEV_CHANGELOWERSTATE 0x001B
-#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
-#define NETDEV_UDP_TUNNEL_DROP_INFO 0x001D
-#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
+ NETDEV_CHANGE, /* Notify device state change */
+ NETDEV_REGISTER,
+ NETDEV_UNREGISTER,
+ NETDEV_CHANGEMTU, /* notify after mtu change happened */
+ NETDEV_CHANGEADDR,
+ NETDEV_GOING_DOWN,
+ NETDEV_CHANGENAME,
+ NETDEV_FEAT_CHANGE,
+ NETDEV_BONDING_FAILOVER,
+ NETDEV_PRE_UP,
+ NETDEV_PRE_TYPE_CHANGE,
+ NETDEV_POST_TYPE_CHANGE,
+ NETDEV_POST_INIT,
+ NETDEV_RELEASE,
+ NETDEV_NOTIFY_PEERS,
+ NETDEV_JOIN,
+ NETDEV_CHANGEUPPER,
+ NETDEV_RESEND_IGMP,
+ NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
+ NETDEV_CHANGEINFODATA,
+ NETDEV_BONDING_INFO,
+ NETDEV_PRECHANGEUPPER,
+ NETDEV_CHANGELOWERSTATE,
+ NETDEV_UDP_TUNNEL_PUSH_INFO,
+ NETDEV_UDP_TUNNEL_DROP_INFO,
+ NETDEV_CHANGE_TX_QUEUE_LEN,
+};
+const char *netdev_cmd_to_name(enum netdev_cmd cmd);
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
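Because the notifier constants are now a proper enum, netdev_cmd_to_name() can translate an event into a printable string. A minimal notifier sketch; the handler and block names are illustrative and module boilerplate is omitted:

#include <linux/netdevice.h>
#include <linux/notifier.h>

/* Log every netdev notifier event by name. */
static int log_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	pr_info("%s: %s\n", dev->name,
		netdev_cmd_to_name((enum netdev_cmd)event));
	return NOTIFY_DONE;
}

static struct notifier_block log_netdev_nb = {
	.notifier_call = log_netdev_event,
};
/* registered at init time via register_netdevice_notifier(&log_netdev_nb) */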
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 2b3b350e07b7..13c8ab171437 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -110,7 +110,7 @@
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 33
-#define FW_REVISION_VERSION 1
+#define FW_REVISION_VERSION 11
#define FW_ENGINEERING_VERSION 0
/***********************/
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 9db02856623b..d9416ad5ef59 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -105,7 +105,7 @@
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
/* GFS constants */
-#define ETH_GFT_TRASH_CAN_VPORT 0x1FF
+#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
/* Destination port mode */
enum dest_port_mode {
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 4cc9b37b8d95..938df614cb6a 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -753,8 +753,8 @@ struct e4_ystorm_iscsi_task_ag_ctx {
#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
u8 flags1;
#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 15e398c7230e..b5b2bc9eacca 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -483,6 +483,15 @@ struct qed_int_info {
u8 used_cnt;
};
+#define QED_NVM_SIGNATURE 0x12435687
+
+enum qed_nvm_flash_cmd {
+ QED_NVM_FLASH_CMD_FILE_DATA = 0x2,
+ QED_NVM_FLASH_CMD_FILE_START = 0x3,
+ QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4,
+ QED_NVM_FLASH_CMD_NVM_MAX,
+};
+
struct qed_common_cb_ops {
void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
void (*link_update)(void *dev,
@@ -658,6 +667,16 @@ struct qed_common_ops {
struct qed_chain *p_chain);
/**
+ * @brief nvm_flash - Flash nvm data.
+ *
+ * @param cdev
+ * @param name - file containing the data
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*nvm_flash)(struct qed_dev *cdev, const char *name);
+
+/**
* @brief nvm_get_image - reads an entire image from nvram
*
* @param cdev
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index c1a446ebe362..480a57eb36cc 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -51,6 +51,8 @@
#define RDMA_MAX_CQS (64 * 1024)
#define RDMA_MAX_TIDS (128 * 1024 - 1)
#define RDMA_MAX_PDS (64 * 1024)
+#define RDMA_MAX_XRC_SRQS (1024)
+#define RDMA_MAX_SRQS (32 * 1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index e15e0da71240..193bcef302e1 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -59,6 +59,9 @@ enum roce_async_events_type {
ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
ROCE_ASYNC_EVENT_SRQ_EMPTY,
ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+ ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR,
+ ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR,
+ ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR,
MAX_ROCE_ASYNC_EVENTS_TYPE
};
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 562a175c35a9..5225832bd6ff 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -36,7 +36,8 @@ extern int rtnl_is_locked(void);
extern int rtnl_lock_killable(void);
extern wait_queue_head_t netdev_unregistering_wq;
-extern struct rw_semaphore net_sem;
+extern struct rw_semaphore pernet_ops_rwsem;
+extern struct rw_semaphore net_rwsem;
#ifdef CONFIG_PROVE_LOCKING
extern bool lockdep_rtnl_is_held(void);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 2b3a6eec4570..8ae8ee004258 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -31,6 +31,11 @@ enum rxrpc_call_completion {
NR__RXRPC_CALL_COMPLETIONS
};
+/*
+ * Debug ID counter for tracing.
+ */
+extern atomic_t rxrpc_debug_id;
+
typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
unsigned long);
typedef void (*rxrpc_notify_end_tx_t)(struct sock *, struct rxrpc_call *,
@@ -50,7 +55,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
s64,
gfp_t,
rxrpc_notify_rx_t,
- bool);
+ bool,
+ unsigned int);
int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
rxrpc_notify_end_tx_t);
@@ -63,7 +69,8 @@ void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *);
u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
- rxrpc_user_attach_call_t, unsigned long, gfp_t);
+ rxrpc_user_attach_call_t, unsigned long, gfp_t,
+ unsigned int);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *, struct key *);
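Both kernel-API entry points now take an extra debug ID, drawn from the exported rxrpc_debug_id counter; the afs hunks above allocate theirs this way in afs_alloc_call(). A sketch of the allocation side, which is all a caller needs to add:

/* Allocate a trace ID to pass as the new final argument of
 * rxrpc_kernel_begin_call() / rxrpc_kernel_charge_accept().
 */
static unsigned int new_call_debug_id(void)
{
	return atomic_inc_return(&rxrpc_debug_id);
}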
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index fc40843baed3..250dac390806 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -6,6 +6,7 @@
* Copyright 2006-2010 Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -646,6 +647,8 @@ struct survey_info {
* allowed through even on unauthorized ports
* @control_port_no_encrypt: TRUE to prevent encryption of control port
* protocol frames.
+ * @control_port_over_nl80211: TRUE if userspace expects to exchange control
+ * port frames over NL80211 instead of the network interface.
* @wep_keys: static WEP keys, if not NULL points to an array of
* CFG80211_MAX_WEP_KEYS WEP keys
* @wep_tx_key: key index (0..3) of the default TX static WEP key
@@ -661,6 +664,7 @@ struct cfg80211_crypto_settings {
bool control_port;
__be16 control_port_ethertype;
bool control_port_no_encrypt;
+ bool control_port_over_nl80211;
struct key_params *wep_keys;
int wep_tx_key;
const u8 *psk;
@@ -1450,6 +1454,8 @@ struct mesh_config {
* @userspace_handles_dfs: whether user space controls DFS operation, i.e.
* changes the channel when a radar is detected. This is required
* to operate on DFS channels.
+ * @control_port_over_nl80211: TRUE if userspace expects to exchange control
+ * port frames over NL80211 instead of the network interface.
*
* These parameters are fixed when the mesh is created.
*/
@@ -1472,6 +1478,7 @@ struct mesh_setup {
u32 basic_rates;
struct cfg80211_bitrate_mask beacon_rate;
bool userspace_handles_dfs;
+ bool control_port_over_nl80211;
};
/**
@@ -2030,6 +2037,8 @@ struct cfg80211_disassoc_request {
* sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
* required to assume that the port is unauthorized until authorized by
* user space. Otherwise, port is marked authorized by default.
+ * @control_port_over_nl80211: TRUE if userspace expects to exchange control
+ * port frames over NL80211 instead of the network interface.
* @userspace_handles_dfs: whether user space controls DFS operation, i.e.
* changes the channel when a radar is detected. This is required
* to operate on DFS channels.
@@ -2053,6 +2062,7 @@ struct cfg80211_ibss_params {
bool channel_fixed;
bool privacy;
bool control_port;
+ bool control_port_over_nl80211;
bool userspace_handles_dfs;
int mcast_rate[NUM_NL80211_BANDS];
struct ieee80211_ht_cap ht_capa;
@@ -2960,6 +2970,9 @@ struct cfg80211_external_auth_params {
*
* @external_auth: indicates result of offloaded authentication processing from
* user space
+ *
+ * @tx_control_port: TX a control port frame (EAPoL). The noencrypt parameter
+ * tells the driver that the frame should not be encrypted.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3255,6 +3268,12 @@ struct cfg80211_ops {
const u8 *aa);
int (*external_auth)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_external_auth_params *params);
+
+ int (*tx_control_port)(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *buf, size_t len,
+ const u8 *dest, const __be16 proto,
+ const bool noencrypt);
};
/*
@@ -3572,15 +3591,15 @@ enum wiphy_opmode_flag {
/**
* struct sta_opmode_info - Station's ht/vht operation mode information
* @changed: contains value from &enum wiphy_opmode_flag
- * @smps_mode: New SMPS mode of a station
- * @bw: new max bandwidth value of a station
+ * @smps_mode: New SMPS mode value from &enum nl80211_smps_mode of a station
+ * @bw: new max bandwidth value from &enum nl80211_chan_width of a station
* @rx_nss: new rx_nss value of a station
*/
struct sta_opmode_info {
u32 changed;
- u8 smps_mode;
- u8 bw;
+ enum nl80211_smps_mode smps_mode;
+ enum nl80211_chan_width bw;
u8 rx_nss;
};
@@ -4657,6 +4676,33 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
*/
const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
+/**
+ * DOC: Internal regulatory db functions
+ *
+ */
+
+/**
+ * reg_query_regdb_wmm - Query internal regulatory db for wmm rule
+ * Self-managed regulatory drivers can use it to proactively query the
+ * wmm rule for a given country and frequency.
+ *
+ * @alpha2: the ISO/IEC 3166 alpha2 country code whose wmm rule is queried.
+ * @freq: the frequency (in MHz) to be queried.
+ * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
+ * irrelevant). This can be used later for deduplication.
+ * @rule: pointer to store the wmm rule from the regulatory db.
+ *
+ * Self-managed wireless drivers can use this function to query
+ * the internal regulatory database to check whether the given
+ * ISO/IEC 3166 alpha2 country and freq have wmm rule limitations.
+ *
+ * Drivers should check the return value; it is possible to get
+ * -ENODATA.
+ *
+ * Return: 0 on success, or -ENODATA if no wmm rule was found.
+ */
+int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr,
+ struct ieee80211_wmm_rule *rule);
+
/*
* callbacks for asynchronous cfg80211 methods, notification
* functions and BSS handling helpers
@@ -5694,6 +5740,28 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
/**
+ * cfg80211_rx_control_port - notification about a received control port frame
+ * @dev: The device the frame matched to
+ * @buf: control port frame
+ * @len: length of the frame data
+ * @addr: The peer from which the frame was received
+ * @proto: frame protocol, typically PAE or Pre-authentication
+ * @unencrypted: Whether the frame was received unencrypted
+ *
+ * This function is used to inform userspace about a received control port
+ * frame. It should only be used if userspace indicated it wants to receive
+ * control port frames over nl80211.
+ *
+ * The frame is the data portion of the 802.3 or 802.11 data frame with all
+ * network layer headers removed (e.g. the raw EAPoL frame).
+ *
+ * Return: %true if the frame was passed to userspace
+ */
+bool cfg80211_rx_control_port(struct net_device *dev,
+ const u8 *buf, size_t len,
+ const u8 *addr, u16 proto, bool unencrypted);
+
+/**
* cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
* @dev: network device
* @rssi_event: the triggered RSSI event
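On the receive side, a driver that was asked for control-port-over-nl80211 hands EAPoL frames to the new cfg80211_rx_control_port() instead of the normal netif_rx() path. A hedged sketch of that decision; the wants_nl80211 flag stands in for whatever per-connection state the driver keeps:

#include <linux/if_ether.h>

/* Deliver an EAPoL frame over nl80211 if userspace asked for it;
 * returns true if userspace received the frame.
 */
static bool deliver_eapol(struct net_device *dev, struct sk_buff *skb,
			  const u8 *src_addr, bool wants_nl80211,
			  bool unencrypted)
{
	if (!wants_nl80211)
		return false;	/* fall back to the netif_rx() path */

	return cfg80211_rx_control_port(dev, skb->data, skb->len,
					src_addr, ETH_P_PAE, unencrypted);
}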
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 2449982daf75..d2279b2d61aa 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -302,6 +302,8 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed
* @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected
* keep alive) changed.
+ * @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface
+ *
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -329,6 +331,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_OCB = 1<<22,
BSS_CHANGED_MU_GROUPS = 1<<23,
BSS_CHANGED_KEEP_ALIVE = 1<<24,
+ BSS_CHANGED_MCAST_RATE = 1<<25,
/* when adding here, make sure to change ieee80211_reconfig */
};
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 09e30bdc7876..47e35cce3b64 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -60,9 +60,10 @@ struct net {
struct list_head list; /* list of network namespaces */
struct list_head exit_list; /* Linked to, in order to call pernet exit
- * methods on dead net (net_sem
- * read locked), or to unregister
- * pernet ops (net_sem wr locked).
+ * methods on dead net (
+ * pernet_ops_rwsem read locked),
+ * or to unregister pernet ops
+ * (pernet_ops_rwsem write locked).
*/
struct llist_node cleanup_list; /* namespaces on death row */
@@ -95,8 +96,9 @@ struct net {
/* core fib_rules */
struct list_head rules_ops;
- struct list_head fib_notifier_ops; /* protected by net_sem */
-
+ struct list_head fib_notifier_ops; /* Populated by
+ * register_pernet_subsys()
+ */
struct net_device *loopback_dev; /* The loopback */
struct netns_core core;
struct netns_mib mib;
@@ -289,6 +291,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
#endif
}
+/* Protected by net_rwsem */
#define for_each_net(VAR) \
list_for_each_entry(VAR, &net_namespace_list, list)
@@ -321,6 +324,10 @@ struct pernet_operations {
* have to keep in mind all other pernet_operations and
* to introduce a locking, if they share common resources.
*
+ * The only time they are called with exclusive lock is
+ * from register_pernet_subsys(), unregister_pernet_subsys(),
+ * register_pernet_device() and unregister_pernet_device().
+ *
* Exit methods using blocking RCU primitives, such as
* synchronize_rcu(), should be implemented via exit_batch.
* Then, destruction of a group of net requires single
@@ -333,12 +340,6 @@ struct pernet_operations {
void (*exit_batch)(struct list_head *net_exit_list);
unsigned int *id;
size_t size;
- /*
- * Indicates above methods are allowed to be executed in parallel
- * with methods of any other pernet_operations, i.e. they are not
- * need write locked net_sem.
- */
- bool async;
};
/*
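Every ".async = true" deletion earlier in this patch follows from this hunk: the flag is gone because pernet init/exit methods now always run with pernet_ops_rwsem read-locked, i.e. in parallel with other pernet_operations by default. A sketch of what a registration looks like after the change (names illustrative):

static int __net_init example_net_init(struct net *net)
{
	return 0;	/* per-net setup goes here */
}

static void __net_exit example_net_exit(struct net *net)
{
	/* per-net teardown; must not depend on other pernet_operations */
}

/* No .async member any more. */
static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};
/* register_pernet_subsys(&example_net_ops) takes pernet_ops_rwsem for
 * writing; the methods themselves then run with it read-locked.
 */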
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 5b51110435fc..c29f09cfc9d7 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -96,6 +96,8 @@ struct netns_ipv6 {
atomic_t fib6_sernum;
struct seg6_pernet_data *seg6_data;
struct fib_notifier_ops *notifier_ops;
+ struct fib_notifier_ops *ip6mr_notifier_ops;
+ unsigned int ipmr_seq; /* protected by rtnl_mutex */
struct {
struct hlist_head head;
spinlock_t lock;
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index f83cacce3308..60f8cc86a447 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -4,6 +4,7 @@
* regulatory support structures
*
* Copyright 2008-2009 Luis R. Rodriguez <[email protected]>
+ * Copyright (C) 2018 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -188,9 +189,35 @@ struct ieee80211_power_rule {
u32 max_eirp;
};
+/**
+ * struct ieee80211_wmm_ac - used to store per ac wmm regulatory limitation
+ *
+ * The information provided in this structure is required for QoS
+ * transmit queue configuration. Cf. IEEE 802.11 7.3.2.29.
+ *
+ * @cw_min: minimum contention window [a value of the form
+ * 2^n-1 in the range 1..32767]
+ * @cw_max: maximum contention window [like @cw_min]
+ * @cot: maximum burst time in units of 32 usecs, 0 meaning disabled
+ * @aifsn: arbitration interframe space [0..255]
+ *
+ */
+struct ieee80211_wmm_ac {
+ u16 cw_min;
+ u16 cw_max;
+ u16 cot;
+ u8 aifsn;
+};
+
+struct ieee80211_wmm_rule {
+ struct ieee80211_wmm_ac client[IEEE80211_NUM_ACS];
+ struct ieee80211_wmm_ac ap[IEEE80211_NUM_ACS];
+};
+
struct ieee80211_reg_rule {
struct ieee80211_freq_range freq_range;
struct ieee80211_power_rule power_rule;
+ struct ieee80211_wmm_rule *wmm_rule;
u32 flags;
u32 dfs_cac_ms;
};
@@ -198,6 +225,7 @@ struct ieee80211_reg_rule {
struct ieee80211_regdomain {
struct rcu_head rcu_head;
u32 n_reg_rules;
+ u32 n_wmm_rules;
char alpha2[3];
enum nl80211_dfs_regions dfs_region;
struct ieee80211_reg_rule reg_rules[];
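Putting the new wmm pieces together, a hedged sketch of a query through reg_query_regdb_wmm() (declared in the cfg80211.h hunk above); the country, frequency and printout are illustrative:

#include <linux/ieee80211.h>

/* Query the client-side wmm limits for 5180 MHz in Germany and print
 * them per access category. -ENODATA means no limitation applies.
 */
static void show_wmm_limits(void)
{
	struct ieee80211_wmm_rule rule;
	char alpha2[3] = "DE";
	int ac;

	if (reg_query_regdb_wmm(alpha2, 5180, NULL, &rule))
		return;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
		pr_info("ac%d: cw_min=%u cw_max=%u cot=%u aifsn=%u\n",
			ac, rule.client[ac].cw_min, rule.client[ac].cw_max,
			rule.client[ac].cot, rule.client[ac].aifsn);
}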
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 012fb3e2f4cf..c63249ea34c3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1341,12 +1341,12 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
const struct sctp_endpoint *ep,
const union sctp_addr *paddr,
struct sctp_transport **);
-int sctp_endpoint_is_peeled_off(struct sctp_endpoint *,
- const union sctp_addr *);
+bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
+ const union sctp_addr *paddr);
struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *,
struct net *, const union sctp_addr *);
-int sctp_has_association(struct net *net, const union sctp_addr *laddr,
- const union sctp_addr *paddr);
+bool sctp_has_association(struct net *net, const union sctp_addr *laddr,
+ const union sctp_addr *paddr);
int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index ff3ed435701f..6eb174753acf 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2122,8 +2122,8 @@ struct ib_device {
* net device of device @device at port @port_num or NULL if such
* a net device doesn't exist. The vendor driver should call dev_hold
* on this net device. The HW vendor's device driver must guarantee
- * that this function returns NULL before the net device reaches
- * NETDEV_UNREGISTER_FINAL state.
+ * that this function returns NULL before the net device has finished
+ * NETDEV_UNREGISTER state.
*/
struct net_device *(*get_netdev)(struct ib_device *device,
u8 port_num);
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 6b59c63a8e51..63815f66b274 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -133,8 +133,7 @@ TRACE_EVENT(afs_recv_data,
TP_ARGS(call, count, offset, want_more, ret),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, rxcall )
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(enum afs_call_state, state )
__field(unsigned int, count )
__field(unsigned int, offset )
@@ -144,8 +143,7 @@ TRACE_EVENT(afs_recv_data,
),
TP_fast_assign(
- __entry->rxcall = call->rxcall;
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->state = call->state;
__entry->unmarshall = call->unmarshall;
__entry->count = count;
@@ -154,8 +152,7 @@ TRACE_EVENT(afs_recv_data,
__entry->ret = ret;
),
- TP_printk("c=%p ac=%p s=%u u=%u %u/%u wm=%u ret=%d",
- __entry->rxcall,
+ TP_printk("c=%08x s=%u u=%u %u/%u wm=%u ret=%d",
__entry->call,
__entry->state, __entry->unmarshall,
__entry->offset, __entry->count,
@@ -168,21 +165,18 @@ TRACE_EVENT(afs_notify_call,
TP_ARGS(rxcall, call),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, rxcall )
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(enum afs_call_state, state )
__field(unsigned short, unmarshall )
),
TP_fast_assign(
- __entry->rxcall = rxcall;
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->state = call->state;
__entry->unmarshall = call->unmarshall;
),
- TP_printk("c=%p ac=%p s=%u u=%u",
- __entry->rxcall,
+ TP_printk("c=%08x s=%u u=%u",
__entry->call,
__entry->state, __entry->unmarshall)
);
@@ -193,21 +187,18 @@ TRACE_EVENT(afs_cb_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, rxcall )
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(const char *, name )
__field(u32, op )
),
TP_fast_assign(
- __entry->rxcall = call->rxcall;
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->name = call->type->name;
__entry->op = call->operation_ID;
),
- TP_printk("c=%p ac=%p %s o=%u",
- __entry->rxcall,
+ TP_printk("c=%08x %s o=%u",
__entry->call,
__entry->name,
__entry->op)
@@ -220,7 +211,7 @@ TRACE_EVENT(afs_call,
TP_ARGS(call, op, usage, outstanding, where),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(int, op )
__field(int, usage )
__field(int, outstanding )
@@ -228,14 +219,14 @@ TRACE_EVENT(afs_call,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->outstanding = outstanding;
__entry->where = where;
),
- TP_printk("c=%p %s u=%d o=%d sp=%pSR",
+ TP_printk("c=%08x %s u=%d o=%d sp=%pSR",
__entry->call,
__print_symbolic(__entry->op, afs_call_traces),
__entry->usage,
@@ -249,13 +240,13 @@ TRACE_EVENT(afs_make_fs_call,
TP_ARGS(call, fid),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(enum afs_fs_operation, op )
__field_struct(struct afs_fid, fid )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->op = call->operation_ID;
if (fid) {
__entry->fid = *fid;
@@ -266,7 +257,7 @@ TRACE_EVENT(afs_make_fs_call,
}
),
- TP_printk("c=%p %06x:%06x:%06x %s",
+ TP_printk("c=%08x %06x:%06x:%06x %s",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -280,16 +271,16 @@ TRACE_EVENT(afs_make_vl_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(enum afs_vl_operation, op )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->op = call->operation_ID;
),
- TP_printk("c=%p %s",
+ TP_printk("c=%08x %s",
__entry->call,
__print_symbolic(__entry->op, afs_vl_operations))
);
@@ -300,20 +291,20 @@ TRACE_EVENT(afs_call_done,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(struct rxrpc_call *, rx_call )
__field(int, ret )
__field(u32, abort_code )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->rx_call = call->rxcall;
__entry->ret = call->error;
__entry->abort_code = call->abort_code;
),
- TP_printk(" c=%p ret=%d ab=%d [%p]",
+ TP_printk(" c=%08x ret=%d ab=%d [%p]",
__entry->call,
__entry->ret,
__entry->abort_code,
@@ -327,7 +318,7 @@ TRACE_EVENT(afs_send_pages,
TP_ARGS(call, msg, first, last, offset),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(pgoff_t, first )
__field(pgoff_t, last )
__field(unsigned int, nr )
@@ -337,7 +328,7 @@ TRACE_EVENT(afs_send_pages,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->first = first;
__entry->last = last;
__entry->nr = msg->msg_iter.nr_segs;
@@ -346,7 +337,7 @@ TRACE_EVENT(afs_send_pages,
__entry->flags = msg->msg_flags;
),
- TP_printk(" c=%p %lx-%lx-%lx b=%x o=%x f=%x",
+ TP_printk(" c=%08x %lx-%lx-%lx b=%x o=%x f=%x",
__entry->call,
__entry->first, __entry->first + __entry->nr - 1, __entry->last,
__entry->bytes, __entry->offset,
@@ -360,7 +351,7 @@ TRACE_EVENT(afs_sent_pages,
TP_ARGS(call, first, last, cursor, ret),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(pgoff_t, first )
__field(pgoff_t, last )
__field(pgoff_t, cursor )
@@ -368,14 +359,14 @@ TRACE_EVENT(afs_sent_pages,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->first = first;
__entry->last = last;
__entry->cursor = cursor;
__entry->ret = ret;
),
- TP_printk(" c=%p %lx-%lx c=%lx r=%d",
+ TP_printk(" c=%08x %lx-%lx c=%lx r=%d",
__entry->call,
__entry->first, __entry->last,
__entry->cursor, __entry->ret)
@@ -450,7 +441,7 @@ TRACE_EVENT(afs_call_state,
TP_ARGS(call, from, to, ret, remote_abort),
TP_STRUCT__entry(
- __field(struct afs_call *, call )
+ __field(unsigned int, call )
__field(enum afs_call_state, from )
__field(enum afs_call_state, to )
__field(int, ret )
@@ -458,14 +449,14 @@ TRACE_EVENT(afs_call_state,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->from = from;
__entry->to = to;
__entry->ret = ret;
__entry->abort = remote_abort;
),
- TP_printk("c=%p %u->%u r=%d ab=%d",
+ TP_printk("c=%08x %u->%u r=%d ab=%d",
__entry->call,
__entry->from, __entry->to,
__entry->ret, __entry->abort)
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 36cb50c111a6..2ea788f6f95d 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -400,6 +400,13 @@ enum rxrpc_congest_change {
EM(RXRPC_ACK_IDLE, "IDL") \
E_(RXRPC_ACK__INVALID, "-?-")
+#define rxrpc_completions \
+ EM(RXRPC_CALL_SUCCEEDED, "Succeeded") \
+ EM(RXRPC_CALL_REMOTELY_ABORTED, "RemoteAbort") \
+ EM(RXRPC_CALL_LOCALLY_ABORTED, "LocalAbort") \
+ EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \
+ E_(RXRPC_CALL_NETWORK_ERROR, "NetError")
+
/*
* Export enum symbols via userspace.
*/
@@ -420,6 +427,7 @@ rxrpc_rtt_rx_traces;
rxrpc_timer_traces;
rxrpc_propose_ack_traces;
rxrpc_propose_ack_outcomes;
+rxrpc_congest_modes;
rxrpc_congest_changes;
/*
@@ -438,20 +446,20 @@ TRACE_EVENT(rxrpc_conn,
TP_ARGS(conn, op, usage, where),
TP_STRUCT__entry(
- __field(struct rxrpc_connection *, conn )
- __field(int, op )
- __field(int, usage )
- __field(const void *, where )
+ __field(unsigned int, conn )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
),
TP_fast_assign(
- __entry->conn = conn;
+ __entry->conn = conn->debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
),
- TP_printk("C=%p %s u=%d sp=%pSR",
+ TP_printk("C=%08x %s u=%d sp=%pSR",
__entry->conn,
__print_symbolic(__entry->op, rxrpc_conn_traces),
__entry->usage,
@@ -465,7 +473,7 @@ TRACE_EVENT(rxrpc_client,
TP_ARGS(conn, channel, op),
TP_STRUCT__entry(
- __field(struct rxrpc_connection *, conn )
+ __field(unsigned int, conn )
__field(u32, cid )
__field(int, channel )
__field(int, usage )
@@ -474,7 +482,7 @@ TRACE_EVENT(rxrpc_client,
),
TP_fast_assign(
- __entry->conn = conn;
+ __entry->conn = conn->debug_id;
__entry->channel = channel;
__entry->usage = atomic_read(&conn->usage);
__entry->op = op;
@@ -482,7 +490,7 @@ TRACE_EVENT(rxrpc_client,
__entry->cs = conn->cache_state;
),
- TP_printk("C=%p h=%2d %s %s i=%08x u=%d",
+ TP_printk("C=%08x h=%2d %s %s i=%08x u=%d",
__entry->conn,
__entry->channel,
__print_symbolic(__entry->op, rxrpc_client_traces),
@@ -498,7 +506,7 @@ TRACE_EVENT(rxrpc_call,
TP_ARGS(call, op, usage, where, aux),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(int, op )
__field(int, usage )
__field(const void *, where )
@@ -506,14 +514,14 @@ TRACE_EVENT(rxrpc_call,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
__entry->aux = aux;
),
- TP_printk("c=%p %s u=%d sp=%pSR a=%p",
+ TP_printk("c=%08x %s u=%d sp=%pSR a=%p",
__entry->call,
__print_symbolic(__entry->op, rxrpc_call_traces),
__entry->usage,
@@ -592,12 +600,13 @@ TRACE_EVENT(rxrpc_rx_done,
);
TRACE_EVENT(rxrpc_abort,
- TP_PROTO(const char *why, u32 cid, u32 call_id, rxrpc_seq_t seq,
- int abort_code, int error),
+ TP_PROTO(unsigned int call_nr, const char *why, u32 cid, u32 call_id,
+ rxrpc_seq_t seq, int abort_code, int error),
- TP_ARGS(why, cid, call_id, seq, abort_code, error),
+ TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error),
TP_STRUCT__entry(
+ __field(unsigned int, call_nr )
__array(char, why, 4 )
__field(u32, cid )
__field(u32, call_id )
@@ -608,6 +617,7 @@ TRACE_EVENT(rxrpc_abort,
TP_fast_assign(
memcpy(__entry->why, why, 4);
+ __entry->call_nr = call_nr;
__entry->cid = cid;
__entry->call_id = call_id;
__entry->abort_code = abort_code;
@@ -615,18 +625,45 @@ TRACE_EVENT(rxrpc_abort,
__entry->seq = seq;
),
- TP_printk("%08x:%08x s=%u a=%d e=%d %s",
+ TP_printk("c=%08x %08x:%08x s=%u a=%d e=%d %s",
+ __entry->call_nr,
__entry->cid, __entry->call_id, __entry->seq,
__entry->abort_code, __entry->error, __entry->why)
);
+TRACE_EVENT(rxrpc_call_complete,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(enum rxrpc_call_completion, compl )
+ __field(int, error )
+ __field(u32, abort_code )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->compl = call->completion;
+ __entry->error = call->error;
+ __entry->abort_code = call->abort_code;
+ ),
+
+ TP_printk("c=%08x %s r=%d ac=%d",
+ __entry->call,
+ __print_symbolic(__entry->compl, rxrpc_completions),
+ __entry->error,
+ __entry->abort_code)
+ );
+
TRACE_EVENT(rxrpc_transmit,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why),
TP_ARGS(call, why),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_transmit_trace, why )
__field(rxrpc_seq_t, tx_hard_ack )
__field(rxrpc_seq_t, tx_top )
@@ -634,14 +671,14 @@ TRACE_EVENT(rxrpc_transmit,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->tx_hard_ack = call->tx_hard_ack;
__entry->tx_top = call->tx_top;
__entry->tx_winsize = call->tx_winsize;
),
- TP_printk("c=%p %s f=%08x n=%u/%u",
+ TP_printk("c=%08x %s f=%08x n=%u/%u",
__entry->call,
__print_symbolic(__entry->why, rxrpc_transmit_traces),
__entry->tx_hard_ack + 1,
@@ -656,7 +693,7 @@ TRACE_EVENT(rxrpc_rx_data,
TP_ARGS(call, seq, serial, flags, anno),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_seq_t, seq )
__field(rxrpc_serial_t, serial )
__field(u8, flags )
@@ -664,14 +701,14 @@ TRACE_EVENT(rxrpc_rx_data,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
__entry->anno = anno;
),
- TP_printk("c=%p DATA %08x q=%08x fl=%02x a=%02x",
+ TP_printk("c=%08x DATA %08x q=%08x fl=%02x a=%02x",
__entry->call,
__entry->serial,
__entry->seq,
@@ -687,7 +724,7 @@ TRACE_EVENT(rxrpc_rx_ack,
TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_serial_t, serial )
__field(rxrpc_serial_t, ack_serial )
__field(rxrpc_seq_t, first )
@@ -697,7 +734,7 @@ TRACE_EVENT(rxrpc_rx_ack,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->serial = serial;
__entry->ack_serial = ack_serial;
__entry->first = first;
@@ -706,7 +743,7 @@ TRACE_EVENT(rxrpc_rx_ack,
__entry->n_acks = n_acks;
),
- TP_printk("c=%p %08x %s r=%08x f=%08x p=%08x n=%u",
+ TP_printk("c=%08x %08x %s r=%08x f=%08x p=%08x n=%u",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
@@ -723,18 +760,18 @@ TRACE_EVENT(rxrpc_rx_abort,
TP_ARGS(call, serial, abort_code),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_serial_t, serial )
__field(u32, abort_code )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->serial = serial;
__entry->abort_code = abort_code;
),
- TP_printk("c=%p ABORT %08x ac=%d",
+ TP_printk("c=%08x ABORT %08x ac=%d",
__entry->call,
__entry->serial,
__entry->abort_code)
@@ -747,20 +784,20 @@ TRACE_EVENT(rxrpc_rx_rwind_change,
TP_ARGS(call, serial, rwind, wake),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_serial_t, serial )
__field(u32, rwind )
__field(bool, wake )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->serial = serial;
__entry->rwind = rwind;
__entry->wake = wake;
),
- TP_printk("c=%p %08x rw=%u%s",
+ TP_printk("c=%08x %08x rw=%u%s",
__entry->call,
__entry->serial,
__entry->rwind,
@@ -774,7 +811,7 @@ TRACE_EVENT(rxrpc_tx_data,
TP_ARGS(call, seq, serial, flags, retrans, lose),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_seq_t, seq )
__field(rxrpc_serial_t, serial )
__field(u8, flags )
@@ -783,7 +820,7 @@ TRACE_EVENT(rxrpc_tx_data,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
@@ -791,7 +828,7 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->lose = lose;
),
- TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s",
+ TP_printk("c=%08x DATA %08x q=%08x fl=%02x%s%s",
__entry->call,
__entry->serial,
__entry->seq,
@@ -808,7 +845,7 @@ TRACE_EVENT(rxrpc_tx_ack,
TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_serial_t, serial )
__field(rxrpc_seq_t, ack_first )
__field(rxrpc_serial_t, ack_serial )
@@ -817,7 +854,7 @@ TRACE_EVENT(rxrpc_tx_ack,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call ? call->debug_id : 0;
__entry->serial = serial;
__entry->ack_first = ack_first;
__entry->ack_serial = ack_serial;
@@ -825,7 +862,7 @@ TRACE_EVENT(rxrpc_tx_ack,
__entry->n_acks = n_acks;
),
- TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u",
+ TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
@@ -841,7 +878,7 @@ TRACE_EVENT(rxrpc_receive,
TP_ARGS(call, why, serial, seq),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_receive_trace, why )
__field(rxrpc_serial_t, serial )
__field(rxrpc_seq_t, seq )
@@ -850,7 +887,7 @@ TRACE_EVENT(rxrpc_receive,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->serial = serial;
__entry->seq = seq;
@@ -858,7 +895,7 @@ TRACE_EVENT(rxrpc_receive,
__entry->top = call->rx_top;
),
- TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x",
+ TP_printk("c=%08x %s r=%08x q=%08x w=%08x-%08x",
__entry->call,
__print_symbolic(__entry->why, rxrpc_receive_traces),
__entry->serial,
@@ -875,7 +912,7 @@ TRACE_EVENT(rxrpc_recvmsg,
TP_ARGS(call, why, seq, offset, len, ret),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_recvmsg_trace, why )
__field(rxrpc_seq_t, seq )
__field(unsigned int, offset )
@@ -884,7 +921,7 @@ TRACE_EVENT(rxrpc_recvmsg,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->seq = seq;
__entry->offset = offset;
@@ -892,7 +929,7 @@ TRACE_EVENT(rxrpc_recvmsg,
__entry->ret = ret;
),
- TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d",
+ TP_printk("c=%08x %s q=%08x o=%u l=%u ret=%d",
__entry->call,
__print_symbolic(__entry->why, rxrpc_recvmsg_traces),
__entry->seq,
@@ -908,18 +945,18 @@ TRACE_EVENT(rxrpc_rtt_tx,
TP_ARGS(call, why, send_serial),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_rtt_tx_trace, why )
__field(rxrpc_serial_t, send_serial )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->send_serial = send_serial;
),
- TP_printk("c=%p %s sr=%08x",
+ TP_printk("c=%08x %s sr=%08x",
__entry->call,
__print_symbolic(__entry->why, rxrpc_rtt_tx_traces),
__entry->send_serial)
@@ -933,7 +970,7 @@ TRACE_EVENT(rxrpc_rtt_rx,
TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_rtt_rx_trace, why )
__field(u8, nr )
__field(rxrpc_serial_t, send_serial )
@@ -943,7 +980,7 @@ TRACE_EVENT(rxrpc_rtt_rx,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->send_serial = send_serial;
__entry->resp_serial = resp_serial;
@@ -952,7 +989,7 @@ TRACE_EVENT(rxrpc_rtt_rx,
__entry->avg = avg;
),
- TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
+ TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
__entry->send_serial,
@@ -969,7 +1006,7 @@ TRACE_EVENT(rxrpc_timer,
TP_ARGS(call, why, now),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_timer_trace, why )
__field(long, now )
__field(long, ack_at )
@@ -983,7 +1020,7 @@ TRACE_EVENT(rxrpc_timer,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->now = now;
__entry->ack_at = call->ack_at;
@@ -995,7 +1032,7 @@ TRACE_EVENT(rxrpc_timer,
__entry->timer = call->timer.expires;
),
- TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
+ TP_printk("c=%08x %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_timer_traces),
__entry->ack_at - __entry->now,
@@ -1038,7 +1075,7 @@ TRACE_EVENT(rxrpc_propose_ack,
outcome),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_propose_ack_trace, why )
__field(rxrpc_serial_t, serial )
__field(u8, ack_reason )
@@ -1048,7 +1085,7 @@ TRACE_EVENT(rxrpc_propose_ack,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->why = why;
__entry->serial = serial;
__entry->ack_reason = ack_reason;
@@ -1057,7 +1094,7 @@ TRACE_EVENT(rxrpc_propose_ack,
__entry->outcome = outcome;
),
- TP_printk("c=%p %s %s r=%08x i=%u b=%u%s",
+ TP_printk("c=%08x %s %s r=%08x i=%u b=%u%s",
__entry->call,
__print_symbolic(__entry->why, rxrpc_propose_ack_traces),
__print_symbolic(__entry->ack_reason, rxrpc_ack_names),
@@ -1074,20 +1111,20 @@ TRACE_EVENT(rxrpc_retransmit,
TP_ARGS(call, seq, annotation, expiry),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_seq_t, seq )
__field(u8, annotation )
__field(s64, expiry )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->seq = seq;
__entry->annotation = annotation;
__entry->expiry = expiry;
),
- TP_printk("c=%p q=%x a=%02x xp=%lld",
+ TP_printk("c=%08x q=%x a=%02x xp=%lld",
__entry->call,
__entry->seq,
__entry->annotation,
@@ -1101,7 +1138,7 @@ TRACE_EVENT(rxrpc_congest,
TP_ARGS(call, summary, ack_serial, change),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(enum rxrpc_congest_change, change )
__field(rxrpc_seq_t, hard_ack )
__field(rxrpc_seq_t, top )
@@ -1111,7 +1148,7 @@ TRACE_EVENT(rxrpc_congest,
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->change = change;
__entry->hard_ack = call->tx_hard_ack;
__entry->top = call->tx_top;
@@ -1120,7 +1157,7 @@ TRACE_EVENT(rxrpc_congest,
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
),
- TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
__entry->ack_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
@@ -1145,16 +1182,16 @@ TRACE_EVENT(rxrpc_disconnect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(u32, abort_code )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->abort_code = call->abort_code;
),
- TP_printk("c=%p ab=%08x",
+ TP_printk("c=%08x ab=%08x",
__entry->call,
__entry->abort_code)
);
@@ -1165,16 +1202,16 @@ TRACE_EVENT(rxrpc_improper_term,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(u32, abort_code )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->abort_code = call->abort_code;
),
- TP_printk("c=%p ab=%08x",
+ TP_printk("c=%08x ab=%08x",
__entry->call,
__entry->abort_code)
);
@@ -1186,18 +1223,18 @@ TRACE_EVENT(rxrpc_rx_eproto,
TP_ARGS(call, serial, why),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(rxrpc_serial_t, serial )
__field(const char *, why )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->serial = serial;
__entry->why = why;
),
- TP_printk("c=%p EPROTO %08x %s",
+ TP_printk("c=%08x EPROTO %08x %s",
__entry->call,
__entry->serial,
__entry->why)
@@ -1209,26 +1246,49 @@ TRACE_EVENT(rxrpc_connect_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(struct rxrpc_call *, call )
+ __field(unsigned int, call )
__field(unsigned long, user_call_ID )
__field(u32, cid )
__field(u32, call_id )
),
TP_fast_assign(
- __entry->call = call;
+ __entry->call = call->debug_id;
__entry->user_call_ID = call->user_call_ID;
__entry->cid = call->cid;
__entry->call_id = call->call_id;
),
- TP_printk("c=%p u=%p %08x:%08x",
+ TP_printk("c=%08x u=%p %08x:%08x",
__entry->call,
(void *)__entry->user_call_ID,
__entry->cid,
__entry->call_id)
);
+TRACE_EVENT(rxrpc_resend,
+ TP_PROTO(struct rxrpc_call *call, int ix),
+
+ TP_ARGS(call, ix),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(int, ix )
+ __array(u8, anno, 64 )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->ix = ix;
+ memcpy(__entry->anno, call->rxtx_annotations, 64);
+ ),
+
+ TP_printk("c=%08x ix=%u a=%64phN",
+ __entry->call,
+ __entry->ix,
+ __entry->anno)
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
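All of the rxrpc tracepoint conversions above follow one pattern: the entry records the call's debug_id (an unsigned int) instead of the struct rxrpc_call pointer, and TP_printk() switches from %p to a fixed-width %08x, so trace output no longer leaks kernel addresses and stays meaningful after the call structure is freed. A minimal sketch of the pattern, assuming the usual rxrpc trace-header context (struct rxrpc_call with a debug_id member, TRACE_SYSTEM boilerplate); the event name is a placeholder:

TRACE_EVENT(rxrpc_example,
	    TP_PROTO(struct rxrpc_call *call),

	    TP_ARGS(call),

	    TP_STRUCT__entry(
		    __field(unsigned int, call)	/* debug_id, not a pointer */
	    ),

	    TP_fast_assign(
		    __entry->call = call->debug_id;
	    ),

	    /* fixed-width hex cookie, matching the "c=%08x" format above */
	    TP_printk("c=%08x", __entry->call)
	    );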
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 20da156aaf64..4ca65b56084f 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -217,10 +217,14 @@ struct ethtool_value {
__u32 data;
};
+#define PFC_STORM_PREVENTION_AUTO 0xffff
+#define PFC_STORM_PREVENTION_DISABLE 0
+
enum tunable_id {
ETHTOOL_ID_UNSPEC,
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
+ ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
/*
* Add your fresh new tunable attribute above and remember to update
* tunable_strings[] in net/core/ethtool.c
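For reference, a hedged userspace sketch of driving the new tunable through the existing SIOCETHTOOL/ETHTOOL_STUNABLE interface; the function name is a placeholder, fd is assumed to be an ordinary AF_INET socket, and error handling is trimmed. PFC_STORM_PREVENTION_DISABLE (0) turns prevention off and PFC_STORM_PREVENTION_AUTO (0xffff) selects the device default:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

static int set_pfc_prevention_tout(int fd, const char *ifname, __u16 msecs)
{
	struct {
		struct ethtool_tunable hdr;	/* payload follows the header */
		__u16 val;
	} req = {
		.hdr.cmd     = ETHTOOL_STUNABLE,
		.hdr.id      = ETHTOOL_PFC_PREVENTION_TOUT,
		.hdr.type_id = ETHTOOL_TUNABLE_U16,
		.hdr.len     = sizeof(__u16),
		.val         = msecs,	/* 0 disables, 0xffff = device default */
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&req;

	return ioctl(fd, SIOCETHTOOL, &ifr);
}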
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index c13c84304be3..15daf5e2638d 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -542,7 +542,8 @@
* IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
* %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
* %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
- * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, %NL80211_ATTR_MAC_HINT, and
+ * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT,
+ * %NL80211_ATTR_CONTROL_PORT_OVER_NL80211, %NL80211_ATTR_MAC_HINT, and
* %NL80211_ATTR_WIPHY_FREQ_HINT.
* If included, %NL80211_ATTR_MAC and %NL80211_ATTR_WIPHY_FREQ are
* restrictions on BSS selection, i.e., they effectively prevent roaming
@@ -990,6 +991,17 @@
* &NL80211_CMD_CONNECT or &NL80211_CMD_ROAM. If the 4 way handshake failed
* &NL80211_CMD_DISCONNECT should be indicated instead.
*
+ * @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request
+ * and RX notification. This command is used both as a request to transmit
+ * a control port frame and as a notification that a control port frame
+ * has been received. %NL80211_ATTR_FRAME is used to specify the
+ * frame contents. The frame is the raw EAPoL data, without ethernet or
+ * 802.11 headers.
* When used as an event indication, %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
* %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT and %NL80211_ATTR_MAC are added,
* indicating the protocol type of the received frame, whether the frame
* was received unencrypted, and the MAC address of the peer, respectively.
+ *
* @NL80211_CMD_RELOAD_REGDB: Request that the regdb firmware file is reloaded.
*
* @NL80211_CMD_EXTERNAL_AUTH: This interface is exclusively defined for host
@@ -1228,6 +1240,8 @@ enum nl80211_commands {
NL80211_CMD_STA_OPMODE_CHANGED,
+ NL80211_CMD_CONTROL_PORT_FRAME,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1475,6 +1489,15 @@ enum nl80211_commands {
* @NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT: When included along with
* %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, indicates that the custom
* ethertype frames used for key negotiation must not be encrypted.
+ * @NL80211_ATTR_CONTROL_PORT_OVER_NL80211: A flag indicating whether control
+ * port frames (e.g. of type given in %NL80211_ATTR_CONTROL_PORT_ETHERTYPE)
+ * will be sent directly to the network interface or sent via the NL80211
+ * socket. If this attribute is missing, then legacy behavior of sending
+ * control port frames directly to the network interface is used. If the
+ * flag is included, then control port frames are sent over NL80211 instead
+ * using %CMD_CONTROL_PORT_FRAME. If control port routing over NL80211 is
+ * to be used then userspace must also use the %NL80211_ATTR_SOCKET_OWNER
+ * flag.
*
* @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver.
* We recommend using nested, driver-specific attributes within this.
@@ -1962,6 +1985,12 @@ enum nl80211_commands {
* multicast group.
* If set during %NL80211_CMD_ASSOCIATE or %NL80211_CMD_CONNECT the
* station will deauthenticate when the socket is closed.
+ * If set during %NL80211_CMD_JOIN_IBSS the IBSS will be automatically
+ * torn down when the socket is closed.
+ * If set during %NL80211_CMD_JOIN_MESH the mesh setup will be
+ * automatically torn down when the socket is closed.
+ * If set during %NL80211_CMD_START_AP the AP will be automatically
+ * disabled when the socket is closed.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -2628,6 +2657,8 @@ enum nl80211_attrs {
NL80211_ATTR_NSS,
NL80211_ATTR_ACK_SIGNAL,
+ NL80211_ATTR_CONTROL_PORT_OVER_NL80211,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -4999,6 +5030,14 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_LOW_SPAN_SCAN: Driver supports low span scan.
* @NL80211_EXT_FEATURE_LOW_POWER_SCAN: Driver supports low power scan.
* @NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN: Driver supports high accuracy scan.
+ * @NL80211_EXT_FEATURE_DFS_OFFLOAD: HW/driver will offload DFS actions.
+ * The device or driver will do all DFS-related actions by itself,
+ * informing user-space about CAC progress, radar detection events and
+ * channel changes triggered by radar detection. There is no need to
+ * start the CAC from user-space or to react to "radar detected" events.
+ * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211: Driver supports sending and
+ * receiving control port frames over nl80211 instead of the netdevice.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -5029,6 +5068,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_LOW_SPAN_SCAN,
NL80211_EXT_FEATURE_LOW_POWER_SCAN,
NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN,
+ NL80211_EXT_FEATURE_DFS_OFFLOAD,
+ NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5204,6 +5245,8 @@ enum nl80211_smps_mode {
* non-operating channel is expired and no longer valid. New CAC must
* be done on this channel before starting the operation. This is not
* applicable for the ETSI DFS domain, where pre-CAC is valid forever.
+ * @NL80211_RADAR_CAC_STARTED: Channel Availability Check has been started,
+ * should be generated by HW if NL80211_EXT_FEATURE_DFS_OFFLOAD is enabled.
*/
enum nl80211_radar_event {
NL80211_RADAR_DETECTED,
@@ -5211,6 +5254,7 @@ enum nl80211_radar_event {
NL80211_RADAR_CAC_ABORTED,
NL80211_RADAR_NOP_FINISHED,
NL80211_RADAR_PRE_CAC_EXPIRED,
+ NL80211_RADAR_CAC_STARTED,
};
/**
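On the driver side, both new capabilities are opt-in: a driver that can route EAPoL frames via nl80211, or that offloads DFS entirely, advertises the matching extended-feature bits. A hedged sketch; the setup function is a placeholder, while wiphy_ext_feature_set() is the existing cfg80211 helper:

#include <net/cfg80211.h>

static void example_wiphy_setup(struct wiphy *wiphy)
{
	/* userspace may now request NL80211_ATTR_CONTROL_PORT_OVER_NL80211 */
	wiphy_ext_feature_set(wiphy,
			      NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211);

	/* firmware handles CAC/radar itself; see NL80211_RADAR_CAC_STARTED */
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
}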
diff --git a/kernel/audit.c b/kernel/audit.c
index 5e49b614d0e6..227db99b0f19 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1526,7 +1526,6 @@ static struct pernet_operations audit_net_ops __net_initdata = {
.exit = audit_net_exit,
.id = &audit_net_id,
.size = sizeof(struct audit_net),
- .async = true,
};
/* Initialize audit support at boot time. */
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index fa10ad8e9b17..15ea216a67ce 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -724,7 +724,6 @@ static void uevent_net_exit(struct net *net)
static struct pernet_operations uevent_net_ops = {
.init = uevent_net_init,
.exit = uevent_net_exit,
- .async = true,
};
static int __init kobject_uevent_init(void)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index b2badf6b23cd..8e157806df7a 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6649,7 +6649,7 @@ static __init int test_skb_segment(void)
}
segs = skb_segment(skb, features);
- if (segs) {
+ if (!IS_ERR(segs)) {
kfree_skb_list(segs);
ret = 0;
pr_info("%s: success in skb_segment!", __func__);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index bd0ed39f65fb..bad01b14a4ad 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -729,7 +729,6 @@ static struct pernet_operations vlan_net_ops = {
.exit = vlan_exit_net,
.id = &vlan_net_id,
.size = sizeof(struct vlan_net),
- .async = true,
};
static int __init vlan_proto_init(void)
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index a662ccc166df..a627a5db2125 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -148,8 +148,8 @@ int __net_init vlan_proc_init(struct net *net)
if (!vn->proc_vlan_dir)
goto err;
- vn->proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR,
- vn->proc_vlan_dir, &vlan_fops);
+ vn->proc_vlan_conf = proc_create(name_conf, S_IFREG | 0600,
+ vn->proc_vlan_dir, &vlan_fops);
if (!vn->proc_vlan_conf)
goto err;
return 0;
@@ -172,7 +172,7 @@ int vlan_proc_add_dev(struct net_device *vlandev)
if (!strcmp(vlandev->name, name_conf))
return -EINVAL;
vlan->dent =
- proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
+ proc_create_data(vlandev->name, S_IFREG | 0600,
vn->proc_vlan_dir, &vlandev_fops, vlandev);
if (!vlan->dent)
return -ENOBUFS;
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index a3bf9d519193..7214aea14cb3 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -257,22 +257,22 @@ int __init atalk_proc_init(void)
if (!atalk_proc_dir)
goto out;
- p = proc_create("interface", S_IRUGO, atalk_proc_dir,
+ p = proc_create("interface", 0444, atalk_proc_dir,
&atalk_seq_interface_fops);
if (!p)
goto out_interface;
- p = proc_create("route", S_IRUGO, atalk_proc_dir,
+ p = proc_create("route", 0444, atalk_proc_dir,
&atalk_seq_route_fops);
if (!p)
goto out_route;
- p = proc_create("socket", S_IRUGO, atalk_proc_dir,
+ p = proc_create("socket", 0444, atalk_proc_dir,
&atalk_seq_socket_fops);
if (!p)
goto out_socket;
- p = proc_create("arp", S_IRUGO, atalk_proc_dir, &atalk_seq_arp_fops);
+ p = proc_create("arp", 0444, atalk_proc_dir, &atalk_seq_arp_fops);
if (!p)
goto out_arp;
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index 5d2fed9f5710..39b94ca5f65d 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -96,12 +96,12 @@ static ssize_t show_link_rate(struct device *cdev,
return scnprintf(buf, PAGE_SIZE, "%d\n", link_rate);
}
-static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
-static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
-static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
-static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
-static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
-static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
+static DEVICE_ATTR(address, 0444, show_address, NULL);
+static DEVICE_ATTR(atmaddress, 0444, show_atmaddress, NULL);
+static DEVICE_ATTR(atmindex, 0444, show_atmindex, NULL);
+static DEVICE_ATTR(carrier, 0444, show_carrier, NULL);
+static DEVICE_ATTR(type, 0444, show_type, NULL);
+static DEVICE_ATTR(link_rate, 0444, show_link_rate, NULL);
static struct device_attribute *atm_attrs[] = {
&dev_attr_atmaddress,
diff --git a/net/atm/clip.c b/net/atm/clip.c
index d4f6029d5109..f07dbc632222 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -893,7 +893,7 @@ static int __init atm_clip_init(void)
{
struct proc_dir_entry *p;
- p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
+ p = proc_create("arp", 0444, atm_proc_root, &arp_seq_fops);
if (!p) {
pr_err("Unable to initialize /proc/net/atm/arp\n");
atm_clip_exit_noproc();
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 09a1f056712a..01d5d20a6eb1 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1042,7 +1042,7 @@ static int __init lane_module_init(void)
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *p;
- p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
+ p = proc_create("lec", 0444, atm_proc_root, &lec_seq_fops);
if (!p) {
pr_err("Unable to initialize /proc/net/atm/lec\n");
return -ENOMEM;
diff --git a/net/atm/proc.c b/net/atm/proc.c
index edc48edc95c1..55410c00c7e2 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -474,7 +474,7 @@ int __init atm_proc_init(void)
for (e = atm_proc_ents; e->name; e++) {
struct proc_dir_entry *dirent;
- dirent = proc_create(e->name, S_IRUGO,
+ dirent = proc_create(e->name, 0444,
atm_proc_root, e->proc_fops);
if (!dirent)
goto err_out_remove;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index c8319ed48485..2b41366fcad2 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1989,10 +1989,10 @@ static int __init ax25_init(void)
dev_add_pack(&ax25_packet_type);
register_netdevice_notifier(&ax25_dev_notifier);
- proc_create("ax25_route", S_IRUGO, init_net.proc_net,
+ proc_create("ax25_route", 0444, init_net.proc_net,
&ax25_route_fops);
- proc_create("ax25", S_IRUGO, init_net.proc_net, &ax25_info_fops);
- proc_create("ax25_calls", S_IRUGO, init_net.proc_net, &ax25_uid_fops);
+ proc_create("ax25", 0444, init_net.proc_net, &ax25_info_fops);
+ proc_create("ax25_calls", 0444, init_net.proc_net, &ax25_uid_fops);
out:
return rc;
}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 5f3074cb6b4d..5e44d842cc5d 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -210,8 +210,8 @@ static ssize_t show_channel(struct device *tty_dev, struct device_attribute *att
return sprintf(buf, "%d\n", dev->channel);
}
-static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
-static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
+static DEVICE_ATTR(address, 0444, show_address, NULL);
+static DEVICE_ATTR(channel, 0444, show_channel, NULL);
static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
struct rfcomm_dlc *dlc)
diff --git a/net/bridge/br.c b/net/bridge/br.c
index a3f95ab9d6a3..26e1616b2c90 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -188,7 +188,6 @@ static void __net_exit br_net_exit(struct net *net)
static struct pernet_operations br_net_ops = {
.exit = br_net_exit,
- .async = true,
};
static const struct stp_proto br_stp_proto = {
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index c2120eb889a9..9b16eaf33819 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -969,7 +969,6 @@ static struct pernet_operations brnf_net_ops __read_mostly = {
.exit = brnf_exit_net,
.id = &brnf_net_id,
.size = sizeof(struct brnf_net),
- .async = true,
};
static struct notifier_block brnf_notifier __read_mostly = {
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index b1be0dcfba6b..0318a69888d4 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -893,7 +893,7 @@ static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
static struct bin_attribute bridge_forward = {
.attr = { .name = SYSFS_BRIDGE_FDB,
- .mode = S_IRUGO, },
+ .mode = 0444, },
.read = brforward_read,
};
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 126a8ea73c96..fd31ad83ec7b 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -44,7 +44,7 @@ static int store_##_name(struct net_bridge_port *p, unsigned long v) \
{ \
return store_flag(p, v, _mask); \
} \
-static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR, \
+static BRPORT_ATTR(_name, 0644, \
show_##_name, store_##_name)
static int store_flag(struct net_bridge_port *p, unsigned long v,
@@ -71,7 +71,7 @@ static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
return sprintf(buf, "%d\n", p->path_cost);
}
-static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR,
+static BRPORT_ATTR(path_cost, 0644,
show_path_cost, br_stp_set_path_cost);
static ssize_t show_priority(struct net_bridge_port *p, char *buf)
@@ -79,91 +79,91 @@ static ssize_t show_priority(struct net_bridge_port *p, char *buf)
return sprintf(buf, "%d\n", p->priority);
}
-static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR,
+static BRPORT_ATTR(priority, 0644,
show_priority, br_stp_set_port_priority);
static ssize_t show_designated_root(struct net_bridge_port *p, char *buf)
{
return br_show_bridge_id(buf, &p->designated_root);
}
-static BRPORT_ATTR(designated_root, S_IRUGO, show_designated_root, NULL);
+static BRPORT_ATTR(designated_root, 0444, show_designated_root, NULL);
static ssize_t show_designated_bridge(struct net_bridge_port *p, char *buf)
{
return br_show_bridge_id(buf, &p->designated_bridge);
}
-static BRPORT_ATTR(designated_bridge, S_IRUGO, show_designated_bridge, NULL);
+static BRPORT_ATTR(designated_bridge, 0444, show_designated_bridge, NULL);
static ssize_t show_designated_port(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->designated_port);
}
-static BRPORT_ATTR(designated_port, S_IRUGO, show_designated_port, NULL);
+static BRPORT_ATTR(designated_port, 0444, show_designated_port, NULL);
static ssize_t show_designated_cost(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->designated_cost);
}
-static BRPORT_ATTR(designated_cost, S_IRUGO, show_designated_cost, NULL);
+static BRPORT_ATTR(designated_cost, 0444, show_designated_cost, NULL);
static ssize_t show_port_id(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "0x%x\n", p->port_id);
}
-static BRPORT_ATTR(port_id, S_IRUGO, show_port_id, NULL);
+static BRPORT_ATTR(port_id, 0444, show_port_id, NULL);
static ssize_t show_port_no(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "0x%x\n", p->port_no);
}
-static BRPORT_ATTR(port_no, S_IRUGO, show_port_no, NULL);
+static BRPORT_ATTR(port_no, 0444, show_port_no, NULL);
static ssize_t show_change_ack(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->topology_change_ack);
}
-static BRPORT_ATTR(change_ack, S_IRUGO, show_change_ack, NULL);
+static BRPORT_ATTR(change_ack, 0444, show_change_ack, NULL);
static ssize_t show_config_pending(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->config_pending);
}
-static BRPORT_ATTR(config_pending, S_IRUGO, show_config_pending, NULL);
+static BRPORT_ATTR(config_pending, 0444, show_config_pending, NULL);
static ssize_t show_port_state(struct net_bridge_port *p, char *buf)
{
return sprintf(buf, "%d\n", p->state);
}
-static BRPORT_ATTR(state, S_IRUGO, show_port_state, NULL);
+static BRPORT_ATTR(state, 0444, show_port_state, NULL);
static ssize_t show_message_age_timer(struct net_bridge_port *p,
char *buf)
{
return sprintf(buf, "%ld\n", br_timer_value(&p->message_age_timer));
}
-static BRPORT_ATTR(message_age_timer, S_IRUGO, show_message_age_timer, NULL);
+static BRPORT_ATTR(message_age_timer, 0444, show_message_age_timer, NULL);
static ssize_t show_forward_delay_timer(struct net_bridge_port *p,
char *buf)
{
return sprintf(buf, "%ld\n", br_timer_value(&p->forward_delay_timer));
}
-static BRPORT_ATTR(forward_delay_timer, S_IRUGO, show_forward_delay_timer, NULL);
+static BRPORT_ATTR(forward_delay_timer, 0444, show_forward_delay_timer, NULL);
static ssize_t show_hold_timer(struct net_bridge_port *p,
char *buf)
{
return sprintf(buf, "%ld\n", br_timer_value(&p->hold_timer));
}
-static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL);
+static BRPORT_ATTR(hold_timer, 0444, show_hold_timer, NULL);
static int store_flush(struct net_bridge_port *p, unsigned long v)
{
br_fdb_delete_by_port(p->br, p, 0, 0); // Don't delete local entry
return 0;
}
-static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
+static BRPORT_ATTR(flush, 0200, NULL, store_flush);
static ssize_t show_group_fwd_mask(struct net_bridge_port *p, char *buf)
{
@@ -179,7 +179,7 @@ static int store_group_fwd_mask(struct net_bridge_port *p,
return 0;
}
-static BRPORT_ATTR(group_fwd_mask, S_IRUGO | S_IWUSR, show_group_fwd_mask,
+static BRPORT_ATTR(group_fwd_mask, 0644, show_group_fwd_mask,
store_group_fwd_mask);
BRPORT_ATTR_FLAG(hairpin_mode, BR_HAIRPIN_MODE);
@@ -204,7 +204,7 @@ static int store_multicast_router(struct net_bridge_port *p,
{
return br_multicast_set_port_router(p, v);
}
-static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
+static BRPORT_ATTR(multicast_router, 0644, show_multicast_router,
store_multicast_router);
BRPORT_ATTR_FLAG(multicast_fast_leave, BR_MULTICAST_FAST_LEAVE);
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index f070b5e5b9dd..276b60262981 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -77,7 +77,6 @@ static void __net_exit broute_net_exit(struct net *net)
static struct pernet_operations broute_net_ops = {
.init = broute_net_init,
.exit = broute_net_exit,
- .async = true,
};
static int __init ebtable_broute_init(void)
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 4151afc8efcc..c41da5fac84f 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -105,7 +105,6 @@ static void __net_exit frame_filter_net_exit(struct net *net)
static struct pernet_operations frame_filter_net_ops = {
.init = frame_filter_net_init,
.exit = frame_filter_net_exit,
- .async = true,
};
static int __init ebtable_filter_init(void)
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index b8da2dfe2ec5..08df7406ecb3 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -105,7 +105,6 @@ static void __net_exit frame_nat_net_exit(struct net *net)
static struct pernet_operations frame_nat_net_ops = {
.init = frame_nat_net_init,
.exit = frame_nat_net_exit,
- .async = true,
};
static int __init ebtable_nat_init(void)
diff --git a/net/bridge/netfilter/nf_log_bridge.c b/net/bridge/netfilter/nf_log_bridge.c
index 91bfc2ac055a..bd2b3c78f59b 100644
--- a/net/bridge/netfilter/nf_log_bridge.c
+++ b/net/bridge/netfilter/nf_log_bridge.c
@@ -48,7 +48,6 @@ static void __net_exit nf_log_bridge_net_exit(struct net *net)
static struct pernet_operations nf_log_bridge_net_ops = {
.init = nf_log_bridge_net_init,
.exit = nf_log_bridge_net_exit,
- .async = true,
};
static int __init nf_log_bridge_init(void)
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 7a78268cc572..e0adcd123f48 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -544,7 +544,6 @@ static struct pernet_operations caif_net_ops = {
.exit = caif_exit_net,
.id = &caif_net_id,
.size = sizeof(struct caif_net),
- .async = true,
};
/* Initialize Caif devices list */
diff --git a/net/can/af_can.c b/net/can/af_can.c
index e899970398a1..1684ba5b51eb 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -72,7 +72,7 @@ MODULE_AUTHOR("Urs Thuermann <[email protected]>, "
MODULE_ALIAS_NETPROTO(PF_CAN);
static int stats_timer __read_mostly = 1;
-module_param(stats_timer, int, S_IRUGO);
+module_param(stats_timer, int, 0444);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
static struct kmem_cache *rcv_cache __read_mostly;
@@ -954,7 +954,6 @@ static struct notifier_block can_netdev_notifier __read_mostly = {
static struct pernet_operations can_pernet_ops __read_mostly = {
.init = can_pernet_init,
.exit = can_pernet_exit,
- .async = true,
};
static __init int can_init(void)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 26730d39e048..ac5e5e34fee3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1717,7 +1717,6 @@ static void canbcm_pernet_exit(struct net *net)
static struct pernet_operations canbcm_pernet_ops __read_mostly = {
.init = canbcm_pernet_init,
.exit = canbcm_pernet_exit,
- .async = true,
};
static int __init bcm_module_init(void)
diff --git a/net/can/gw.c b/net/can/gw.c
index 08e97668d5cf..faa3da88a127 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -72,7 +72,7 @@ MODULE_ALIAS(CAN_GW_NAME);
#define CGW_DEFAULT_HOPS 1
static unsigned int max_hops __read_mostly = CGW_DEFAULT_HOPS;
-module_param(max_hops, uint, S_IRUGO);
+module_param(max_hops, uint, 0444);
MODULE_PARM_DESC(max_hops,
"maximum " CAN_GW_NAME " routing hops for CAN frames "
"(valid values: " __stringify(CGW_MIN_HOPS) "-"
@@ -1010,7 +1010,6 @@ static void __net_exit cangw_pernet_exit(struct net *net)
static struct pernet_operations cangw_pernet_ops = {
.init = cangw_pernet_init,
.exit = cangw_pernet_exit,
- .async = true,
};
static __init int cgw_module_init(void)
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 4d4c82229e9e..4adf07826f4a 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -54,7 +54,7 @@ static const struct kernel_param_ops param_ops_supported_features = {
.get = param_get_supported_features,
};
module_param_cb(supported_features, &param_ops_supported_features, NULL,
- S_IRUGO);
+ 0444);
const char *ceph_msg_type_name(int type)
{
diff --git a/net/core/dev.c b/net/core/dev.c
index f9c28f44286c..eca5458b2753 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1571,6 +1571,25 @@ static void dev_disable_gro_hw(struct net_device *dev)
netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
+const char *netdev_cmd_to_name(enum netdev_cmd cmd)
+{
+#define N(val) \
+ case NETDEV_##val: \
+ return "NETDEV_" __stringify(val);
+ switch (cmd) {
+ N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
+ N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
+ N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
+ N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
+ N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
+ N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
+ N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
+ }
+#undef N
+ return "UNKNOWN_NETDEV_EVENT";
+}
+EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
+
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
struct net_device *dev)
{
@@ -1610,6 +1629,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
goto unlock;
if (dev_boot_phase)
goto unlock;
+ down_read(&net_rwsem);
for_each_net(net) {
for_each_netdev(net, dev) {
err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
@@ -1623,6 +1643,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
call_netdevice_notifier(nb, NETDEV_UP, dev);
}
}
+ up_read(&net_rwsem);
unlock:
rtnl_unlock();
@@ -1645,6 +1666,7 @@ rollback:
}
outroll:
+ up_read(&net_rwsem);
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
@@ -1675,6 +1697,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
if (err)
goto unlock;
+ down_read(&net_rwsem);
for_each_net(net) {
for_each_netdev(net, dev) {
if (dev->flags & IFF_UP) {
@@ -1685,6 +1708,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}
}
+ up_read(&net_rwsem);
unlock:
rtnl_unlock();
return err;
@@ -8077,7 +8101,6 @@ static void netdev_wait_allrefs(struct net_device *dev)
rcu_barrier();
rtnl_lock();
- call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
&dev->state)) {
/* We must not have linkwatch events
@@ -8149,10 +8172,6 @@ void netdev_run_todo(void)
= list_first_entry(&list, struct net_device, todo_list);
list_del(&dev->todo_list);
- rtnl_lock();
- call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
- __rtnl_unlock();
-
if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
pr_err("network todo '%s' but state %d\n",
dev->name, dev->reg_state);
@@ -8594,7 +8613,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
rcu_barrier();
- call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
new_nsid = peernet2id_alloc(dev_net(dev), net);
/* If there is an ifindex conflict assign a new one */
@@ -8870,7 +8888,6 @@ static void __net_exit netdev_exit(struct net *net)
static struct pernet_operations __net_initdata netdev_net_ops = {
.init = netdev_init,
.exit = netdev_exit,
- .async = true,
};
static void __net_exit default_device_exit(struct net *net)
@@ -8971,7 +8988,6 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
static struct pernet_operations __net_initdata default_device_ops = {
.exit = default_device_exit,
.exit_batch = default_device_exit_batch,
- .async = true,
};
/*
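netdev_cmd_to_name() gives notifier users a printable name for the event they receive. A hedged sketch; the notifier itself is illustrative:

#include <linux/netdevice.h>

/* Hypothetical notifier: log every netdevice event by name. */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	pr_debug("%s: %s\n", dev->name, netdev_cmd_to_name(event));
	return NOTIFY_DONE;
}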
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 157cd9efa4be..bb6e498c6e3d 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -121,6 +121,7 @@ tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
[ETHTOOL_ID_UNSPEC] = "Unspec",
[ETHTOOL_RX_COPYBREAK] = "rx-copybreak",
[ETHTOOL_TX_COPYBREAK] = "tx-copybreak",
+ [ETHTOOL_PFC_PREVENTION_TOUT] = "pfc-prevention-tout",
};
static const char
@@ -2311,6 +2312,11 @@ static int ethtool_tunable_valid(const struct ethtool_tunable *tuna)
tuna->type_id != ETHTOOL_TUNABLE_U32)
return -EINVAL;
break;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (tuna->len != sizeof(u16) ||
+ tuna->type_id != ETHTOOL_TUNABLE_U16)
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
diff --git a/net/core/fib_notifier.c b/net/core/fib_notifier.c
index 5ace0705a3f9..13a40b831d6d 100644
--- a/net/core/fib_notifier.c
+++ b/net/core/fib_notifier.c
@@ -13,16 +13,22 @@ int call_fib_notifier(struct notifier_block *nb, struct net *net,
enum fib_event_type event_type,
struct fib_notifier_info *info)
{
+ int err;
+
info->net = net;
- return nb->notifier_call(nb, event_type, info);
+ err = nb->notifier_call(nb, event_type, info);
+ return notifier_to_errno(err);
}
EXPORT_SYMBOL(call_fib_notifier);
int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
struct fib_notifier_info *info)
{
+ int err;
+
info->net = net;
- return atomic_notifier_call_chain(&fib_chain, event_type, info);
+ err = atomic_notifier_call_chain(&fib_chain, event_type, info);
+ return notifier_to_errno(err);
}
EXPORT_SYMBOL(call_fib_notifiers);
@@ -33,6 +39,7 @@ static unsigned int fib_seq_sum(void)
struct net *net;
rtnl_lock();
+ down_read(&net_rwsem);
for_each_net(net) {
rcu_read_lock();
list_for_each_entry_rcu(ops, &net->fib_notifier_ops, list) {
@@ -43,6 +50,7 @@ static unsigned int fib_seq_sum(void)
}
rcu_read_unlock();
}
+ up_read(&net_rwsem);
rtnl_unlock();
return fib_seq;
@@ -171,7 +179,6 @@ static void __net_exit fib_notifier_net_exit(struct net *net)
static struct pernet_operations fib_notifier_net_ops = {
.init = fib_notifier_net_init,
.exit = fib_notifier_net_exit,
- .async = true,
};
static int __init fib_notifier_init(void)
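With the notifier_to_errno() conversion above, a FIB notifier callback can veto an event by returning an encoded errno, and callers now see a plain negative error instead of a raw notifier code. A hedged sketch; example_out_of_space() is a placeholder for a real capacity check:

#include <linux/errno.h>
#include <linux/notifier.h>
#include <net/fib_notifier.h>

static bool example_out_of_space(void)
{
	return false;	/* placeholder for a real resource check */
}

/* Hypothetical callback: reject route additions when out of space. */
static int example_fib_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	if (event == FIB_EVENT_ENTRY_ADD && example_out_of_space())
		return notifier_from_errno(-ENOMEM);
	return NOTIFY_DONE;
}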
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f6f04fc0f629..33958f84c173 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -631,6 +631,11 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
goto errout_free;
+ err = call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops,
+ extack);
+ if (err < 0)
+ goto errout_free;
+
list_for_each_entry(r, &ops->rules_list, list) {
if (r->pref > rule->pref)
break;
@@ -667,7 +672,6 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
if (rule->tun_id)
ip_tunnel_need_metadata();
- call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops, extack);
notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
flush_route_cache(ops);
rules_ops_put(ops);
@@ -1130,7 +1134,6 @@ static void __net_exit fib_rules_net_exit(struct net *net)
static struct pernet_operations fib_rules_net_ops = {
.init = fib_rules_net_init,
.exit = fib_rules_net_exit,
- .async = true,
};
static int __init fib_rules_init(void)
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 65b51e778782..9737302907b1 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -315,12 +315,12 @@ static int __net_init dev_proc_net_init(struct net *net)
{
int rc = -ENOMEM;
- if (!proc_create("dev", S_IRUGO, net->proc_net, &dev_seq_fops))
+ if (!proc_create("dev", 0444, net->proc_net, &dev_seq_fops))
goto out;
- if (!proc_create("softnet_stat", S_IRUGO, net->proc_net,
+ if (!proc_create("softnet_stat", 0444, net->proc_net,
&softnet_seq_fops))
goto out_dev;
- if (!proc_create("ptype", S_IRUGO, net->proc_net, &ptype_seq_fops))
+ if (!proc_create("ptype", 0444, net->proc_net, &ptype_seq_fops))
goto out_softnet;
if (wext_proc_init(net))
@@ -349,7 +349,6 @@ static void __net_exit dev_proc_net_exit(struct net *net)
static struct pernet_operations __net_initdata dev_proc_ops = {
.init = dev_proc_net_init,
.exit = dev_proc_net_exit,
- .async = true,
};
static int dev_mc_seq_show(struct seq_file *seq, void *v)
@@ -406,7 +405,6 @@ static void __net_exit dev_mc_net_exit(struct net *net)
static struct pernet_operations __net_initdata dev_mc_net_ops = {
.init = dev_mc_net_init,
.exit = dev_mc_net_exit,
- .async = true,
};
int __init dev_proc_init(void)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 60a5ad2c33ee..c476f0794132 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -431,7 +431,7 @@ static ssize_t group_store(struct device *dev, struct device_attribute *attr,
return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
-static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
+static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);
static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
@@ -854,10 +854,10 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
}
static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
- = __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
+ = __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);
static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
- = __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
+ = __ATTR(rps_flow_cnt, 0644,
show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */
@@ -1154,7 +1154,7 @@ static ssize_t bql_set_hold_time(struct netdev_queue *queue,
}
static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
- = __ATTR(hold_time, S_IRUGO | S_IWUSR,
+ = __ATTR(hold_time, 0644,
bql_show_hold_time, bql_set_hold_time);
static ssize_t bql_show_inflight(struct netdev_queue *queue,
@@ -1166,7 +1166,7 @@ static ssize_t bql_show_inflight(struct netdev_queue *queue,
}
static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
- __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
+ __ATTR(inflight, 0444, bql_show_inflight, NULL);
#define BQL_ATTR(NAME, FIELD) \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \
@@ -1182,7 +1182,7 @@ static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \
} \
\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
- = __ATTR(NAME, S_IRUGO | S_IWUSR, \
+ = __ATTR(NAME, 0644, \
bql_show_ ## NAME, bql_set_ ## NAME)
BQL_ATTR(limit, limit);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 95ba2c53bd9a..7fdf321d4997 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -33,6 +33,10 @@ static struct list_head *first_device = &pernet_list;
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
+/* Protects net_namespace_list. Nests inside rtnl_lock() */
+DECLARE_RWSEM(net_rwsem);
+EXPORT_SYMBOL_GPL(net_rwsem);
+
struct net init_net = {
.count = REFCOUNT_INIT(1),
.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
@@ -40,12 +44,13 @@ struct net init_net = {
EXPORT_SYMBOL(init_net);
static bool init_net_initialized;
-static unsigned nr_sync_pernet_ops;
/*
- * net_sem: protects: pernet_list, net_generic_ids, nr_sync_pernet_ops,
+ * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
* init_net_initialized and first_device pointer.
+ * This is an internal net namespace object. Please do not use it
+ * outside.
*/
-DECLARE_RWSEM(net_sem);
+DECLARE_RWSEM(pernet_ops_rwsem);
#define MIN_PERNET_OPS_ID \
((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
@@ -73,7 +78,7 @@ static int net_assign_generic(struct net *net, unsigned int id, void *data)
BUG_ON(id < MIN_PERNET_OPS_ID);
old_ng = rcu_dereference_protected(net->gen,
- lockdep_is_held(&net_sem));
+ lockdep_is_held(&pernet_ops_rwsem));
if (old_ng->s.len > id) {
old_ng->ptr[id] = data;
return 0;
@@ -290,7 +295,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
*/
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
- /* Must be called with net_sem held */
+ /* Must be called with pernet_ops_rwsem held */
const struct pernet_operations *ops, *saved_ops;
int error = 0;
LIST_HEAD(net_exit_list);
@@ -308,9 +313,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
if (error < 0)
goto out_undo;
}
- rtnl_lock();
+ down_write(&net_rwsem);
list_add_tail_rcu(&net->list, &net_namespace_list);
- rtnl_unlock();
+ up_write(&net_rwsem);
out:
return error;
@@ -339,7 +344,6 @@ static int __net_init net_defaults_init_net(struct net *net)
static struct pernet_operations net_defaults_ops = {
.init = net_defaults_init_net,
- .async = true,
};
static __init int net_defaults_init(void)
@@ -406,7 +410,6 @@ struct net *copy_net_ns(unsigned long flags,
{
struct ucounts *ucounts;
struct net *net;
- unsigned write;
int rv;
if (!(flags & CLONE_NEWNET))
@@ -424,25 +427,14 @@ struct net *copy_net_ns(unsigned long flags,
refcount_set(&net->passive, 1);
net->ucounts = ucounts;
get_user_ns(user_ns);
-again:
- write = READ_ONCE(nr_sync_pernet_ops);
- if (write)
- rv = down_write_killable(&net_sem);
- else
- rv = down_read_killable(&net_sem);
+
+ rv = down_read_killable(&pernet_ops_rwsem);
if (rv < 0)
goto put_userns;
- if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
- up_read(&net_sem);
- goto again;
- }
rv = setup_net(net, user_ns);
- if (write)
- up_write(&net_sem);
- else
- up_read(&net_sem);
+ up_read(&pernet_ops_rwsem);
if (rv < 0) {
put_userns:
@@ -462,7 +454,7 @@ static void unhash_nsid(struct net *net, struct net *last)
* and this work is the only process that may delete
* a net from net_namespace_list. So, when the below
* is executing, the list may only grow. Thus, we do not
- * use for_each_net_rcu() or rtnl_lock().
+ * use for_each_net_rcu() or net_rwsem.
*/
for_each_net(tmp) {
int id;
@@ -490,24 +482,14 @@ static void cleanup_net(struct work_struct *work)
struct net *net, *tmp, *last;
struct llist_node *net_kill_list;
LIST_HEAD(net_exit_list);
- unsigned write;
/* Atomically snapshot the list of namespaces to cleanup */
net_kill_list = llist_del_all(&cleanup_list);
-again:
- write = READ_ONCE(nr_sync_pernet_ops);
- if (write)
- down_write(&net_sem);
- else
- down_read(&net_sem);
- if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
- up_read(&net_sem);
- goto again;
- }
+ down_read(&pernet_ops_rwsem);
/* Don't let anyone else find us. */
- rtnl_lock();
+ down_write(&net_rwsem);
llist_for_each_entry(net, net_kill_list, cleanup_list)
list_del_rcu(&net->list);
/* Cache last net. After we unlock rtnl, no one new net
@@ -521,7 +503,7 @@ again:
* useless anyway, as netns_ids are destroyed there.
*/
last = list_last_entry(&net_namespace_list, struct net, list);
- rtnl_unlock();
+ up_write(&net_rwsem);
llist_for_each_entry(net, net_kill_list, cleanup_list) {
unhash_nsid(net, last);
@@ -543,10 +525,7 @@ again:
list_for_each_entry_reverse(ops, &pernet_list, list)
ops_free_list(ops, &net_exit_list);
- if (write)
- up_write(&net_sem);
- else
- up_read(&net_sem);
+ up_read(&pernet_ops_rwsem);
/* Ensure there are no outstanding rcu callbacks using this
* network namespace.
@@ -573,8 +552,8 @@ again:
*/
void net_ns_barrier(void)
{
- down_write(&net_sem);
- up_write(&net_sem);
+ down_write(&pernet_ops_rwsem);
+ up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);
@@ -654,7 +633,6 @@ static __net_exit void net_ns_net_exit(struct net *net)
static struct pernet_operations __net_initdata net_ns_ops = {
.init = net_ns_net_init,
.exit = net_ns_net_exit,
- .async = true,
};
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
@@ -897,12 +875,12 @@ static int __init net_ns_init(void)
rcu_assign_pointer(init_net.gen, ng);
- down_write(&net_sem);
+ down_write(&pernet_ops_rwsem);
if (setup_net(&init_net, &init_user_ns))
panic("Could not setup the initial network namespace");
init_net_initialized = true;
- up_write(&net_sem);
+ up_write(&pernet_ops_rwsem);
register_pernet_subsys(&net_ns_ops);
@@ -926,6 +904,9 @@ static int __register_pernet_operations(struct list_head *list,
list_add_tail(&ops->list, list);
if (ops->init || (ops->id && ops->size)) {
+ /* We hold pernet_ops_rwsem write locked, so parallel
+ * setup_net() and cleanup_net() are not possible.
+ */
for_each_net(net) {
error = ops_init(ops, net);
if (error)
@@ -949,6 +930,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
LIST_HEAD(net_exit_list);
list_del(&ops->list);
+ /* See comment in __register_pernet_operations() */
for_each_net(net)
list_add_tail(&net->exit_list, &net_exit_list);
ops_exit_list(ops, &net_exit_list);
@@ -1006,9 +988,6 @@ again:
rcu_barrier();
if (ops->id)
ida_remove(&net_generic_ids, *ops->id);
- } else if (!ops->async) {
- pr_info_once("Pernet operations %ps are sync.\n", ops);
- nr_sync_pernet_ops++;
}
return error;
@@ -1016,8 +995,6 @@ again:
static void unregister_pernet_operations(struct pernet_operations *ops)
{
- if (!ops->async)
- BUG_ON(nr_sync_pernet_ops-- == 0);
__unregister_pernet_operations(ops);
rcu_barrier();
if (ops->id)
@@ -1046,9 +1023,9 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
int register_pernet_subsys(struct pernet_operations *ops)
{
int error;
- down_write(&net_sem);
+ down_write(&pernet_ops_rwsem);
error = register_pernet_operations(first_device, ops);
- up_write(&net_sem);
+ up_write(&pernet_ops_rwsem);
return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
@@ -1064,9 +1041,9 @@ EXPORT_SYMBOL_GPL(register_pernet_subsys);
*/
void unregister_pernet_subsys(struct pernet_operations *ops)
{
- down_write(&net_sem);
+ down_write(&pernet_ops_rwsem);
unregister_pernet_operations(ops);
- up_write(&net_sem);
+ up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
@@ -1092,11 +1069,11 @@ EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
int register_pernet_device(struct pernet_operations *ops)
{
int error;
- down_write(&net_sem);
+ down_write(&pernet_ops_rwsem);
error = register_pernet_operations(&pernet_list, ops);
if (!error && (first_device == &pernet_list))
first_device = &ops->list;
- up_write(&net_sem);
+ up_write(&pernet_ops_rwsem);
return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
@@ -1112,11 +1089,11 @@ EXPORT_SYMBOL_GPL(register_pernet_device);
*/
void unregister_pernet_device(struct pernet_operations *ops)
{
- down_write(&net_sem);
+ down_write(&pernet_ops_rwsem);
if (&ops->list == first_device)
first_device = first_device->next;
unregister_pernet_operations(ops);
- up_write(&net_sem);
+ up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
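After this change the namespace list has its own rwsem: setup_net() and cleanup_net() take net_rwsem for writing, and any walker of net_namespace_list takes it for reading instead of relying on rtnl_lock(). A hedged sketch of the reader side; the loop body is a placeholder:

#include <net/net_namespace.h>

static void example_walk_netns(void)
{
	struct net *net;

	down_read(&net_rwsem);	/* blocks namespace creation/destruction */
	for_each_net(net) {
		/* inspect per-namespace state here */
	}
	up_read(&net_rwsem);
}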
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 545cf08cd558..7e4ede34cc52 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3852,7 +3852,6 @@ static struct pernet_operations pg_net_ops = {
.exit = pg_net_exit,
.id = &pg_net_id,
.size = sizeof(struct pktgen_net),
- .async = true,
};
static int __init pg_init(void)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 87079eaa871b..e86b28482ca7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -418,9 +418,11 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
struct net *net;
+ down_read(&net_rwsem);
for_each_net(net) {
__rtnl_kill_links(net, ops);
}
+ up_read(&net_rwsem);
list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
@@ -438,6 +440,9 @@ static void rtnl_lock_unregistering_all(void)
for (;;) {
unregistering = false;
rtnl_lock();
+ /* We hold pernet_ops_rwsem write locked, so parallel
+ * setup_net() and cleanup_net() are not possible.
+ */
for_each_net(net) {
if (net->dev_unreg_count > 0) {
unregistering = true;
@@ -459,12 +464,12 @@ static void rtnl_lock_unregistering_all(void)
*/
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
- /* Close the race with cleanup_net() */
- down_write(&net_sem);
+ /* Close the race with setup_net() and cleanup_net() */
+ down_write(&pernet_ops_rwsem);
rtnl_lock_unregistering_all();
__rtnl_link_unregister(ops);
rtnl_unlock();
- up_write(&net_sem);
+ up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
@@ -4730,7 +4735,6 @@ static void __net_exit rtnetlink_net_exit(struct net *net)
static struct pernet_operations rtnetlink_net_ops = {
.init = rtnetlink_net_init,
.exit = rtnetlink_net_exit,
- .async = true,
};
void __init rtnetlink_init(void)
diff --git a/net/core/sock.c b/net/core/sock.c
index e689496dfd8a..6444525f610c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3175,7 +3175,6 @@ static void __net_exit sock_inuse_exit_net(struct net *net)
static struct pernet_operations net_inuse_ops = {
.init = sock_inuse_init_net,
.exit = sock_inuse_exit_net,
- .async = true,
};
static __init int net_inuse_init(void)
@@ -3455,7 +3454,7 @@ static const struct file_operations proto_seq_fops = {
static __net_init int proto_init_net(struct net *net)
{
- if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
+ if (!proc_create("protocols", 0444, net->proc_net, &proto_seq_fops))
return -ENOMEM;
return 0;
@@ -3470,7 +3469,6 @@ static __net_exit void proto_exit_net(struct net *net)
static __net_initdata struct pernet_operations proto_net_ops = {
.init = proto_init_net,
.exit = proto_exit_net,
- .async = true,
};
static int __init proto_init(void)
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index a3392a8f9276..c37b5be7c5e4 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -324,7 +324,6 @@ static void __net_exit diag_net_exit(struct net *net)
static struct pernet_operations diag_net_ops = {
.init = diag_net_init,
.exit = diag_net_exit,
- .async = true,
};
static int __init sock_diag_init(void)
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 4f47f92459cc..b3b609f0eeb5 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -584,7 +584,6 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
static __net_initdata struct pernet_operations sysctl_core_ops = {
.init = sysctl_core_net_init,
.exit = sysctl_core_net_exit,
- .async = true,
};
static __init int sysctl_core_init(void)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 13ad28ab1e79..e65fcb45c3f6 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -1031,7 +1031,6 @@ static struct pernet_operations dccp_v4_ops = {
.init = dccp_v4_init_net,
.exit = dccp_v4_exit_net,
.exit_batch = dccp_v4_exit_batch,
- .async = true,
};
static int __init dccp_v4_init(void)
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 2f48c020f8c3..5df7857fc0f3 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1116,7 +1116,6 @@ static struct pernet_operations dccp_v6_ops = {
.init = dccp_v6_init_net,
.exit = dccp_v6_exit_net,
.exit_batch = dccp_v6_exit_batch,
- .async = true,
};
static int __init dccp_v6_init(void)
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2ee8306c23e3..32751602767f 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2383,7 +2383,7 @@ static int __init decnet_init(void)
dev_add_pack(&dn_dix_packet_type);
register_netdevice_notifier(&dn_dev_notifier);
- proc_create("decnet", S_IRUGO, init_net.proc_net, &dn_socket_seq_fops);
+ proc_create("decnet", 0444, init_net.proc_net, &dn_socket_seq_fops);
dn_register_sysctl();
out:
return rc;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index c9f5e1ebb9c8..c03b046478c3 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1424,7 +1424,7 @@ void __init dn_dev_init(void)
rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETADDR,
NULL, dn_nl_dump_ifaddr, 0);
- proc_create("decnet_dev", S_IRUGO, init_net.proc_net, &dn_dev_seq_fops);
+ proc_create("decnet_dev", 0444, init_net.proc_net, &dn_dev_seq_fops);
#ifdef CONFIG_SYSCTL
{
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 6e37d9e6345e..13156165afa3 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -608,7 +608,7 @@ static const struct file_operations dn_neigh_seq_fops = {
void __init dn_neigh_init(void)
{
neigh_table_init(NEIGH_DN_TABLE, &dn_neigh_table);
- proc_create("decnet_neigh", S_IRUGO, init_net.proc_net,
+ proc_create("decnet_neigh", 0444, init_net.proc_net,
&dn_neigh_seq_fops);
}
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index ef20b8e31669..eca0cc6b761f 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1918,7 +1918,7 @@ void __init dn_route_init(void)
dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
- proc_create("decnet_cache", S_IRUGO, init_net.proc_net,
+ proc_create("decnet_cache", 0444, init_net.proc_net,
&dn_rt_cache_seq_fops);
#ifdef CONFIG_DECNET_ROUTER
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index e1d4d898a007..8396705deffc 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -38,7 +38,7 @@ MODULE_AUTHOR("Wang Lei");
MODULE_LICENSE("GPL");
unsigned int dns_resolver_debug;
-module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
+module_param_named(debug, dns_resolver_debug, uint, 0644);
MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
const struct cred *dns_resolver_cache;
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index a9ccb1322f69..85bf86ad6b18 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -603,7 +603,6 @@ static void __net_exit lowpan_frags_exit_net(struct net *net)
static struct pernet_operations lowpan_frags_ops = {
.init = lowpan_frags_init_net,
.exit = lowpan_frags_exit_net,
- .async = true,
};
int __init lowpan_net_frag_init(void)
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index 9104943c15ba..cb7176cd4cd6 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -345,7 +345,6 @@ static void __net_exit cfg802154_pernet_exit(struct net *net)
static struct pernet_operations cfg802154_pernet_ops = {
.exit = cfg802154_pernet_exit,
- .async = true,
};
static int __init wpan_phy_class_init(void)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e8c7fad8c329..f98e2f0db841 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1735,7 +1735,6 @@ static __net_exit void ipv4_mib_exit_net(struct net *net)
static __net_initdata struct pernet_operations ipv4_mib_ops = {
.init = ipv4_mib_init_net,
.exit = ipv4_mib_exit_net,
- .async = true,
};
static int __init init_ipv4_mibs(void)
@@ -1789,7 +1788,6 @@ static __net_exit void inet_exit_net(struct net *net)
static __net_initdata struct pernet_operations af_inet_ops = {
.init = inet_init_net,
.exit = inet_exit_net,
- .async = true,
};
static int __init init_inet_pernet_ops(void)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 7dc9de8444a9..be4c595edccb 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1434,7 +1434,7 @@ static const struct file_operations arp_seq_fops = {
static int __net_init arp_net_init(struct net *net)
{
- if (!proc_create("arp", S_IRUGO, net->proc_net, &arp_seq_fops))
+ if (!proc_create("arp", 0444, net->proc_net, &arp_seq_fops))
return -ENOMEM;
return 0;
}
@@ -1447,7 +1447,6 @@ static void __net_exit arp_net_exit(struct net *net)
static struct pernet_operations arp_net_ops = {
.init = arp_net_init,
.exit = arp_net_exit,
- .async = true,
};
static int __init arp_proc_init(void)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5ae0d1f097ca..40f001782c1b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2469,7 +2469,6 @@ static __net_exit void devinet_exit_net(struct net *net)
static __net_initdata struct pernet_operations devinet_ops = {
.init = devinet_init_net,
.exit = devinet_exit_net,
- .async = true,
};
static struct rtnl_af_ops inet_af_ops __read_mostly = {
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 296d0b956bfe..97689012b357 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -654,7 +654,7 @@ static void esp_input_restore_header(struct sk_buff *skb)
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
struct xfrm_state *x = xfrm_input_state(skb);
- struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;
+ struct ip_esp_hdr *esph;
/* For ESN we move the header forward by 4 bytes to
* accomodate the high bits. We will move it back after
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index da5635fc52c2..7cf755ef9efb 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -138,6 +138,8 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
(x->xso.dev != skb->dev))
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+ else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
+ esp_features = features & ~NETIF_F_CSUM_MASK;
xo->flags |= XFRM_GSO_SEGMENT;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index ac71c3d496c0..f05afaf3235c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1362,7 +1362,6 @@ static void __net_exit fib_net_exit(struct net *net)
static struct pernet_operations fib_net_ops = {
.init = fib_net_init,
.exit = fib_net_exit,
- .async = true,
};
void __init ip_fib_init(void)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 62243a8abf92..3dcffd3ce98c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1065,6 +1065,9 @@ noleaf:
return -ENOMEM;
}
+/* fib notifier for ADD is sent before calling fib_insert_alias with
+ * the expectation that the only possible failure is ENOMEM
+ */
static int fib_insert_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *new,
struct fib_alias *fa, t_key key)
@@ -1216,8 +1219,13 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
new_fa->tb_id = tb->tb_id;
new_fa->fa_default = -1;
- call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
- key, plen, new_fa, extack);
+ err = call_fib_entry_notifiers(net,
+ FIB_EVENT_ENTRY_REPLACE,
+ key, plen, new_fa,
+ extack);
+ if (err)
+ goto out_free_new_fa;
+
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
tb->tb_id, &cfg->fc_nlinfo, nlflags);
@@ -1263,21 +1271,32 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
new_fa->tb_id = tb->tb_id;
new_fa->fa_default = -1;
+ err = call_fib_entry_notifiers(net, event, key, plen, new_fa, extack);
+ if (err)
+ goto out_free_new_fa;
+
/* Insert new entry to the list. */
err = fib_insert_alias(t, tp, l, new_fa, fa, key);
if (err)
- goto out_free_new_fa;
+ goto out_fib_notif;
if (!plen)
tb->tb_num_default++;
rt_cache_flush(cfg->fc_nlinfo.nl_net);
- call_fib_entry_notifiers(net, event, key, plen, new_fa, extack);
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
&cfg->fc_nlinfo, nlflags);
succeeded:
return 0;
+out_fib_notif:
+ /* notifier was sent that entry would be added to trie, but
+	 * the add failed and we need to recover. The only failure for
+ * fib_insert_alias is ENOMEM.
+ */
+ NL_SET_ERR_MSG(extack, "Failed to insert route into trie");
+ call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, key,
+ plen, new_fa, NULL);
out_free_new_fa:
kmem_cache_free(fn_alias_kmem, new_fa);
out:
@@ -2722,14 +2741,14 @@ static const struct file_operations fib_route_fops = {
int __net_init fib_proc_init(struct net *net)
{
- if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
+ if (!proc_create("fib_trie", 0444, net->proc_net, &fib_trie_fops))
goto out1;
- if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
+ if (!proc_create("fib_triestat", 0444, net->proc_net,
&fib_triestat_fops))
goto out2;
- if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
+ if (!proc_create("route", 0444, net->proc_net, &fib_route_fops))
goto out3;
return 0;
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index d3e1a9af478b..1540db65241a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -1081,7 +1081,6 @@ static struct pernet_operations fou_net_ops = {
.exit = fou_exit_net,
.id = &fou_net_id,
.size = sizeof(struct fou_net),
- .async = true,
};
static int __init fou_init(void)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index cc56efa64d5c..1617604c9284 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1257,7 +1257,6 @@ fail:
static struct pernet_operations __net_initdata icmp_sk_ops = {
.init = icmp_sk_init,
.exit = icmp_sk_exit,
- .async = true,
};
int __init icmp_init(void)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c2743763777e..b26a81a7de42 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2993,10 +2993,10 @@ static int __net_init igmp_net_init(struct net *net)
struct proc_dir_entry *pde;
int err;
- pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops);
+ pde = proc_create("igmp", 0444, net->proc_net, &igmp_mc_seq_fops);
if (!pde)
goto out_igmp;
- pde = proc_create("mcfilter", S_IRUGO, net->proc_net,
+ pde = proc_create("mcfilter", 0444, net->proc_net,
&igmp_mcf_seq_fops);
if (!pde)
goto out_mcfilter;
@@ -3028,7 +3028,6 @@ static void __net_exit igmp_net_exit(struct net *net)
static struct pernet_operations igmp_net_ops = {
.init = igmp_net_init,
.exit = igmp_net_exit,
- .async = true,
};
#endif
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 5e843ae5e468..bbf1b94942c0 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -885,7 +885,6 @@ static void __net_exit ipv4_frags_exit_net(struct net *net)
static struct pernet_operations ip4_frags_ops = {
.init = ipv4_frags_init_net,
.exit = ipv4_frags_exit_net,
- .async = true,
};
void __init ipfrag_init(void)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 9ab1aa2f7660..a8772a978224 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1044,7 +1044,6 @@ static struct pernet_operations ipgre_net_ops = {
.exit_batch = ipgre_exit_batch_net,
.id = &ipgre_net_id,
.size = sizeof(struct ip_tunnel_net),
- .async = true,
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1628,7 +1627,6 @@ static struct pernet_operations ipgre_tap_net_ops = {
.exit_batch = ipgre_tap_exit_batch_net,
.id = &gre_tap_net_id,
.size = sizeof(struct ip_tunnel_net),
- .async = true,
};
static int __net_init erspan_init_net(struct net *net)
@@ -1647,7 +1645,6 @@ static struct pernet_operations erspan_net_ops = {
.exit_batch = erspan_exit_batch_net,
.id = &erspan_net_id,
.size = sizeof(struct ip_tunnel_net),
- .async = true,
};
static int __init ipgre_init(void)
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index b10bf563afd9..51b1669334fe 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -454,7 +454,6 @@ static struct pernet_operations vti_net_ops = {
.exit_batch = vti_exit_batch_net,
.id = &vti_net_id,
.size = sizeof(struct ip_tunnel_net),
- .async = true,
};
static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index f75802ad960f..43f620feb1c4 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1369,7 +1369,7 @@ static int __init ip_auto_config(void)
unsigned int i;
#ifdef CONFIG_PROC_FS
- proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops);
+ proc_create("pnp", 0444, init_net.proc_net, &pnp_seq_fops);
#endif /* CONFIG_PROC_FS */
if (!ic_enable)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 9c5a4d164f09..c891235b4966 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -669,7 +669,6 @@ static struct pernet_operations ipip_net_ops = {
.exit_batch = ipip_exit_batch_net,
.id = &ipip_net_id,
.size = sizeof(struct ip_tunnel_net),
- .async = true,
};
static int __init ipip_init(void)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index f6be5db16da2..2fb4de3f7f66 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -644,80 +644,22 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
}
#endif
-static int call_ipmr_vif_entry_notifier(struct notifier_block *nb,
- struct net *net,
- enum fib_event_type event_type,
- struct vif_device *vif,
- vifi_t vif_index, u32 tb_id)
-{
- struct vif_entry_notifier_info info = {
- .info = {
- .family = RTNL_FAMILY_IPMR,
- .net = net,
- },
- .dev = vif->dev,
- .vif_index = vif_index,
- .vif_flags = vif->flags,
- .tb_id = tb_id,
- };
-
- return call_fib_notifier(nb, net, event_type, &info.info);
-}
-
static int call_ipmr_vif_entry_notifiers(struct net *net,
enum fib_event_type event_type,
struct vif_device *vif,
vifi_t vif_index, u32 tb_id)
{
- struct vif_entry_notifier_info info = {
- .info = {
- .family = RTNL_FAMILY_IPMR,
- .net = net,
- },
- .dev = vif->dev,
- .vif_index = vif_index,
- .vif_flags = vif->flags,
- .tb_id = tb_id,
- };
-
- ASSERT_RTNL();
- net->ipv4.ipmr_seq++;
- return call_fib_notifiers(net, event_type, &info.info);
-}
-
-static int call_ipmr_mfc_entry_notifier(struct notifier_block *nb,
- struct net *net,
- enum fib_event_type event_type,
- struct mfc_cache *mfc, u32 tb_id)
-{
- struct mfc_entry_notifier_info info = {
- .info = {
- .family = RTNL_FAMILY_IPMR,
- .net = net,
- },
- .mfc = mfc,
- .tb_id = tb_id
- };
-
- return call_fib_notifier(nb, net, event_type, &info.info);
+ return mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, event_type,
+ vif, vif_index, tb_id,
+ &net->ipv4.ipmr_seq);
}
static int call_ipmr_mfc_entry_notifiers(struct net *net,
enum fib_event_type event_type,
struct mfc_cache *mfc, u32 tb_id)
{
- struct mfc_entry_notifier_info info = {
- .info = {
- .family = RTNL_FAMILY_IPMR,
- .net = net,
- },
- .mfc = mfc,
- .tb_id = tb_id
- };
-
- ASSERT_RTNL();
- net->ipv4.ipmr_seq++;
- return call_fib_notifiers(net, event_type, &info.info);
+ return mr_call_mfc_notifiers(net, RTNL_FAMILY_IPMR, event_type,
+ &mfc->_c, tb_id, &net->ipv4.ipmr_seq);
}
/**
@@ -790,11 +732,10 @@ static void ipmr_cache_free_rcu(struct rcu_head *head)
kmem_cache_free(mrt_cachep, (struct mfc_cache *)c);
}
-void ipmr_cache_free(struct mfc_cache *c)
+static void ipmr_cache_free(struct mfc_cache *c)
{
call_rcu(&c->_c.rcu, ipmr_cache_free_rcu);
}
-EXPORT_SYMBOL(ipmr_cache_free);
/* Destroy an unresolved cache entry, killing queued skbs
* and reporting error to netlink readers.
@@ -1045,6 +986,7 @@ static struct mfc_cache *ipmr_cache_alloc(void)
if (c) {
c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->_c.mfc_un.res.minvif = MAXVIFS;
+ c->_c.free = ipmr_cache_free_rcu;
refcount_set(&c->_c.mfc_un.res.refcount, 1);
}
return c;
@@ -1264,7 +1206,7 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
list_del_rcu(&c->_c.list);
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
mroute_netlink_event(mrt, c, RTM_DELROUTE);
- ipmr_cache_put(c);
+ mr_cache_put(&c->_c);
return 0;
}
@@ -1376,7 +1318,7 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
mrt->id);
mroute_netlink_event(mrt, cache, RTM_DELROUTE);
- ipmr_cache_put(cache);
+ mr_cache_put(c);
}
if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
@@ -2989,38 +2931,8 @@ static unsigned int ipmr_seq_read(struct net *net)
static int ipmr_dump(struct net *net, struct notifier_block *nb)
{
- struct mr_table *mrt;
- int err;
-
- err = ipmr_rules_dump(net, nb);
- if (err)
- return err;
-
- ipmr_for_each_table(mrt, net) {
- struct vif_device *v = &mrt->vif_table[0];
- struct mr_mfc *mfc;
- int vifi;
-
- /* Notifiy on table VIF entries */
- read_lock(&mrt_lock);
- for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
- if (!v->dev)
- continue;
-
- call_ipmr_vif_entry_notifier(nb, net, FIB_EVENT_VIF_ADD,
- v, vifi, mrt->id);
- }
- read_unlock(&mrt_lock);
-
- /* Notify on table MFC entries */
- list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
- call_ipmr_mfc_entry_notifier(nb, net,
- FIB_EVENT_ENTRY_ADD,
- (struct mfc_cache *)mfc,
- mrt->id);
- }
-
- return 0;
+ return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
+ ipmr_mr_table_iter, &mrt_lock);
}
static const struct fib_notifier_ops ipmr_notifier_ops_template = {
@@ -3097,7 +3009,6 @@ static void __net_exit ipmr_net_exit(struct net *net)
static struct pernet_operations ipmr_net_ops = {
.init = ipmr_net_init,
.exit = ipmr_net_exit,
- .async = true,
};
int __init ip_mr_init(void)
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index 8ba55bfda817..4fe97723b53f 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -321,3 +321,45 @@ done:
return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);
+
+int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
+ int (*rules_dump)(struct net *net,
+ struct notifier_block *nb),
+ struct mr_table *(*mr_iter)(struct net *net,
+ struct mr_table *mrt),
+ rwlock_t *mrt_lock)
+{
+ struct mr_table *mrt;
+ int err;
+
+ err = rules_dump(net, nb);
+ if (err)
+ return err;
+
+ for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
+ struct vif_device *v = &mrt->vif_table[0];
+ struct mr_mfc *mfc;
+ int vifi;
+
+		/* Notify on table VIF entries */
+ read_lock(mrt_lock);
+ for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
+ if (!v->dev)
+ continue;
+
+ mr_call_vif_notifier(nb, net, family,
+ FIB_EVENT_VIF_ADD,
+ v, vifi, mrt->id);
+ }
+ read_unlock(mrt_lock);
+
+ /* Notify on table MFC entries */
+ list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
+ mr_call_mfc_notifier(nb, net, family,
+ FIB_EVENT_ENTRY_ADD,
+ mfc, mrt->id);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mr_dump);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index c36ffce3c812..e3e420f3ba7b 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1635,7 +1635,6 @@ static void __net_exit arp_tables_net_exit(struct net *net)
static struct pernet_operations arp_tables_net_ops = {
.init = arp_tables_net_init,
.exit = arp_tables_net_exit,
- .async = true,
};
static int __init arp_tables_init(void)
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 49c2490193ae..8f8713b4388f 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -65,7 +65,6 @@ static void __net_exit arptable_filter_net_exit(struct net *net)
static struct pernet_operations arptable_filter_net_ops = {
.exit = arptable_filter_net_exit,
- .async = true,
};
static int __init arptable_filter_init(void)
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d4f7584d2dbe..e38395a8dcf2 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1916,7 +1916,6 @@ static void __net_exit ip_tables_net_exit(struct net *net)
static struct pernet_operations ip_tables_net_ops = {
.init = ip_tables_net_init,
.exit = ip_tables_net_exit,
- .async = true,
};
static int __init ip_tables_init(void)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 0fc88fa7a4dc..2c8d313ae216 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -250,7 +250,7 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
/* create proc dir entry */
sprintf(buffer, "%pI4", &ip);
- c->pde = proc_create_data(buffer, S_IWUSR|S_IRUSR,
+ c->pde = proc_create_data(buffer, 0600,
cn->procdir,
&clusterip_proc_fops, c);
if (!c->pde) {
@@ -845,7 +845,6 @@ static struct pernet_operations clusterip_net_ops = {
.exit = clusterip_net_exit,
.id = &clusterip_net_id,
.size = sizeof(struct clusterip_net),
- .async = true,
};
static int __init clusterip_tg_init(void)
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index c1c136a93911..9ac92ea7b93c 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -87,7 +87,6 @@ static void __net_exit iptable_filter_net_exit(struct net *net)
static struct pernet_operations iptable_filter_net_ops = {
.init = iptable_filter_net_init,
.exit = iptable_filter_net_exit,
- .async = true,
};
static int __init iptable_filter_init(void)
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index f6074059531a..dea138ca8925 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -113,7 +113,6 @@ static void __net_exit iptable_mangle_net_exit(struct net *net)
static struct pernet_operations iptable_mangle_net_ops = {
.exit = iptable_mangle_net_exit,
- .async = true,
};
static int __init iptable_mangle_init(void)
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index b771af74be79..0f7255cc65ee 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -129,7 +129,6 @@ static void __net_exit iptable_nat_net_exit(struct net *net)
static struct pernet_operations iptable_nat_net_ops = {
.exit = iptable_nat_net_exit,
- .async = true,
};
static int __init iptable_nat_init(void)
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 963753e50842..960625aabf04 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -76,7 +76,6 @@ static void __net_exit iptable_raw_net_exit(struct net *net)
static struct pernet_operations iptable_raw_net_ops = {
.exit = iptable_raw_net_exit,
- .async = true,
};
static int __init iptable_raw_init(void)
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index c40d6b3d8b6a..e5379fe57b64 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -76,7 +76,6 @@ static void __net_exit iptable_security_net_exit(struct net *net)
static struct pernet_operations iptable_security_net_ops = {
.exit = iptable_security_net_exit,
- .async = true,
};
static int __init iptable_security_init(void)
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 6531f69db010..b50721d9d30e 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -399,7 +399,6 @@ static struct pernet_operations ipv4_net_ops = {
.exit = ipv4_net_exit,
.id = &conntrack4_net_id,
.size = sizeof(struct conntrack4_net),
- .async = true,
};
static int __init nf_conntrack_l3proto_ipv4_init(void)
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 57244b62a4fc..a0d3ad60a411 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -118,7 +118,6 @@ static void __net_exit defrag4_net_exit(struct net *net)
static struct pernet_operations defrag4_net_ops = {
.exit = defrag4_net_exit,
- .async = true,
};
static int __init nf_defrag_init(void)
diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
index 162293469ac2..df5c2a2061a4 100644
--- a/net/ipv4/netfilter/nf_log_arp.c
+++ b/net/ipv4/netfilter/nf_log_arp.c
@@ -122,7 +122,6 @@ static void __net_exit nf_log_arp_net_exit(struct net *net)
static struct pernet_operations nf_log_arp_net_ops = {
.init = nf_log_arp_net_init,
.exit = nf_log_arp_net_exit,
- .async = true,
};
static int __init nf_log_arp_init(void)
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
index 7a06de140f3c..4388de0e5380 100644
--- a/net/ipv4/netfilter/nf_log_ipv4.c
+++ b/net/ipv4/netfilter/nf_log_ipv4.c
@@ -358,7 +358,6 @@ static void __net_exit nf_log_ipv4_net_exit(struct net *net)
static struct pernet_operations nf_log_ipv4_net_ops = {
.init = nf_log_ipv4_net_init,
.exit = nf_log_ipv4_net_exit,
- .async = true,
};
static int __init nf_log_ipv4_init(void)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 0164def9c808..05e47d777009 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -1177,7 +1177,7 @@ static struct ping_seq_afinfo ping_v4_seq_afinfo = {
int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo)
{
struct proc_dir_entry *p;
- p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
+ p = proc_create_data(afinfo->name, 0444, net->proc_net,
afinfo->seq_fops, afinfo);
if (!p)
return -ENOMEM;
@@ -1204,7 +1204,6 @@ static void __net_exit ping_v4_proc_exit_net(struct net *net)
static struct pernet_operations ping_v4_net_ops = {
.init = ping_v4_proc_init_net,
.exit = ping_v4_proc_exit_net,
- .async = true,
};
int __init ping_proc_init(void)
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index d97e83b2dd33..adfb75340275 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -521,12 +521,12 @@ static const struct file_operations netstat_seq_fops = {
static __net_init int ip_proc_init_net(struct net *net)
{
- if (!proc_create("sockstat", S_IRUGO, net->proc_net,
+ if (!proc_create("sockstat", 0444, net->proc_net,
&sockstat_seq_fops))
goto out_sockstat;
- if (!proc_create("netstat", S_IRUGO, net->proc_net, &netstat_seq_fops))
+ if (!proc_create("netstat", 0444, net->proc_net, &netstat_seq_fops))
goto out_netstat;
- if (!proc_create("snmp", S_IRUGO, net->proc_net, &snmp_seq_fops))
+ if (!proc_create("snmp", 0444, net->proc_net, &snmp_seq_fops))
goto out_snmp;
return 0;
@@ -549,7 +549,6 @@ static __net_exit void ip_proc_exit_net(struct net *net)
static __net_initdata struct pernet_operations ip_proc_ops = {
.init = ip_proc_init_net,
.exit = ip_proc_exit_net,
- .async = true,
};
int __init ip_misc_proc_init(void)
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 720bef7da2f6..1b4d3355624a 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -1140,7 +1140,7 @@ static const struct file_operations raw_seq_fops = {
static __net_init int raw_init_net(struct net *net)
{
- if (!proc_create("raw", S_IRUGO, net->proc_net, &raw_seq_fops))
+ if (!proc_create("raw", 0444, net->proc_net, &raw_seq_fops))
return -ENOMEM;
return 0;
@@ -1154,7 +1154,6 @@ static __net_exit void raw_exit_net(struct net *net)
static __net_initdata struct pernet_operations raw_net_ops = {
.init = raw_init_net,
.exit = raw_exit_net,
- .async = true,
};
int __init raw_proc_init(void)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4ac5728689f5..8322e479f299 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -379,12 +379,12 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
{
struct proc_dir_entry *pde;
- pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
+ pde = proc_create("rt_cache", 0444, net->proc_net,
&rt_cache_seq_fops);
if (!pde)
goto err1;
- pde = proc_create("rt_cache", S_IRUGO,
+ pde = proc_create("rt_cache", 0444,
net->proc_net_stat, &rt_cpu_seq_fops);
if (!pde)
goto err2;
@@ -418,7 +418,6 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
static struct pernet_operations ip_rt_proc_ops __net_initdata = {
.init = ip_rt_do_proc_init,
.exit = ip_rt_do_proc_exit,
- .async = true,
};
static int __init ip_rt_proc_init(void)
@@ -3017,7 +3016,6 @@ static __net_exit void sysctl_route_net_exit(struct net *net)
static __net_initdata struct pernet_operations sysctl_route_ops = {
.init = sysctl_route_net_init,
.exit = sysctl_route_net_exit,
- .async = true,
};
#endif
@@ -3031,7 +3029,6 @@ static __net_init int rt_genid_init(struct net *net)
static __net_initdata struct pernet_operations rt_genid_ops = {
.init = rt_genid_init,
- .async = true,
};
static int __net_init ipv4_inetpeer_init(struct net *net)
@@ -3057,7 +3054,6 @@ static void __net_exit ipv4_inetpeer_exit(struct net *net)
static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
.init = ipv4_inetpeer_init,
.exit = ipv4_inetpeer_exit,
- .async = true,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 5b72d97693f8..4b195bac8ac0 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -1219,7 +1219,6 @@ static __net_exit void ipv4_sysctl_exit_net(struct net *net)
static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
.init = ipv4_sysctl_init_net,
.exit = ipv4_sysctl_exit_net,
- .async = true,
};
static __init int sysctl_ipv4_init(void)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 2c6aec2643e8..9639334ebb7c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2215,7 +2215,7 @@ int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
afinfo->seq_ops.next = tcp_seq_next;
afinfo->seq_ops.stop = tcp_seq_stop;
- p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
+ p = proc_create_data(afinfo->name, 0444, net->proc_net,
afinfo->seq_fops, afinfo);
if (!p)
rc = -ENOMEM;
@@ -2391,7 +2391,6 @@ static void __net_exit tcp4_proc_exit_net(struct net *net)
static struct pernet_operations tcp4_net_ops = {
.init = tcp4_proc_init_net,
.exit = tcp4_proc_exit_net,
- .async = true,
};
int __init tcp4_proc_init(void)
@@ -2578,7 +2577,6 @@ static struct pernet_operations __net_initdata tcp_sk_ops = {
.init = tcp_sk_init,
.exit = tcp_sk_exit,
.exit_batch = tcp_sk_exit_batch,
- .async = true,
};
void __init tcp_v4_init(void)
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index aa6fea9f3328..03b51cdcc731 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -1024,7 +1024,6 @@ static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_lis
static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
.init = tcp_net_metrics_init,
.exit_batch = tcp_net_metrics_exit_batch,
- .async = true,
};
void __init tcp_metrics_init(void)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c6dc019bc64b..f49e14cd3891 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2673,7 +2673,7 @@ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
afinfo->seq_ops.next = udp_seq_next;
afinfo->seq_ops.stop = udp_seq_stop;
- p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
+ p = proc_create_data(afinfo->name, 0444, net->proc_net,
afinfo->seq_fops, afinfo);
if (!p)
rc = -ENOMEM;
@@ -2756,7 +2756,6 @@ static void __net_exit udp4_proc_exit_net(struct net *net)
static struct pernet_operations udp4_net_ops = {
.init = udp4_proc_init_net,
.exit = udp4_proc_exit_net,
- .async = true,
};
int __init udp4_proc_init(void)
@@ -2843,7 +2842,6 @@ static int __net_init udp_sysctl_init(struct net *net)
static struct pernet_operations __net_initdata udp_sysctl_ops = {
.init = udp_sysctl_init,
- .async = true,
};
void __init udp_init(void)
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 72f2c3806408..f96614e9b9a5 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -104,7 +104,6 @@ static void __net_exit udplite4_proc_exit_net(struct net *net)
static struct pernet_operations udplite4_net_ops = {
.init = udplite4_proc_init_net,
.exit = udplite4_proc_exit_net,
- .async = true,
};
static __init int udplite4_proc_init(void)
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 6c76a757fa4a..d73a6d6652f6 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -367,7 +367,6 @@ static void __net_exit xfrm4_net_exit(struct net *net)
static struct pernet_operations __net_initdata xfrm4_net_ops = {
.init = xfrm4_net_init,
.exit = xfrm4_net_exit,
- .async = true,
};
static void __init xfrm4_policy_init(void)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6fd4bbdc444f..78cef00c9596 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -94,15 +94,6 @@
#include <linux/seq_file.h>
#include <linux/export.h>
-/* Set to 3 to get tracing... */
-#define ACONF_DEBUG 2
-
-#if ACONF_DEBUG >= 3
-#define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
-#else
-#define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
-#endif
-
#define INFINITY_LIFE_TIME 0xFFFFFFFF
#define IPV6_MAX_STRLEN \
@@ -409,9 +400,8 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
dev_hold(dev);
if (snmp6_alloc_dev(ndev) < 0) {
- ADBG(KERN_WARNING
- "%s: cannot allocate memory for statistics; dev=%s.\n",
- __func__, dev->name);
+ netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
+ __func__);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
dev_put(dev);
kfree(ndev);
@@ -419,9 +409,8 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
}
if (snmp6_register_dev(ndev) < 0) {
- ADBG(KERN_WARNING
- "%s: cannot create /proc/net/dev_snmp6/%s\n",
- __func__, dev->name);
+ netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
+ __func__, dev->name);
goto err_release;
}
@@ -984,7 +973,7 @@ static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
/* Ignore adding duplicate addresses on an interface */
if (ipv6_chk_same_addr(dev_net(dev), &ifa->addr, dev, hash)) {
- ADBG("ipv6_add_addr: already assigned\n");
+ netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
err = -EEXIST;
} else {
hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
@@ -1044,7 +1033,6 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
ifa = kzalloc(sizeof(*ifa), gfp_flags);
if (!ifa) {
- ADBG("ipv6_add_addr: malloc failed\n");
err = -ENOBUFS;
goto out;
}
@@ -2618,7 +2606,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
pinfo = (struct prefix_info *) opt;
if (len < sizeof(struct prefix_info)) {
- ADBG("addrconf: prefix option too short\n");
+ netdev_dbg(dev, "addrconf: prefix option too short\n");
return;
}
@@ -4281,7 +4269,7 @@ static const struct file_operations if6_fops = {
static int __net_init if6_proc_net_init(struct net *net)
{
- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
+ if (!proc_create("if_inet6", 0444, net->proc_net, &if6_fops))
return -ENOMEM;
return 0;
}
@@ -4294,7 +4282,6 @@ static void __net_exit if6_proc_net_exit(struct net *net)
static struct pernet_operations if6_proc_net_ops = {
.init = if6_proc_net_init,
.exit = if6_proc_net_exit,
- .async = true,
};
int __init if6_proc_init(void)
@@ -4446,8 +4433,8 @@ restart:
if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
- ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
- now, next, next_sec, next_sched);
+ pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
+ now, next, next_sec, next_sched);
mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
rcu_read_unlock_bh();
}
@@ -6604,7 +6591,6 @@ static void __net_exit addrconf_exit_net(struct net *net)
static struct pernet_operations addrconf_ops = {
.init = addrconf_init_net,
.exit = addrconf_exit_net,
- .async = true,
};
static struct rtnl_af_ops inet6_ops __read_mostly = {
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index ba2e63633370..1d6ced37ad71 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -344,7 +344,6 @@ static void __net_exit ip6addrlbl_net_exit(struct net *net)
static struct pernet_operations ipv6_addr_label_ops = {
.init = ip6addrlbl_net_init,
.exit = ip6addrlbl_net_exit,
- .async = true,
};
int __init ipv6_addr_label_init(void)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index dbbe04018813..c1e292db04db 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -857,7 +857,6 @@ static void __net_exit inet6_net_exit(struct net *net)
static struct pernet_operations inet6_net_ops = {
.init = inet6_net_init,
.exit = inet6_net_exit,
- .async = true,
};
static const struct ipv6_stub ipv6_stub_impl = {
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index d580d4d456a5..bbcabbba9bd8 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -544,7 +544,7 @@ static const struct file_operations ac6_seq_fops = {
int __net_init ac6_proc_init(struct net *net)
{
- if (!proc_create("anycast6", S_IRUGO, net->proc_net, &ac6_seq_fops))
+ if (!proc_create("anycast6", 0444, net->proc_net, &ac6_seq_fops))
return -ENOMEM;
return 0;
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 3fd1ec775dc2..27f59b61f70f 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -165,6 +165,8 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
(x->xso.dev != skb->dev))
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+ else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
+ esp_features = features & ~NETIF_F_CSUM_MASK;
xo->flags |= XFRM_GSO_SEGMENT;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 00ef9467f3c0..df113c7b5fc8 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -397,7 +397,6 @@ static void __net_exit fib6_rules_net_exit(struct net *net)
static struct pernet_operations fib6_rules_net_ops = {
.init = fib6_rules_net_init,
.exit = fib6_rules_net_exit,
- .async = true,
};
int __init fib6_rules_init(void)
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 6f84668be6ea..d8c4b6374377 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -998,7 +998,6 @@ static void __net_exit icmpv6_sk_exit(struct net *net)
static struct pernet_operations icmpv6_sk_ops = {
.init = icmpv6_sk_init,
.exit = icmpv6_sk_exit,
- .async = true,
};
int __init icmpv6_init(void)
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index e438699f000f..44c39c5f0638 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -613,7 +613,6 @@ static struct pernet_operations ila_net_ops = {
.exit = ila_exit_net,
.id = &ila_net_id,
.size = sizeof(struct ila_net),
- .async = true,
};
static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 2f995e9e3050..deab2db6692e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1007,12 +1007,16 @@ add:
if (err)
return err;
+ err = call_fib6_entry_notifiers(info->nl_net,
+ FIB_EVENT_ENTRY_ADD,
+ rt, extack);
+ if (err)
+ return err;
+
rcu_assign_pointer(rt->rt6_next, iter);
atomic_inc(&rt->rt6i_ref);
rcu_assign_pointer(rt->rt6i_node, fn);
rcu_assign_pointer(*ins, rt);
- call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_ADD,
- rt, extack);
if (!info->skip_notify)
inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
@@ -1036,12 +1040,16 @@ add:
if (err)
return err;
+ err = call_fib6_entry_notifiers(info->nl_net,
+ FIB_EVENT_ENTRY_REPLACE,
+ rt, extack);
+ if (err)
+ return err;
+
atomic_inc(&rt->rt6i_ref);
rcu_assign_pointer(rt->rt6i_node, fn);
rt->rt6_next = iter->rt6_next;
rcu_assign_pointer(*ins, rt);
- call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_REPLACE,
- rt, extack);
if (!info->skip_notify)
inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
if (!(fn->fn_flags & RTN_RTINFO)) {
@@ -2161,7 +2169,6 @@ static void fib6_net_exit(struct net *net)
static struct pernet_operations fib6_net_ops = {
.init = fib6_net_init,
.exit = fib6_net_exit,
- .async = true,
};
int __init fib6_init(void)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 6ddf52282894..c05c4e82a7ca 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -844,7 +844,7 @@ static const struct file_operations ip6fl_seq_fops = {
static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
- if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
+ if (!proc_create("ip6_flowlabel", 0444, net->proc_net,
&ip6fl_seq_fops))
return -ENOMEM;
return 0;
@@ -873,7 +873,6 @@ static void __net_exit ip6_flowlabel_net_exit(struct net *net)
static struct pernet_operations ip6_flowlabel_net_ops = {
.init = ip6_flowlabel_proc_init,
.exit = ip6_flowlabel_net_exit,
- .async = true,
};
int ip6_flowlabel_init(void)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3a98c694da5f..22e86557aca4 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1528,7 +1528,6 @@ static struct pernet_operations ip6gre_net_ops = {
.exit_batch = ip6gre_exit_batch_net,
.id = &ip6gre_net_id,
.size = sizeof(struct ip6gre_net),
- .async = true,
};
static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 456fcf942f95..df4c29f7d59f 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -2260,7 +2260,6 @@ static struct pernet_operations ip6_tnl_net_ops = {
.exit_batch = ip6_tnl_exit_batch_net,
.id = &ip6_tnl_net_id,
.size = sizeof(struct ip6_tnl_net),
- .async = true,
};
/**
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index a482b854eeea..60b771f49fb5 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -1148,7 +1148,6 @@ static struct pernet_operations vti6_net_ops = {
.exit_batch = vti6_exit_batch_net,
.id = &vti6_net_id,
.size = sizeof(struct vti6_net),
- .async = true,
};
static struct xfrm6_protocol vti_esp6_protocol __read_mostly = {
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 7345bd6c4b7d..298fd8b6ed17 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -258,6 +258,23 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
fib_rules_unregister(net->ipv6.mr6_rules_ops);
rtnl_unlock();
}
+
+static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
+{
+ return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR);
+}
+
+static unsigned int ip6mr_rules_seq_read(struct net *net)
+{
+ return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
+}
+
+bool ip6mr_rule_default(const struct fib_rule *rule)
+{
+ return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
+ rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
+}
+EXPORT_SYMBOL(ip6mr_rule_default);
#else
#define ip6mr_for_each_table(mrt, net) \
for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
@@ -295,6 +312,16 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
net->ipv6.mrt6 = NULL;
rtnl_unlock();
}
+
+static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
+{
+ return 0;
+}
+
+static unsigned int ip6mr_rules_seq_read(struct net *net)
+{
+ return 0;
+}
#endif
static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
@@ -653,10 +680,25 @@ failure:
}
#endif
-/*
- * Delete a VIF entry
- */
+static int call_ip6mr_vif_entry_notifiers(struct net *net,
+ enum fib_event_type event_type,
+ struct vif_device *vif,
+ mifi_t vif_index, u32 tb_id)
+{
+ return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
+ vif, vif_index, tb_id,
+ &net->ipv6.ipmr_seq);
+}
+static int call_ip6mr_mfc_entry_notifiers(struct net *net,
+ enum fib_event_type event_type,
+ struct mfc6_cache *mfc, u32 tb_id)
+{
+ return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
+ &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
+}
+
+/* Delete a VIF entry */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
struct list_head *head)
{
@@ -669,6 +711,11 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
v = &mrt->vif_table[vifi];
+ if (VIF_EXISTS(mrt, vifi))
+ call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
+ FIB_EVENT_VIF_DEL, v, vifi,
+ mrt->id);
+
write_lock_bh(&mrt_lock);
dev = v->dev;
v->dev = NULL;
@@ -887,6 +934,8 @@ static int mif6_add(struct net *net, struct mr_table *mrt,
if (vifi + 1 > mrt->maxvif)
mrt->maxvif = vifi + 1;
write_unlock_bh(&mrt_lock);
+ call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
+ v, vifi, mrt->id);
return 0;
}
@@ -940,6 +989,8 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
return NULL;
c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->_c.mfc_un.res.minvif = MAXMIFS;
+ c->_c.free = ip6mr_cache_free_rcu;
+ refcount_set(&c->_c.mfc_un.res.refcount, 1);
return c;
}
@@ -1175,8 +1226,10 @@ static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
list_del_rcu(&c->_c.list);
+ call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
+ FIB_EVENT_ENTRY_DEL, c, mrt->id);
mr6_netlink_event(mrt, c, RTM_DELROUTE);
- ip6mr_cache_free(c);
+ mr_cache_put(&c->_c);
return 0;
}
@@ -1203,21 +1256,63 @@ static int ip6mr_device_event(struct notifier_block *this,
return NOTIFY_DONE;
}
+static unsigned int ip6mr_seq_read(struct net *net)
+{
+ ASSERT_RTNL();
+
+ return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
+}
+
+static int ip6mr_dump(struct net *net, struct notifier_block *nb)
+{
+ return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
+ ip6mr_mr_table_iter, &mrt_lock);
+}
+
static struct notifier_block ip6_mr_notifier = {
.notifier_call = ip6mr_device_event
};
-/*
- * Setup for IP multicast routing
- */
+static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
+ .family = RTNL_FAMILY_IP6MR,
+ .fib_seq_read = ip6mr_seq_read,
+ .fib_dump = ip6mr_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __net_init ip6mr_notifier_init(struct net *net)
+{
+ struct fib_notifier_ops *ops;
+
+ net->ipv6.ipmr_seq = 0;
+ ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ net->ipv6.ip6mr_notifier_ops = ops;
+
+ return 0;
+}
+
+static void __net_exit ip6mr_notifier_exit(struct net *net)
+{
+ fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
+ net->ipv6.ip6mr_notifier_ops = NULL;
+}
+
+/* Setup for IP multicast routing */
static int __net_init ip6mr_net_init(struct net *net)
{
int err;
+ err = ip6mr_notifier_init(net);
+ if (err)
+ return err;
+
err = ip6mr_rules_init(net);
if (err < 0)
- goto fail;
+ goto ip6mr_rules_fail;
#ifdef CONFIG_PROC_FS
err = -ENOMEM;
@@ -1235,7 +1330,8 @@ proc_cache_fail:
proc_vif_fail:
ip6mr_rules_exit(net);
#endif
-fail:
+ip6mr_rules_fail:
+ ip6mr_notifier_exit(net);
return err;
}
@@ -1246,12 +1342,12 @@ static void __net_exit ip6mr_net_exit(struct net *net)
remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
ip6mr_rules_exit(net);
+ ip6mr_notifier_exit(net);
}
static struct pernet_operations ip6mr_net_ops = {
.init = ip6mr_net_init,
.exit = ip6mr_net_exit,
- .async = true,
};
int __init ip6_mr_init(void)
@@ -1337,6 +1433,8 @@ static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
if (!mrtsock)
c->_c.mfc_flags |= MFC_STATIC;
write_unlock_bh(&mrt_lock);
+ call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
+ c, mrt->id);
mr6_netlink_event(mrt, c, RTM_NEWROUTE);
return 0;
}
@@ -1388,6 +1486,8 @@ static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
ip6mr_cache_resolve(net, mrt, uc, c);
ip6mr_cache_free(uc);
}
+ call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
+ c, mrt->id);
mr6_netlink_event(mrt, c, RTM_NEWROUTE);
return 0;
}
@@ -1417,13 +1517,17 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
list_del_rcu(&c->list);
mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
- ip6mr_cache_free((struct mfc6_cache *)c);
+ mr_cache_put(c);
}
if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
list_del(&c->list);
+ call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
+ FIB_EVENT_ENTRY_DEL,
+ (struct mfc6_cache *)c,
+ mrt->id);
mr6_netlink_event(mrt, (struct mfc6_cache *)c,
RTM_DELROUTE);
ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index d1a0cefac273..793159d77d8a 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2921,9 +2921,9 @@ static int __net_init igmp6_proc_init(struct net *net)
int err;
err = -ENOMEM;
- if (!proc_create("igmp6", S_IRUGO, net->proc_net, &igmp6_mc_seq_fops))
+ if (!proc_create("igmp6", 0444, net->proc_net, &igmp6_mc_seq_fops))
goto out;
- if (!proc_create("mcfilter6", S_IRUGO, net->proc_net,
+ if (!proc_create("mcfilter6", 0444, net->proc_net,
&igmp6_mcf_seq_fops))
goto out_proc_net_igmp6;
@@ -2997,7 +2997,6 @@ static void __net_exit igmp6_net_exit(struct net *net)
static struct pernet_operations igmp6_net_ops = {
.init = igmp6_net_init,
.exit = igmp6_net_exit,
- .async = true,
};
int __init igmp6_init(void)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d1d0b2fa7a07..9de4dfb126ba 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1883,7 +1883,6 @@ static void __net_exit ndisc_net_exit(struct net *net)
static struct pernet_operations ndisc_net_ops = {
.init = ndisc_net_init,
.exit = ndisc_net_exit,
- .async = true,
};
int __init ndisc_init(void)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 4de8ac1e5af4..62358b93bbac 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1928,7 +1928,6 @@ static void __net_exit ip6_tables_net_exit(struct net *net)
static struct pernet_operations ip6_tables_net_ops = {
.init = ip6_tables_net_init,
.exit = ip6_tables_net_exit,
- .async = true,
};
static int __init ip6_tables_init(void)
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 06561c84c0bc..1343077dde93 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -87,7 +87,6 @@ static void __net_exit ip6table_filter_net_exit(struct net *net)
static struct pernet_operations ip6table_filter_net_ops = {
.init = ip6table_filter_net_init,
.exit = ip6table_filter_net_exit,
- .async = true,
};
static int __init ip6table_filter_init(void)
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index a11e25936b45..b0524b18c4fb 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -107,7 +107,6 @@ static void __net_exit ip6table_mangle_net_exit(struct net *net)
static struct pernet_operations ip6table_mangle_net_ops = {
.exit = ip6table_mangle_net_exit,
- .async = true,
};
static int __init ip6table_mangle_init(void)
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 4475fd300bb6..47306e45a80a 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -131,7 +131,6 @@ static void __net_exit ip6table_nat_net_exit(struct net *net)
static struct pernet_operations ip6table_nat_net_ops = {
.exit = ip6table_nat_net_exit,
- .async = true,
};
static int __init ip6table_nat_init(void)
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index a88f3b1995b1..710fa0806c37 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -75,7 +75,6 @@ static void __net_exit ip6table_raw_net_exit(struct net *net)
static struct pernet_operations ip6table_raw_net_ops = {
.exit = ip6table_raw_net_exit,
- .async = true,
};
static int __init ip6table_raw_init(void)
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 320048c008dc..cf26ccb04056 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -74,7 +74,6 @@ static void __net_exit ip6table_security_net_exit(struct net *net)
static struct pernet_operations ip6table_security_net_ops = {
.exit = ip6table_security_net_exit,
- .async = true,
};
static int __init ip6table_security_init(void)
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index ba54bb3bd1e4..663827ee3cf8 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -401,7 +401,6 @@ static struct pernet_operations ipv6_net_ops = {
.exit = ipv6_net_exit,
.id = &conntrack6_net_id,
.size = sizeof(struct conntrack6_net),
- .async = true,
};
static int __init nf_conntrack_l3proto_ipv6_init(void)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 34136fe80ed5..b84ce3e6d728 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -646,7 +646,6 @@ static void nf_ct_net_exit(struct net *net)
static struct pernet_operations nf_ct_net_ops = {
.init = nf_ct_net_init,
.exit = nf_ct_net_exit,
- .async = true,
};
int nf_ct_frag6_init(void)
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 32f98bc06900..c87b48359e8f 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -103,7 +103,6 @@ static void __net_exit defrag6_net_exit(struct net *net)
static struct pernet_operations defrag6_net_ops = {
.exit = defrag6_net_exit,
- .async = true,
};
static int __init nf_defrag_init(void)
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index 0220e584589c..b397a8fe88b9 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -390,7 +390,6 @@ static void __net_exit nf_log_ipv6_net_exit(struct net *net)
static struct pernet_operations nf_log_ipv6_net_ops = {
.init = nf_log_ipv6_net_init,
.exit = nf_log_ipv6_net_exit,
- .async = true,
};
static int __init nf_log_ipv6_init(void)
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 318c6e914234..d12c55dad7d1 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -240,7 +240,6 @@ static void __net_init ping_v6_proc_exit_net(struct net *net)
static struct pernet_operations ping_v6_net_ops = {
.init = ping_v6_proc_init_net,
.exit = ping_v6_proc_exit_net,
- .async = true,
};
#endif
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 1678cf037688..6e57028d2e91 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -290,7 +290,7 @@ int snmp6_register_dev(struct inet6_dev *idev)
if (!net->mib.proc_net_devsnmp6)
return -ENOENT;
- p = proc_create_data(idev->dev->name, S_IRUGO,
+ p = proc_create_data(idev->dev->name, 0444,
net->mib.proc_net_devsnmp6,
&snmp6_dev_seq_fops, idev);
if (!p)
@@ -314,11 +314,11 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
static int __net_init ipv6_proc_init_net(struct net *net)
{
- if (!proc_create("sockstat6", S_IRUGO, net->proc_net,
+ if (!proc_create("sockstat6", 0444, net->proc_net,
&sockstat6_seq_fops))
return -ENOMEM;
- if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
+ if (!proc_create("snmp6", 0444, net->proc_net, &snmp6_seq_fops))
goto proc_snmp6_fail;
net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
@@ -343,7 +343,6 @@ static void __net_exit ipv6_proc_exit_net(struct net *net)
static struct pernet_operations ipv6_proc_ops = {
.init = ipv6_proc_init_net,
.exit = ipv6_proc_exit_net,
- .async = true,
};
int __init ipv6_misc_proc_init(void)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 10a4ac4933b7..5eb9b08947ed 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1318,7 +1318,7 @@ static const struct file_operations raw6_seq_fops = {
static int __net_init raw6_init_net(struct net *net)
{
- if (!proc_create("raw6", S_IRUGO, net->proc_net, &raw6_seq_fops))
+ if (!proc_create("raw6", 0444, net->proc_net, &raw6_seq_fops))
return -ENOMEM;
return 0;
@@ -1332,7 +1332,6 @@ static void __net_exit raw6_exit_net(struct net *net)
static struct pernet_operations raw6_net_ops = {
.init = raw6_init_net,
.exit = raw6_exit_net,
- .async = true,
};
int __init raw6_proc_init(void)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index b5da69c83123..08a139f14d0f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -650,10 +650,6 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
table[1].data = &net->ipv6.frags.low_thresh;
table[1].extra2 = &net->ipv6.frags.high_thresh;
table[2].data = &net->ipv6.frags.timeout;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[0].procname = NULL;
}
hdr = register_net_sysctl(net, "net/ipv6", table);
@@ -733,7 +729,6 @@ static void __net_exit ipv6_frags_exit_net(struct net *net)
static struct pernet_operations ip6_frags_ops = {
.init = ipv6_frags_init_net,
.exit = ipv6_frags_exit_net,
- .async = true,
};
int __init ipv6_frag_init(void)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a2ed9fdd58d4..ba8d5df50ebe 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5067,7 +5067,7 @@ static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
- proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
+ proc_create("rt6_stats", 0444, net->proc_net, &rt6_stats_seq_fops);
#endif
return 0;
}
@@ -5083,7 +5083,6 @@ static void __net_exit ip6_route_net_exit_late(struct net *net)
static struct pernet_operations ip6_route_net_ops = {
.init = ip6_route_net_init,
.exit = ip6_route_net_exit,
- .async = true,
};
static int __net_init ipv6_inetpeer_init(struct net *net)
@@ -5109,13 +5108,11 @@ static void __net_exit ipv6_inetpeer_exit(struct net *net)
static struct pernet_operations ipv6_inetpeer_ops = {
.init = ipv6_inetpeer_init,
.exit = ipv6_inetpeer_exit,
- .async = true,
};
static struct pernet_operations ip6_route_net_late_ops = {
.init = ip6_route_net_init_late,
.exit = ip6_route_net_exit_late,
- .async = true,
};
static struct notifier_block ip6_route_dev_notifier = {
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index c3f13c3bd8a9..7f5621d09571 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -395,7 +395,6 @@ static void __net_exit seg6_net_exit(struct net *net)
static struct pernet_operations ip6_segments_ops = {
.init = seg6_net_init,
.exit = seg6_net_exit,
- .async = true,
};
static const struct genl_ops seg6_genl_ops[] = {
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8a4f8fddd812..1522bcfd253f 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1888,7 +1888,6 @@ static struct pernet_operations sit_net_ops = {
.exit_batch = sit_exit_batch_net,
.id = &sit_net_id,
.size = sizeof(struct sit_net),
- .async = true,
};
static void __exit sit_cleanup(void)
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 966c42af92f4..6fbdef630152 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -278,7 +278,6 @@ static void __net_exit ipv6_sysctl_net_exit(struct net *net)
static struct pernet_operations ipv6_sysctl_net_ops = {
.init = ipv6_sysctl_net_init,
.exit = ipv6_sysctl_net_exit,
- .async = true,
};
static struct ctl_table_header *ip6_header;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5425d7b100ee..883df0ad5bfe 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2007,7 +2007,6 @@ static struct pernet_operations tcpv6_net_ops = {
.init = tcpv6_net_init,
.exit = tcpv6_net_exit,
.exit_batch = tcpv6_net_exit_batch,
- .async = true,
};
int __init tcpv6_init(void)
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index f3839780dc31..14ae32bb1f3d 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -123,7 +123,6 @@ static void __net_exit udplite6_proc_exit_net(struct net *net)
static struct pernet_operations udplite6_net_ops = {
.init = udplite6_proc_init_net,
.exit = udplite6_proc_exit_net,
- .async = true,
};
int __init udplite6_proc_init(void)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index cbb270bd81b0..416fe67271a9 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -400,7 +400,6 @@ static void __net_exit xfrm6_net_exit(struct net *net)
static struct pernet_operations xfrm6_net_ops = {
.init = xfrm6_net_init,
.exit = xfrm6_net_exit,
- .async = true,
};
int __init xfrm6_init(void)
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index a9673619e0e9..f85f0d7480ac 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -353,7 +353,6 @@ static struct pernet_operations xfrm6_tunnel_net_ops = {
.exit = xfrm6_tunnel_net_exit,
.id = &xfrm6_tunnel_net_id,
.size = sizeof(struct xfrm6_tunnel_net),
- .async = true,
};
static int __init xfrm6_tunnel_init(void)
diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c
index 2c1c8b3e4452..1fac92543094 100644
--- a/net/kcm/kcmproc.c
+++ b/net/kcm/kcmproc.c
@@ -269,7 +269,7 @@ static int kcm_proc_register(struct net *net, struct kcm_seq_muxinfo *muxinfo)
struct proc_dir_entry *p;
int rc = 0;
- p = proc_create_data(muxinfo->name, S_IRUGO, net->proc_net,
+ p = proc_create_data(muxinfo->name, 0444, net->proc_net,
muxinfo->seq_fops, muxinfo);
if (!p)
rc = -ENOMEM;
@@ -406,7 +406,7 @@ static int kcm_proc_init_net(struct net *net)
{
int err;
- if (!proc_create("kcm_stats", S_IRUGO, net->proc_net,
+ if (!proc_create("kcm_stats", 0444, net->proc_net,
&kcm_stats_seq_fops)) {
err = -ENOMEM;
goto out_kcm_stats;
@@ -433,7 +433,6 @@ static void kcm_proc_exit_net(struct net *net)
static struct pernet_operations kcm_net_ops = {
.init = kcm_proc_init_net,
.exit = kcm_proc_exit_net,
- .async = true,
};
int __init kcm_proc_init(void)
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 516cfad71b85..dc76bc346829 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -2028,7 +2028,6 @@ static struct pernet_operations kcm_net_ops = {
.exit = kcm_exit_net,
.id = &kcm_net_id,
.size = sizeof(struct kcm_net),
- .async = true,
};
static int __init kcm_init(void)
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3ac08ab26207..7e2e7188e7f4 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3863,7 +3863,6 @@ static struct pernet_operations pfkey_net_ops = {
.exit = pfkey_net_exit,
.id = &pfkey_net_id,
.size = sizeof(struct netns_pfkey),
- .async = true,
};
static void __exit ipsec_pfkey_exit(void)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index b86868da50d4..14b67dfacc4b 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1789,7 +1789,6 @@ static struct pernet_operations l2tp_net_ops = {
.exit = l2tp_exit_net,
.id = &l2tp_net_id,
.size = sizeof(struct l2tp_net),
- .async = true,
};
static int __init l2tp_init(void)
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 977bca659787..d6deca11da19 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1742,7 +1742,7 @@ static __net_init int pppol2tp_init_net(struct net *net)
struct proc_dir_entry *pde;
int err = 0;
- pde = proc_create("pppol2tp", S_IRUGO, net->proc_net,
+ pde = proc_create("pppol2tp", 0444, net->proc_net,
&pppol2tp_proc_fops);
if (!pde) {
err = -ENOMEM;
@@ -1762,7 +1762,6 @@ static struct pernet_operations pppol2tp_net_ops = {
.init = pppol2tp_init_net,
.exit = pppol2tp_exit_net,
.id = &pppol2tp_net_id,
- .async = true,
};
/*****************************************************************************
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 66821e8a2b7a..62ea0aed94b4 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -249,11 +249,11 @@ int __init llc_proc_init(void)
if (!llc_proc_dir)
goto out;
- p = proc_create("socket", S_IRUGO, llc_proc_dir, &llc_seq_socket_fops);
+ p = proc_create("socket", 0444, llc_proc_dir, &llc_seq_socket_fops);
if (!p)
goto out_socket;
- p = proc_create("core", S_IRUGO, llc_proc_dir, &llc_seq_core_fops);
+ p = proc_create("core", 0444, llc_proc_dir, &llc_seq_core_fops);
if (!p)
goto out_core;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fd68f6fb02d7..85dbaa891059 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -4,6 +4,7 @@
* Copyright 2006-2010 Johannes Berg <[email protected]>
* Copyright 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This file is GPLv2 as found in COPYING.
*/
@@ -925,6 +926,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
*/
sdata->control_port_protocol = params->crypto.control_port_ethertype;
sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt;
+ sdata->control_port_over_nl80211 =
+ params->crypto.control_port_over_nl80211;
sdata->encrypt_headroom = ieee80211_cs_headroom(sdata->local,
&params->crypto,
sdata->vif.type);
@@ -934,6 +937,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
params->crypto.control_port_ethertype;
vlan->control_port_no_encrypt =
params->crypto.control_port_no_encrypt;
+ vlan->control_port_over_nl80211 =
+ params->crypto.control_port_over_nl80211;
vlan->encrypt_headroom =
ieee80211_cs_headroom(sdata->local,
&params->crypto,
@@ -2019,6 +2024,8 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
if (err)
return err;
+ sdata->control_port_over_nl80211 = setup->control_port_over_nl80211;
+
/* can mesh use other SMPS modes? */
sdata->smps_mode = IEEE80211_SMPS_OFF;
sdata->needed_rx_chains = sdata->local->rx_chains;
@@ -2156,6 +2163,8 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
*/
p.uapsd = false;
+ ieee80211_regulatory_limit_wmm_params(sdata, &p, params->ac);
+
sdata->tx_conf[params->ac] = p;
if (drv_conf_tx(local, sdata, params->ac, &p)) {
wiphy_debug(local->hw.wiphy,
@@ -2313,6 +2322,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
memcpy(sdata->vif.bss_conf.mcast_rate, rate,
sizeof(int) * NUM_NL80211_BANDS);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MCAST_RATE);
+
return 0;
}
@@ -3786,4 +3797,5 @@ const struct cfg80211_ops mac80211_config_ops = {
.add_nan_func = ieee80211_add_nan_func,
.del_nan_func = ieee80211_del_nan_func,
.set_multicast_to_unicast = ieee80211_set_multicast_to_unicast,
+ .tx_control_port = ieee80211_tx_control_port,
};
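
Note: the cfg.c hunks above propagate the new control_port_over_nl80211 flag from the cfg80211 crypto settings into the sdata (and any AP_VLANs) for AP and mesh setup, and register the new tx_control_port op; the IBSS and managed-mode paths get the same treatment further down. The RX-side effect, condensed as a sketch (the real helper is in the net/mac80211/rx.c hunks below; noencrypt is derived there from the RX status flags):

    /* sketch: with the flag set, EAPOL/preauth frames bypass the
     * netdev and are delivered to userspace over nl80211 */
    if (sdata->control_port_over_nl80211 &&
        (skb->protocol == sdata->control_port_protocol ||
         skb->protocol == cpu_to_be16(ETH_P_PREAUTH)))
            cfg80211_rx_control_port(dev, skb->data, skb->len,
                                     eth_hdr(skb)->h_source,
                                     be16_to_cpu(skb->protocol),
                                     noencrypt);
    else
            netif_receive_skb(skb);
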
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index d7523530d3f8..c78036a0ac94 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -466,6 +466,21 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
__ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_PEER_REQUEST);
}
+enum nl80211_smps_mode
+ieee80211_smps_mode_to_smps_mode(enum ieee80211_smps_mode smps)
+{
+ switch (smps) {
+ case IEEE80211_SMPS_OFF:
+ return NL80211_SMPS_OFF;
+ case IEEE80211_SMPS_STATIC:
+ return NL80211_SMPS_STATIC;
+ case IEEE80211_SMPS_DYNAMIC:
+ return NL80211_SMPS_DYNAMIC;
+ default:
+ return NL80211_SMPS_OFF;
+ }
+}
+
int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
enum ieee80211_smps_mode smps, const u8 *da,
const u8 *bssid)
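
Note: ieee80211_smps_mode_to_smps_mode() translates mac80211's internal SMPS enum into the nl80211 UAPI enum so that station opmode notifications report values userspace understands. Its call site, from the rx.c hunk later in this diff:

    /* sketch: report SMPS changes in nl80211 terms, not internal ones */
    rx->sta->sta.smps_mode = smps_mode;
    sta_opmode.smps_mode = ieee80211_smps_mode_to_smps_mode(smps_mode);
    sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
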
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index db07e0de9a03..6449a1c2283b 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1839,11 +1839,12 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED
| IEEE80211_HT_PARAM_RIFS_MODE;
- changed |= BSS_CHANGED_HT;
+ changed |= BSS_CHANGED_HT | BSS_CHANGED_MCAST_RATE;
ieee80211_bss_info_change_notify(sdata, changed);
sdata->smps_mode = IEEE80211_SMPS_OFF;
sdata->needed_rx_chains = local->rx_chains;
+ sdata->control_port_over_nl80211 = params->control_port_over_nl80211;
ieee80211_queue_work(&local->hw, &sdata->work);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ae9c33cd8ada..6372dbdadf53 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -4,6 +4,7 @@
* Copyright 2006-2007 Jiri Benc <[email protected]>
* Copyright 2007-2010 Johannes Berg <[email protected]>
* Copyright 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -899,6 +900,7 @@ struct ieee80211_sub_if_data {
u16 sequence_number;
__be16 control_port_protocol;
bool control_port_no_encrypt;
+ bool control_port_over_nl80211;
int encrypt_headroom;
atomic_t num_tx_queued;
@@ -1734,6 +1736,9 @@ void ieee80211_check_fast_xmit(struct sta_info *sta);
void ieee80211_check_fast_xmit_all(struct ieee80211_local *local);
void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata);
void ieee80211_clear_fast_xmit(struct sta_info *sta);
+int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *buf, size_t len,
+ const u8 *dest, __be16 proto, bool unencrypted);
/* HT */
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
@@ -1788,6 +1793,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs);
+enum nl80211_smps_mode
+ieee80211_smps_mode_to_smps_mode(enum ieee80211_smps_mode smps);
/* VHT */
void
@@ -1814,6 +1821,8 @@ void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_vht_cap *vht_cap);
void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
u16 vht_mask[NL80211_VHT_NSS_MAX]);
+enum nl80211_chan_width
+ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta);
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1865,6 +1874,9 @@ extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
int ieee80211_frame_duration(enum nl80211_band band, size_t len,
int rate, int erp, int short_preamble,
int shift);
+void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_tx_queue_params *qparam,
+ int ac);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
bool bss_notify, bool enable_qos);
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d13ba064951f..555e389b7dfa 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -519,6 +519,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
master->control_port_protocol;
sdata->control_port_no_encrypt =
master->control_port_no_encrypt;
+ sdata->control_port_over_nl80211 =
+ master->control_port_over_nl80211;
sdata->vif.cab_queue = master->vif.cab_queue;
memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
sizeof(sdata->vif.hw_queue));
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index aee05ec3f7ea..ee0d0cc8dc3b 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -126,7 +126,7 @@ static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_sub_if_data *sdata = key->sdata;
struct sta_info *sta;
int ret = -EOPNOTSUPP;
@@ -162,7 +162,6 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (sta && !sta->uploaded)
goto out_unsupported;
- sdata = key->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
/*
* The driver doesn't know anything about VLAN interfaces.
@@ -214,8 +213,11 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
/* all of these we can do in software - if driver can */
if (ret == 1)
return 0;
- if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
+ if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ return 0;
return -EINVAL;
+ }
return 0;
default:
return -EINVAL;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0785d04a80bc..9ea17afaa237 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -554,6 +554,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
NL80211_FEATURE_USERSPACE_MPM |
NL80211_FEATURE_FULL_AP_CLIENT_STATE;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_STA);
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211);
if (!ops->hw_scan)
wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -930,8 +932,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
IEEE80211_HT_CAP_SM_PS_SHIFT;
}
- /* if low-level driver supports AP, we also support VLAN */
- if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+ /* if low-level driver supports AP, we also support VLAN.
+ * drivers advertising SW_CRYPTO_CONTROL should enable AP_VLAN
+ * based on their support to transmit SW encrypted packets.
+ */
+ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP) &&
+ !ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL)) {
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
}
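
Note: taken together, the key.c and main.c hunks let AP_VLAN coexist with SW_CRYPTO_CONTROL. mac80211 no longer advertises AP_VLAN automatically for such drivers (they must opt in based on their software-crypto TX support), and a key the driver declines on an AP_VLAN interface falls back to software crypto instead of failing. The resulting decision, roughly:

    /* sketch of the new fallback in ieee80211_key_enable_hw_accel() */
    if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL)) {
            if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                    return 0;       /* use S/W crypto on the VLAN */
            return -EINVAL;         /* otherwise the key is unusable */
    }
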
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6a381cbe1e33..d51da26e9c18 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -880,7 +880,8 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_HT |
BSS_CHANGED_BASIC_RATES |
- BSS_CHANGED_BEACON_INT;
+ BSS_CHANGED_BEACON_INT |
+ BSS_CHANGED_MCAST_RATE;
local->fif_other_bss++;
/* mesh ifaces must set allmulti to forward mcast traffic */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index fe4aefb06d9f..69449db7e283 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1787,12 +1787,14 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
params[ac].acm = acm;
params[ac].uapsd = uapsd;
- if (params[ac].cw_min > params[ac].cw_max) {
+ if (params->cw_min == 0 ||
+ params[ac].cw_min > params[ac].cw_max) {
sdata_info(sdata,
"AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
params[ac].cw_min, params[ac].cw_max, aci);
return false;
}
+ ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
}
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
@@ -2011,8 +2013,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
/* deauthenticate/disassociate now */
if (tx || frame_buf) {
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-
/*
* In multi channel scenarios guarantee that the virtual
* interface is granted immediate airtime to transmit the
@@ -3307,82 +3307,14 @@ static const u64 care_about_ies =
(1ULL << WLAN_EID_HT_OPERATION) |
(1ULL << WLAN_EID_EXT_CHANSWITCH_ANN);
-static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt, size_t len,
- struct ieee80211_rx_status *rx_status)
+static void ieee80211_handle_beacon_sig(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_if_managed *ifmgd,
+ struct ieee80211_bss_conf *bss_conf,
+ struct ieee80211_local *local,
+ struct ieee80211_rx_status *rx_status)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
- size_t baselen;
- struct ieee802_11_elems elems;
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_channel *chan;
- struct sta_info *sta;
- u32 changed = 0;
- bool erp_valid;
- u8 erp_value = 0;
- u32 ncrc;
- u8 *bssid;
- u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
-
- sdata_assert_lock(sdata);
-
- /* Process beacon from the current BSS */
- baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
- if (baselen > len)
- return;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf) {
- rcu_read_unlock();
- return;
- }
-
- if (rx_status->freq != chanctx_conf->def.chan->center_freq) {
- rcu_read_unlock();
- return;
- }
- chan = chanctx_conf->def.chan;
- rcu_read_unlock();
-
- if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon &&
- ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
- ieee802_11_parse_elems(mgmt->u.beacon.variable,
- len - baselen, false, &elems);
-
- ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
- if (elems.tim && !elems.parse_error) {
- const struct ieee80211_tim_ie *tim_ie = elems.tim;
- ifmgd->dtim_period = tim_ie->dtim_period;
- }
- ifmgd->have_beacon = true;
- ifmgd->assoc_data->need_beacon = false;
- if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
- sdata->vif.bss_conf.sync_tsf =
- le64_to_cpu(mgmt->u.beacon.timestamp);
- sdata->vif.bss_conf.sync_device_ts =
- rx_status->device_timestamp;
- if (elems.tim)
- sdata->vif.bss_conf.sync_dtim_count =
- elems.tim->dtim_count;
- else
- sdata->vif.bss_conf.sync_dtim_count = 0;
- }
- /* continue assoc process */
- ifmgd->assoc_data->timeout = jiffies;
- ifmgd->assoc_data->timeout_started = true;
- run_again(sdata, ifmgd->assoc_data->timeout);
- return;
- }
-
- if (!ifmgd->associated ||
- !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
- return;
- bssid = ifmgd->associated->bssid;
-
/* Track average RSSI from the Beacon frames of the current AP */
+
if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
ewma_beacon_signal_init(&ifmgd->ave_beacon_signal);
@@ -3469,6 +3401,86 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
sig, GFP_KERNEL);
}
}
+}
+
+static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+ size_t baselen;
+ struct ieee802_11_elems elems;
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_channel *chan;
+ struct sta_info *sta;
+ u32 changed = 0;
+ bool erp_valid;
+ u8 erp_value = 0;
+ u32 ncrc;
+ u8 *bssid;
+ u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
+
+ sdata_assert_lock(sdata);
+
+ /* Process beacon from the current BSS */
+ baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
+ if (baselen > len)
+ return;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (rx_status->freq != chanctx_conf->def.chan->center_freq) {
+ rcu_read_unlock();
+ return;
+ }
+ chan = chanctx_conf->def.chan;
+ rcu_read_unlock();
+
+ if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon &&
+ ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
+ ieee802_11_parse_elems(mgmt->u.beacon.variable,
+ len - baselen, false, &elems);
+
+ ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
+ if (elems.tim && !elems.parse_error) {
+ const struct ieee80211_tim_ie *tim_ie = elems.tim;
+ ifmgd->dtim_period = tim_ie->dtim_period;
+ }
+ ifmgd->have_beacon = true;
+ ifmgd->assoc_data->need_beacon = false;
+ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
+ sdata->vif.bss_conf.sync_tsf =
+ le64_to_cpu(mgmt->u.beacon.timestamp);
+ sdata->vif.bss_conf.sync_device_ts =
+ rx_status->device_timestamp;
+ if (elems.tim)
+ sdata->vif.bss_conf.sync_dtim_count =
+ elems.tim->dtim_count;
+ else
+ sdata->vif.bss_conf.sync_dtim_count = 0;
+ }
+ /* continue assoc process */
+ ifmgd->assoc_data->timeout = jiffies;
+ ifmgd->assoc_data->timeout_started = true;
+ run_again(sdata, ifmgd->assoc_data->timeout);
+ return;
+ }
+
+ if (!ifmgd->associated ||
+ !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
+ return;
+ bssid = ifmgd->associated->bssid;
+
+ if (!(rx_status->flag & RX_FLAG_NO_SIGNAL_VAL))
+ ieee80211_handle_beacon_sig(sdata, ifmgd, bss_conf,
+ local, rx_status);
if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) {
mlme_dbg_ratelimited(sdata,
@@ -4845,6 +4857,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
sdata->control_port_protocol = req->crypto.control_port_ethertype;
sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt;
+ sdata->control_port_over_nl80211 =
+ req->crypto.control_port_over_nl80211;
sdata->encrypt_headroom = ieee80211_cs_headroom(local, &req->crypto,
sdata->vif.type);
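
Note: three things change in mlme.c: WMM parameter sets advertising a zero CWmin are rejected in favour of defaults, accepted values are clamped by ieee80211_regulatory_limit_wmm_params(), and the beacon RSSI bookkeeping is split out into ieee80211_handle_beacon_sig(), which the caller now skips when the RX status carries RX_FLAG_NO_SIGNAL_VAL (the same guard appears in the rx.c and scan.c hunks below). The validity test, condensed as a sketch (not the kernel's exact code, which indexes the first AC for the zero check):

    /* sketch: per-AC WMM parameters are usable only if CWmin is
     * nonzero and does not exceed CWmax */
    static bool wmm_params_valid(const struct ieee80211_tx_queue_params *p)
    {
            return p->cw_min != 0 && p->cw_min <= p->cw_max;
    }
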
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 9766c1cc4b0a..8221bc5582ab 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -690,7 +690,7 @@ minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
#ifdef CONFIG_MAC80211_DEBUGFS
mp->fixed_rate_idx = (u32) -1;
mp->dbg_fixed_rate = debugfs_create_u32("fixed_rate_idx",
- S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx);
+ 0666, debugfsdir, &mp->fixed_rate_idx);
#endif
minstrel_init_cck_rates(mp);
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 36fc971deb86..9ad7d63d3e5b 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -214,11 +214,11 @@ minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
{
struct minstrel_sta_info *mi = priv_sta;
- mi->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, mi,
- &minstrel_stat_fops);
+ mi->dbg_stats = debugfs_create_file("rc_stats", 0444, dir, mi,
+ &minstrel_stat_fops);
- mi->dbg_stats_csv = debugfs_create_file("rc_stats_csv", S_IRUGO, dir,
- mi, &minstrel_stat_csv_fops);
+ mi->dbg_stats_csv = debugfs_create_file("rc_stats_csv", 0444, dir, mi,
+ &minstrel_stat_csv_fops);
}
void
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 7d969e300fb3..bfcc03152dc6 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -303,10 +303,10 @@ minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
{
struct minstrel_ht_sta_priv *msp = priv_sta;
- msp->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, msp,
- &minstrel_ht_stat_fops);
- msp->dbg_stats_csv = debugfs_create_file("rc_stats_csv", S_IRUGO,
- dir, msp, &minstrel_ht_stat_csv_fops);
+ msp->dbg_stats = debugfs_create_file("rc_stats", 0444, dir, msp,
+ &minstrel_ht_stat_fops);
+ msp->dbg_stats_csv = debugfs_create_file("rc_stats_csv", 0444, dir, msp,
+ &minstrel_ht_stat_csv_fops);
}
void
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9c898a3688c6..03102aff0953 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2245,6 +2245,32 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
return true;
}
+static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
+ struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct net_device *dev = sdata->dev;
+
+ if (unlikely((skb->protocol == sdata->control_port_protocol ||
+ skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
+ sdata->control_port_over_nl80211)) {
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
+ struct ethhdr *ehdr = eth_hdr(skb);
+
+ cfg80211_rx_control_port(dev, skb->data, skb->len,
+ ehdr->h_source,
+ be16_to_cpu(skb->protocol), noencrypt);
+ dev_kfree_skb(skb);
+ } else {
+ /* deliver to local stack */
+ if (rx->napi)
+ napi_gro_receive(rx->napi, skb);
+ else
+ netif_receive_skb(skb);
+ }
+}
+
/*
* requires that rx->skb is a frame with ethernet header
*/
@@ -2329,13 +2355,10 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
#endif
if (skb) {
- /* deliver to local stack */
skb->protocol = eth_type_trans(skb, dev);
memset(skb->cb, 0, sizeof(skb->cb));
- if (rx->napi)
- napi_gro_receive(rx->napi, skb);
- else
- netif_receive_skb(skb);
+
+ ieee80211_deliver_skb_to_local_stack(skb, rx);
}
if (xmit_skb) {
@@ -2804,7 +2827,8 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
!(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
int sig = 0;
- if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
+ if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
+ !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
sig = status->signal;
cfg80211_report_obss_beacon(rx->local->hw.wiphy,
@@ -2882,7 +2906,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (rx->sta->sta.smps_mode == smps_mode)
goto handled;
rx->sta->sta.smps_mode = smps_mode;
- sta_opmode.smps_mode = smps_mode;
+ sta_opmode.smps_mode =
+ ieee80211_smps_mode_to_smps_mode(smps_mode);
sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
sband = rx->local->hw.wiphy->bands[status->band];
@@ -2920,7 +2945,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
rx->sta->sta.bandwidth = new_bw;
sband = rx->local->hw.wiphy->bands[status->band];
- sta_opmode.bw = new_bw;
+ sta_opmode.bw =
+ ieee80211_sta_rx_bw_to_chan_width(rx->sta);
sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
rate_control_rate_update(local, sband, rx->sta,
@@ -3145,7 +3171,8 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
* it transmitted were processed or returned.
*/
- if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
+ if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
+ !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
sig = status->signal;
if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index ef2becaade50..a3b1bcc2b461 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -73,7 +73,9 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
bool signal_valid;
struct ieee80211_sub_if_data *scan_sdata;
- if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
+ if (rx_status->flag & RX_FLAG_NO_SIGNAL_VAL)
+ bss_meta.signal = 0; /* invalid signal indication */
+ else if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
bss_meta.signal = rx_status->signal * 100;
else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC))
bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 933c67b5f845..535de3161a78 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -4757,3 +4757,49 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
ieee80211_xmit(sdata, NULL, skb);
local_bh_enable();
}
+
+int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *buf, size_t len,
+ const u8 *dest, __be16 proto, bool unencrypted)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct ethhdr *ehdr;
+ u32 flags;
+
+ /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
+ * or Pre-Authentication
+ */
+ if (proto != sdata->control_port_protocol &&
+ proto != cpu_to_be16(ETH_P_PREAUTH))
+ return -EINVAL;
+
+ if (unencrypted)
+ flags = IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ else
+ flags = 0;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ sizeof(struct ethhdr) + len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom + sizeof(struct ethhdr));
+
+ skb_put_data(skb, buf, len);
+
+ ehdr = skb_push(skb, sizeof(struct ethhdr));
+ memcpy(ehdr->h_dest, dest, ETH_ALEN);
+ memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN);
+ ehdr->h_proto = proto;
+
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_802_3);
+ skb_reset_network_header(skb);
+ skb_reset_mac_header(skb);
+
+ __ieee80211_subif_start_xmit(skb, skb->dev, flags);
+
+ return 0;
+}
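
Note: ieee80211_tx_control_port() is the mac80211 backend for the cfg80211 tx_control_port op wired up in the cfg.c hunk above. It wraps the caller's payload in an ordinary 802.3 frame and pushes it down the normal TX path, optionally flagged IEEE80211_TX_INTFL_DONT_ENCRYPT. A hypothetical in-kernel caller might look like this (eapol_buf, eapol_len and peer are placeholders; ETH_P_PAE matches the default control-port protocol):

    /* hypothetical usage sketch of the new TX entry point */
    int err = ieee80211_tx_control_port(wiphy, dev, eapol_buf, eapol_len,
                                        peer, cpu_to_be16(ETH_P_PAE),
                                        true /* send unencrypted */);
    if (err)
            pr_debug("control port tx failed: %d\n", err);
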
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 1f82191ce601..11f9cfc016d9 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -5,6 +5,7 @@
* Copyright 2007 Johannes Berg <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1113,6 +1114,48 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
return crc;
}
+void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_tx_queue_params
+ *qparam, int ac)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ const struct ieee80211_reg_rule *rrule;
+ struct ieee80211_wmm_ac *wmm_ac;
+ u16 center_freq = 0;
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_STATION)
+ return;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (chanctx_conf)
+ center_freq = chanctx_conf->def.chan->center_freq;
+
+ if (!center_freq) {
+ rcu_read_unlock();
+ return;
+ }
+
+ rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq));
+
+ if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) {
+ rcu_read_unlock();
+ return;
+ }
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP)
+ wmm_ac = &rrule->wmm_rule->ap[ac];
+ else
+ wmm_ac = &rrule->wmm_rule->client[ac];
+ qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
+ qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
+ qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn);
+ qparam->txop = !qparam->txop ? wmm_ac->cot / 32 :
+ min_t(u16, qparam->txop, wmm_ac->cot / 32);
+ rcu_read_unlock();
+}
+
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
bool bss_notify, bool enable_qos)
{
@@ -1206,6 +1249,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
break;
}
}
+ ieee80211_regulatory_limit_wmm_params(sdata, &qparam, ac);
qparam.uapsd = false;
@@ -1968,7 +2012,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
BSS_CHANGED_CQM |
BSS_CHANGED_QOS |
BSS_CHANGED_IDLE |
- BSS_CHANGED_TXPOWER;
+ BSS_CHANGED_TXPOWER |
+ BSS_CHANGED_MCAST_RATE;
if (sdata->vif.mu_mimo_owner)
changed |= BSS_CHANGED_MU_GROUPS;
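
Note: ieee80211_regulatory_limit_wmm_params() clamps per-AC EDCA parameters to the limits the regulatory database attaches to the current channel: CWmin, CWmax and AIFSN may only grow, and TXOP may only shrink. The division by 32 converts units, assuming the rule's channel occupancy time (cot) is stored in microseconds while the 802.11 TXOP field counts 32-usec slots. Restated as a sketch:

    /* sketch: clamp EDCA params against a regulatory WMM rule;
     * cot assumed in usec, TXOP unit = 32 usec */
    qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
    qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
    qparam->aifs   = max_t(u8, qparam->aifs, wmm_ac->aifsn);
    qparam->txop   = qparam->txop ? min_t(u16, qparam->txop,
                                          wmm_ac->cot / 32)
                                  : wmm_ac->cot / 32;
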
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 5714dee76b12..259325cbcc31 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -358,6 +358,36 @@ enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta)
return NL80211_CHAN_WIDTH_80;
}
+enum nl80211_chan_width
+ieee80211_sta_rx_bw_to_chan_width(struct sta_info *sta)
+{
+ enum ieee80211_sta_rx_bandwidth cur_bw = sta->sta.bandwidth;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
+ u32 cap_width;
+
+ switch (cur_bw) {
+ case IEEE80211_STA_RX_BW_20:
+ if (!sta->sta.ht_cap.ht_supported)
+ return NL80211_CHAN_WIDTH_20_NOHT;
+ else
+ return NL80211_CHAN_WIDTH_20;
+ case IEEE80211_STA_RX_BW_40:
+ return NL80211_CHAN_WIDTH_40;
+ case IEEE80211_STA_RX_BW_80:
+ return NL80211_CHAN_WIDTH_80;
+ case IEEE80211_STA_RX_BW_160:
+ cap_width =
+ vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+
+ if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ)
+ return NL80211_CHAN_WIDTH_160;
+
+ return NL80211_CHAN_WIDTH_80P80;
+ default:
+ return NL80211_CHAN_WIDTH_20;
+ }
+}
+
enum ieee80211_sta_rx_bandwidth
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
{
@@ -484,7 +514,7 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
new_bw = ieee80211_sta_cur_vht_bw(sta);
if (new_bw != sta->sta.bandwidth) {
sta->sta.bandwidth = new_bw;
- sta_opmode.bw = new_bw;
+ sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(sta);
changed |= IEEE80211_RC_BW_CHANGED;
sta_opmode.changed |= STA_OPMODE_MAX_BW_CHANGED;
}
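
Note: ieee80211_sta_rx_bw_to_chan_width() converts a station's RX bandwidth into an nl80211 channel width; the interesting case is 160 MHz, where the peer's VHT capability decides between a contiguous 160 MHz report and 80+80. Both opmode notification sites (the rx.c hunk above and the vht.c hunk here) now use it instead of passing the raw internal value:

    /* sketch: report bandwidth changes to userspace in nl80211 units */
    if (new_bw != sta->sta.bandwidth) {
            sta->sta.bandwidth = new_bw;
            sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(sta);
            sta_opmode.changed |= STA_OPMODE_MAX_BW_CHANGED;
    }
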
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index d4a89a8be013..7a4de6d618b1 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2488,7 +2488,6 @@ static void mpls_net_exit(struct net *net)
static struct pernet_operations mpls_net_ops = {
.init = mpls_net_init,
.exit = mpls_net_exit,
- .async = true,
};
static struct rtnl_af_ops mpls_af_ops __read_mostly = {
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 05fcfb4fbe1d..8d7e849d4825 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -190,6 +190,10 @@ static int ncsi_pkg_info_nl(struct sk_buff *msg, struct genl_info *info)
package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
attr = nla_nest_start(skb, NCSI_ATTR_PACKAGE_LIST);
+ if (!attr) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
rc = ncsi_write_package_info(skb, ndp, package_id);
if (rc) {
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index d72cc786c7b7..0f6b8172fb9a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -629,7 +629,6 @@ static void __net_exit netfilter_net_exit(struct net *net)
static struct pernet_operations netfilter_net_ops = {
.init = netfilter_net_init,
.exit = netfilter_net_exit,
- .async = true,
};
int __init netfilter_init(void)
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 2523ebe2b3cc..bc4bd247bb7d 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -2095,7 +2095,6 @@ static struct pernet_operations ip_set_net_ops = {
.exit = ip_set_net_exit,
.id = &ip_set_net_id,
.size = sizeof(struct ip_set_net),
- .async = true,
};
static int __init
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 6a6cb9db030b..5f6f73cf2174 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -2289,12 +2289,10 @@ static struct pernet_operations ipvs_core_ops = {
.exit = __ip_vs_cleanup,
.id = &ip_vs_net_id,
.size = sizeof(struct netns_ipvs),
- .async = true,
};
static struct pernet_operations ipvs_core_dev_ops = {
.exit = __ip_vs_dev_cleanup,
- .async = true,
};
/*
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 8b25aab41928..58d5d05aec24 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -479,7 +479,6 @@ static void __ip_vs_ftp_exit(struct net *net)
static struct pernet_operations ip_vs_ftp_ops = {
.init = __ip_vs_ftp_init,
.exit = __ip_vs_ftp_exit,
- .async = true,
};
static int __init ip_vs_ftp_init(void)
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 6a340c94c4b8..d625179de485 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -604,7 +604,6 @@ static void __net_exit __ip_vs_lblc_exit(struct net *net) { }
static struct pernet_operations ip_vs_lblc_ops = {
.init = __ip_vs_lblc_init,
.exit = __ip_vs_lblc_exit,
- .async = true,
};
static int __init ip_vs_lblc_init(void)
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 0627881128da..84c57b62a588 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -789,7 +789,6 @@ static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }
static struct pernet_operations ip_vs_lblcr_ops = {
.init = __ip_vs_lblcr_init,
.exit = __ip_vs_lblcr_exit,
- .async = true,
};
static int __init ip_vs_lblcr_init(void)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 705198de671d..41ff04ee2554 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1763,14 +1763,14 @@ nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
{
struct net *net;
- rtnl_lock();
+ down_read(&net_rwsem);
for_each_net(net) {
if (atomic_read(&net->ct.count) == 0)
continue;
__nf_ct_unconfirmed_destroy(net);
nf_queue_nf_hook_drop(net);
}
- rtnl_unlock();
+ up_read(&net_rwsem);
/* Need to wait for netns cleanup worker to finish, if its
* running -- it might have deleted a net namespace from
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index 496ce173f0c1..cc11bf890eb9 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -33,7 +33,7 @@ MODULE_ALIAS("ip_conntrack_netbios_ns");
MODULE_ALIAS_NFCT_HELPER("netbios_ns");
static unsigned int timeout __read_mostly = 3;
-module_param(timeout, uint, S_IRUSR);
+module_param(timeout, uint, 0400);
MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
static struct nf_conntrack_expect_policy exp_policy = {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 8884d302d33a..dd177ebee9aa 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -3417,7 +3417,6 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
static struct pernet_operations ctnetlink_net_ops = {
.init = ctnetlink_net_init,
.exit_batch = ctnetlink_net_exit_batch,
- .async = true,
};
static int __init ctnetlink_init(void)
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 9bcd72fe91f9..d049ea5a3770 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -406,7 +406,6 @@ static struct pernet_operations proto_gre_net_ops = {
.exit = proto_gre_net_exit,
.id = &proto_gre_net_id,
.size = sizeof(struct netns_proto_gre),
- .async = true,
};
static int __init nf_ct_proto_gre_init(void)
diff --git a/net/netfilter/nf_conntrack_snmp.c b/net/netfilter/nf_conntrack_snmp.c
index 87b95a2c270c..1b18f43ad226 100644
--- a/net/netfilter/nf_conntrack_snmp.c
+++ b/net/netfilter/nf_conntrack_snmp.c
@@ -26,7 +26,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_NFCT_HELPER("snmp");
static unsigned int timeout __read_mostly = 30;
-module_param(timeout, uint, S_IRUSR);
+module_param(timeout, uint, 0400);
MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
int (*nf_nat_snmp_hook)(struct sk_buff *skb,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 3cdce391362e..037fec54c850 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -495,7 +495,7 @@ static int nf_conntrack_standalone_init_proc(struct net *net)
if (uid_valid(root_uid) && gid_valid(root_gid))
proc_set_user(pde, root_uid, root_gid);
- pde = proc_create("nf_conntrack", S_IRUGO, net->proc_net_stat,
+ pde = proc_create("nf_conntrack", 0444, net->proc_net_stat,
&ct_cpu_seq_fops);
if (!pde)
goto out_stat_nf_conntrack;
@@ -705,7 +705,6 @@ static void nf_conntrack_pernet_exit(struct list_head *net_exit_list)
static struct pernet_operations nf_conntrack_net_ops = {
.init = nf_conntrack_pernet_init,
.exit_batch = nf_conntrack_pernet_exit,
- .async = true,
};
static int __init nf_conntrack_standalone_init(void)
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 1ba3da51050d..6d0357817cda 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -549,7 +549,7 @@ static int __net_init nf_log_net_init(struct net *net)
int ret = -ENOMEM;
#ifdef CONFIG_PROC_FS
- if (!proc_create("nf_log", S_IRUGO,
+ if (!proc_create("nf_log", 0444,
net->nf.proc_netfilter, &nflog_file_ops))
return ret;
#endif
@@ -577,7 +577,6 @@ static void __net_exit nf_log_net_exit(struct net *net)
static struct pernet_operations nf_log_net_ops = {
.init = nf_log_net_init,
.exit = nf_log_net_exit,
- .async = true,
};
int __init netfilter_log_init(void)
diff --git a/net/netfilter/nf_log_netdev.c b/net/netfilter/nf_log_netdev.c
index 254c2c6bde48..350eb147754d 100644
--- a/net/netfilter/nf_log_netdev.c
+++ b/net/netfilter/nf_log_netdev.c
@@ -47,7 +47,6 @@ static void __net_exit nf_log_netdev_net_exit(struct net *net)
static struct pernet_operations nf_log_netdev_net_ops = {
.init = nf_log_netdev_net_init,
.exit = nf_log_netdev_net_exit,
- .async = true,
};
static int __init nf_log_netdev_init(void)
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 64b875e452ca..6039b350abbe 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -325,7 +325,7 @@ static const struct file_operations synproxy_cpu_seq_fops = {
static int __net_init synproxy_proc_init(struct net *net)
{
- if (!proc_create("synproxy", S_IRUGO, net->proc_net_stat,
+ if (!proc_create("synproxy", 0444, net->proc_net_stat,
&synproxy_cpu_seq_fops))
return -ENOMEM;
return 0;
@@ -398,7 +398,6 @@ static struct pernet_operations synproxy_net_ops = {
.exit = synproxy_net_exit,
.id = &synproxy_net_id,
.size = sizeof(struct synproxy_net),
- .async = true,
};
static int __init synproxy_core_init(void)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fd13d28e4ca7..c4acc7340eb1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -6597,7 +6597,6 @@ static void __net_exit nf_tables_exit_net(struct net *net)
static struct pernet_operations nf_tables_net_ops = {
.init = nf_tables_init_net,
.exit = nf_tables_exit_net,
- .async = true,
};
static int __init nf_tables_module_init(void)
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 84fc4954862d..03ead8a9e90c 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -566,7 +566,6 @@ static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
static struct pernet_operations nfnetlink_net_ops = {
.init = nfnetlink_net_init,
.exit_batch = nfnetlink_net_exit_batch,
- .async = true,
};
static int __init nfnetlink_init(void)
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 8d9f18bb8840..88d427f9f9e6 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -515,7 +515,6 @@ static void __net_exit nfnl_acct_net_exit(struct net *net)
static struct pernet_operations nfnl_acct_ops = {
.init = nfnl_acct_net_init,
.exit = nfnl_acct_net_exit,
- .async = true,
};
static int __init nfnl_acct_init(void)
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 6819300f7fb7..95b04702a655 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -586,7 +586,6 @@ static void __net_exit cttimeout_net_exit(struct net *net)
static struct pernet_operations cttimeout_ops = {
.init = cttimeout_net_init,
.exit = cttimeout_net_exit,
- .async = true,
};
static int __init cttimeout_init(void)
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index b21ef79849a1..7b46aa4c478d 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1108,7 +1108,6 @@ static struct pernet_operations nfnl_log_net_ops = {
.exit = nfnl_log_net_exit,
.id = &nfnl_log_net_id,
.size = sizeof(struct nfnl_log_net),
- .async = true,
};
static int __init nfnetlink_log_init(void)
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 9f572ed56208..0b839c38800f 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1525,7 +1525,6 @@ static struct pernet_operations nfnl_queue_net_ops = {
.exit_batch = nfnl_queue_net_exit_batch,
.id = &nfnl_queue_net_id,
.size = sizeof(struct nfnl_queue_net),
- .async = true,
};
static int __init nfnetlink_queue_init(void)
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 6de1f6a4cb80..4aa01c90e9d1 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1789,7 +1789,6 @@ static void __net_exit xt_net_exit(struct net *net)
static struct pernet_operations xt_net_ops = {
.init = xt_net_init,
.exit = xt_net_exit,
- .async = true,
};
static int __init xt_init(void)
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 1ac6600bfafd..5ee859193783 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -132,7 +132,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
ret = -ENOMEM;
goto out_free_timer;
}
- info->timer->attr.attr.mode = S_IRUGO;
+ info->timer->attr.attr.mode = 0444;
info->timer->attr.show = idletimer_tg_show;
ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index ef65b7a9173e..3360f13dc208 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -1349,7 +1349,6 @@ static struct pernet_operations hashlimit_net_ops = {
.exit = hashlimit_net_exit,
.id = &hashlimit_net_id,
.size = sizeof(struct hashlimit_net),
- .async = true,
};
static int __init hashlimit_mt_init(void)
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 486dd24da78b..9bbfc17ce3ec 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -51,8 +51,8 @@ static unsigned int ip_list_gid __read_mostly;
module_param(ip_list_tot, uint, 0400);
module_param(ip_list_hash_size, uint, 0400);
module_param(ip_list_perms, uint, 0400);
-module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR);
-module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR);
+module_param(ip_list_uid, uint, 0644);
+module_param(ip_list_gid, uint, 0644);
MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
@@ -687,7 +687,6 @@ static struct pernet_operations recent_net_ops = {
.exit = recent_net_exit,
.id = &recent_net_id,
.size = sizeof(struct recent_net),
- .async = true,
};
static struct xt_match recent_mt_reg[] __read_mostly = {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5d10dcfe6411..f1b02d87e336 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -253,7 +253,6 @@ static struct pernet_operations netlink_tap_net_ops = {
.exit = netlink_tap_exit_net,
.id = &netlink_tap_net_id,
.size = sizeof(struct netlink_tap_net),
- .async = true,
};
static bool netlink_filter_tap(const struct sk_buff *skb)
@@ -2726,7 +2725,6 @@ static void __init netlink_add_usersock_entry(void)
static struct pernet_operations __net_initdata netlink_net_ops = {
.init = netlink_net_init,
.exit = netlink_net_exit,
- .async = true,
};
static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index af51b8c0a2cb..b9ce82c9440f 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1035,7 +1035,6 @@ static void __net_exit genl_pernet_exit(struct net *net)
static struct pernet_operations genl_pernet_ops = {
.init = genl_pernet_init,
.exit = genl_pernet_exit,
- .async = true,
};
static int __init genl_init(void)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 35bb6807927f..4221d98a314b 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1450,9 +1450,9 @@ static int __init nr_proto_init(void)
nr_loopback_init();
- proc_create("nr", S_IRUGO, init_net.proc_net, &nr_info_fops);
- proc_create("nr_neigh", S_IRUGO, init_net.proc_net, &nr_neigh_fops);
- proc_create("nr_nodes", S_IRUGO, init_net.proc_net, &nr_nodes_fops);
+ proc_create("nr", 0444, init_net.proc_net, &nr_info_fops);
+ proc_create("nr_neigh", 0444, init_net.proc_net, &nr_neigh_fops);
+ proc_create("nr_nodes", 0444, init_net.proc_net, &nr_nodes_fops);
out:
return rc;
fail:
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 100191df0371..015e24e08909 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2363,10 +2363,10 @@ static void __net_exit ovs_exit_net(struct net *dnet)
list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
__dp_destroy(dp);
- rtnl_lock();
+ down_read(&net_rwsem);
for_each_net(net)
list_vports_from_net(net, dnet, &head);
- rtnl_unlock();
+ up_read(&net_rwsem);
/* Detach all vports from given namespace. */
list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
@@ -2384,7 +2384,6 @@ static struct pernet_operations ovs_net_ops = {
.exit = ovs_exit_net,
.id = &ovs_net_id,
.size = sizeof(struct ovs_net),
- .async = true,
};
static int __init dp_init(void)
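
Note: like the nf_conntrack hunk above, openvswitch now walks the namespace list under net_rwsem rather than the RTNL. for_each_net() only needs the list itself to be stable, and a read-side rwsem provides that without serializing against all of rtnetlink. The pattern, as a sketch:

    /* sketch: iterate live namespaces; net_rwsem guards only the
     * net list, unlike the much broader rtnl_lock() it replaces */
    down_read(&net_rwsem);
    for_each_net(net) {
            /* per-namespace work that does not need the RTNL */
    }
    up_read(&net_rwsem);
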
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2c5a6fe5d749..616cb9c18f88 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4557,7 +4557,6 @@ static void __net_exit packet_net_exit(struct net *net)
static struct pernet_operations packet_net_ops = {
.init = packet_net_init,
.exit = packet_net_exit,
- .async = true,
};
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 9454e8393793..77787512fc32 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -342,7 +342,6 @@ static struct pernet_operations phonet_net_ops = {
.exit = phonet_exit_net,
.id = &phonet_net_id,
.size = sizeof(struct phonet_net),
- .async = true,
};
/* Initialize Phonet devices list */
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 4f3a32c38bf5..351a28474667 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -530,7 +530,6 @@ static struct pernet_operations rds_tcp_net_ops = {
.exit = rds_tcp_exit_net,
.id = &rds_tcp_netid,
.size = sizeof(struct rds_tcp_net),
- .async = true,
};
void *rds_tcp_listen_sock_def_readable(struct net *net)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 5170373b797c..9ff5e0a76593 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1567,12 +1567,12 @@ static int __init rose_proto_init(void)
rose_add_loopback_neigh();
- proc_create("rose", S_IRUGO, init_net.proc_net, &rose_info_fops);
- proc_create("rose_neigh", S_IRUGO, init_net.proc_net,
+ proc_create("rose", 0444, init_net.proc_net, &rose_info_fops);
+ proc_create("rose_neigh", 0444, init_net.proc_net,
&rose_neigh_fops);
- proc_create("rose_nodes", S_IRUGO, init_net.proc_net,
+ proc_create("rose_nodes", 0444, init_net.proc_net,
&rose_nodes_fops);
- proc_create("rose_routes", S_IRUGO, init_net.proc_net,
+ proc_create("rose_routes", 0444, init_net.proc_net,
&rose_routes_fops);
out:
return rc;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 0c9c18aa7c77..ec5ec68be1aa 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -32,7 +32,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);
unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
-module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
+module_param_named(debug, rxrpc_debug, uint, 0644);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");
static struct proto rxrpc_proto;
@@ -40,6 +40,7 @@ static const struct proto_ops rxrpc_rpc_ops;
/* current debugging ID */
atomic_t rxrpc_debug_id;
+EXPORT_SYMBOL(rxrpc_debug_id);
/* count of skbs currently in use */
atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
@@ -267,6 +268,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
* @gfp: The allocation constraints
* @notify_rx: Where to send notifications instead of socket queue
* @upgrade: Request service upgrade for call
+ * @debug_id: The debug ID for tracing to be assigned to the call
*
* Allow a kernel service to begin a call on the nominated socket. This just
* sets up all the internal tracking structures and allocates connection and
@@ -282,7 +284,8 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
s64 tx_total_len,
gfp_t gfp,
rxrpc_notify_rx_t notify_rx,
- bool upgrade)
+ bool upgrade,
+ unsigned int debug_id)
{
struct rxrpc_conn_parameters cp;
struct rxrpc_call_params p;
@@ -314,7 +317,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
cp.exclusive = false;
cp.upgrade = upgrade;
cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp);
+ call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
/* The socket has been unlocked. */
if (!IS_ERR(call)) {
call->notify_rx = notify_rx;
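
Note: the rxrpc hunks move debug-ID allocation out to the call-creation sites: rxrpc_debug_id is exported, and every path that creates a call passes an explicit ID (normally atomic_inc_return(&rxrpc_debug_id)) down to rxrpc_alloc_call(), so tracepoints can tag a call before the call object exists. A kernel-service caller of the updated API, sketched with placeholder arguments:

    /* hypothetical kernel-service sketch using the new final argument */
    call = rxrpc_kernel_begin_call(sock, srx, key, user_call_ID,
                                   tx_total_len, GFP_KERNEL, notify_rx,
                                   false /* no service upgrade */,
                                   atomic_inc_return(&rxrpc_debug_id));
    if (IS_ERR(call))
            return PTR_ERR(call);
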
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 416688381eb7..21cf164b6d85 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -691,7 +691,6 @@ struct rxrpc_send_params {
* af_rxrpc.c
*/
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
-extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;
/*
@@ -732,11 +731,12 @@ extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
-struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_conn_parameters *,
struct sockaddr_rxrpc *,
- struct rxrpc_call_params *, gfp_t);
+ struct rxrpc_call_params *, gfp_t,
+ unsigned int);
int rxrpc_retry_client_call(struct rxrpc_sock *,
struct rxrpc_call *,
struct rxrpc_conn_parameters *,
@@ -778,6 +778,7 @@ static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
call->error = error;
call->completion = compl,
call->state = RXRPC_CALL_COMPLETE;
+ trace_rxrpc_call_complete(call);
wake_up(&call->waitq);
return true;
}
@@ -822,7 +823,7 @@ static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
rxrpc_seq_t seq,
u32 abort_code, int error)
{
- trace_rxrpc_abort(why, call->cid, call->call_id, seq,
+ trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
abort_code, error);
return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
abort_code, error);
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 3028298ca561..92ebd1d7e0bb 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -34,7 +34,8 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
struct rxrpc_backlog *b,
rxrpc_notify_rx_t notify_rx,
rxrpc_user_attach_call_t user_attach_call,
- unsigned long user_call_ID, gfp_t gfp)
+ unsigned long user_call_ID, gfp_t gfp,
+ unsigned int debug_id)
{
const void *here = __builtin_return_address(0);
struct rxrpc_call *call;
@@ -94,7 +95,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
/* Now it gets complicated, because calls get registered with the
* socket here, particularly if a user ID is preassigned by the user.
*/
- call = rxrpc_alloc_call(rx, gfp);
+ call = rxrpc_alloc_call(rx, gfp, debug_id);
if (!call)
return -ENOMEM;
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
@@ -174,7 +175,8 @@ int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
if (rx->discard_new_call)
return 0;
- while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
+ while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
+ atomic_inc_return(&rxrpc_debug_id)) == 0)
;
return 0;
@@ -347,7 +349,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
service_id == rx->second_service))
goto found_service;
- trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_INVALID_OPERATION, EOPNOTSUPP);
skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
skb->priority = RX_INVALID_OPERATION;
@@ -358,7 +360,7 @@ found_service:
spin_lock(&rx->incoming_lock);
if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
rx->sk.sk_state == RXRPC_CLOSE) {
- trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
+ trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
skb->priority = RX_INVALID_OPERATION;
@@ -635,6 +637,7 @@ out_discard:
* @user_attach_call: Func to attach call to user_call_ID
* @user_call_ID: The tag to attach to the preallocated call
* @gfp: The allocation conditions.
+ * @debug_id: The tracing debug ID.
*
* Charge up the socket with preallocated calls, each with a user ID. A
* function should be provided to effect the attachment from the user's side.
@@ -645,7 +648,8 @@ out_discard:
int rxrpc_kernel_charge_accept(struct socket *sock,
rxrpc_notify_rx_t notify_rx,
rxrpc_user_attach_call_t user_attach_call,
- unsigned long user_call_ID, gfp_t gfp)
+ unsigned long user_call_ID, gfp_t gfp,
+ unsigned int debug_id)
{
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct rxrpc_backlog *b = rx->backlog;
@@ -655,6 +659,6 @@ int rxrpc_kernel_charge_accept(struct socket *sock,
return rxrpc_service_prealloc_one(rx, b, notify_rx,
user_attach_call, user_call_ID,
- gfp);
+ gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
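
Note: the same ID plumbing covers the server side: rxrpc_kernel_charge_accept() gains the debug_id parameter and hands it through rxrpc_service_prealloc_one(), so preallocated service calls are traceable too. A hypothetical charging call from a kernel service (notify_rx, attach_call and user_call_ID are placeholders):

    /* hypothetical sketch: precharge the accept backlog with a
     * fresh trace ID for the preallocated call */
    ret = rxrpc_kernel_charge_accept(sock, notify_rx, attach_call,
                                     user_call_ID, GFP_KERNEL,
                                     atomic_inc_return(&rxrpc_debug_id));
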
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index ad2ab1103189..6a62e42e1d8d 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -195,6 +195,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
* the packets in the Tx buffer we're going to resend and what the new
* resend timeout will be.
*/
+ trace_rxrpc_resend(call, (cursor + 1) & RXRPC_RXTX_BUFF_MASK);
oldest = now;
for (seq = cursor + 1; before_eq(seq, top); seq++) {
ix = seq & RXRPC_RXTX_BUFF_MASK;
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 0b2db38dd32d..147657dfe757 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -99,7 +99,8 @@ found_extant_call:
/*
* allocate a new call
*/
-struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
+ unsigned int debug_id)
{
struct rxrpc_call *call;
@@ -138,7 +139,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
spin_lock_init(&call->notify_lock);
rwlock_init(&call->state_lock);
atomic_set(&call->usage, 1);
- call->debug_id = atomic_inc_return(&rxrpc_debug_id);
+ call->debug_id = debug_id;
call->tx_total_len = -1;
call->next_rx_timo = 20 * HZ;
call->next_req_timo = 1 * HZ;
@@ -166,14 +167,15 @@ nomem:
*/
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
struct sockaddr_rxrpc *srx,
- gfp_t gfp)
+ gfp_t gfp,
+ unsigned int debug_id)
{
struct rxrpc_call *call;
ktime_t now;
_enter("");
- call = rxrpc_alloc_call(rx, gfp);
+ call = rxrpc_alloc_call(rx, gfp, debug_id);
if (!call)
return ERR_PTR(-ENOMEM);
call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
@@ -214,7 +216,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
struct rxrpc_call_params *p,
- gfp_t gfp)
+ gfp_t gfp,
+ unsigned int debug_id)
__releases(&rx->sk.sk_lock.slock)
{
struct rxrpc_call *call, *xcall;
@@ -225,7 +228,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
_enter("%p,%lx", rx, p->user_call_ID);
- call = rxrpc_alloc_client_call(rx, srx, gfp);
+ call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
if (IS_ERR(call)) {
release_sock(&rx->sk);
_leave(" = %ld", PTR_ERR(call));
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index b1dfae107431..d2ec3fd593e8 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -160,7 +160,8 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
lockdep_is_held(&conn->channel_lock));
if (call) {
if (compl == RXRPC_CALL_LOCALLY_ABORTED)
- trace_rxrpc_abort("CON", call->cid,
+ trace_rxrpc_abort(call->debug_id,
+ "CON", call->cid,
call->call_id, 0,
abort_code, error);
if (rxrpc_set_call_completion(call, compl,
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 6fc61400337f..2a868fdab0ae 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1307,21 +1307,21 @@ out_unlock:
wrong_security:
rcu_read_unlock();
- trace_rxrpc_abort("SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RXKADINCONSISTENCY, EBADMSG);
skb->priority = RXKADINCONSISTENCY;
goto post_abort;
reupgrade:
rcu_read_unlock();
- trace_rxrpc_abort("UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
goto protocol_error;
bad_message_unlock:
rcu_read_unlock();
bad_message:
- trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
protocol_error:
skb->priority = RX_PROTOCOL_ERROR;
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 5fd939dabf41..f18c9248e0d4 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -106,5 +106,4 @@ struct pernet_operations rxrpc_net_ops = {
.exit = rxrpc_exit_net,
.id = &rxrpc_net_id,
.size = sizeof(struct rxrpc_net),
- .async = true,
};
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 09f2a3e05221..8503f279b467 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -579,7 +579,8 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
cp.exclusive = rx->exclusive | p->exclusive;
cp.upgrade = p->upgrade;
cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL);
+ call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL,
+ atomic_inc_return(&rxrpc_debug_id));
/* The socket is now unlocked */
_leave(" = %p\n", call);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 7bd1b964f021..0d78b58e1898 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1533,7 +1533,6 @@ static struct pernet_operations tcf_action_net_ops = {
.exit = tcf_action_net_exit,
.id = &tcf_action_net_id,
.size = sizeof(struct tcf_action_net),
- .async = true,
};
static int __init tc_action_init(void)
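The single-line removals in the net_ns.c hunk above and throughout the net/sched, sctp, sysctl, tipc, unix, wireless, xfrm, selinux and smack hunks that follow all delete the same field. As far as can be told from the series, pernet_operations.async was a transitional flag marking per-namespace init/exit methods safe to run outside the global net_mutex; once every subsystem had been converted, the field itself was removed, so any initializer still naming it would no longer compile. A minimal registration as it looks afterwards (a sketch with placeholder bodies):

#include <net/net_namespace.h>

static int __net_init example_net_init(struct net *net)
{
	return 0;	/* per-namespace setup */
}

static void __net_exit example_net_exit(struct net *net)
{
	/* per-namespace teardown */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

/* registered at module init with register_pernet_subsys(&example_net_ops) */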
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 5cb9b268e8ff..9092531d45d8 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -413,7 +413,6 @@ static struct pernet_operations bpf_net_ops = {
.exit_batch = bpf_exit_net,
.id = &bpf_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
static int __init bpf_init_module(void)
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 371e5e4ab3e2..e4b880fa51fe 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -222,7 +222,6 @@ static struct pernet_operations connmark_net_ops = {
.exit_batch = connmark_exit_net,
.id = &connmark_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
static int __init connmark_init_module(void)
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index a527e287c086..7e28b2ce1437 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -678,7 +678,6 @@ static struct pernet_operations csum_net_ops = {
.exit_batch = csum_exit_net,
.id = &csum_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
MODULE_DESCRIPTION("Checksum updating actions");
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 88fbb8403565..4dc4f153cad8 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -261,7 +261,6 @@ static struct pernet_operations gact_net_ops = {
.exit_batch = gact_exit_net,
.id = &gact_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 555b1caeff72..a5994cf0512b 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -870,7 +870,6 @@ static struct pernet_operations ife_net_ops = {
.exit_batch = ife_exit_net,
.id = &ife_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
static int __init ife_init_module(void)
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index b5e8565b89c7..14c312d7908f 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -352,7 +352,6 @@ static struct pernet_operations ipt_net_ops = {
.exit_batch = ipt_exit_net,
.id = &ipt_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
@@ -403,7 +402,6 @@ static struct pernet_operations xt_net_ops = {
.exit_batch = xt_exit_net,
.id = &xt_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 64c86579c3d9..fd34015331ab 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -353,7 +353,6 @@ static struct pernet_operations mirred_net_ops = {
.exit_batch = mirred_exit_net,
.id = &mirred_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
MODULE_AUTHOR("Jamal Hadi Salim(2002)");
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index b1bc757f6491..4b5848b6c252 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -323,7 +323,6 @@ static struct pernet_operations nat_net_ops = {
.exit_batch = nat_exit_net,
.id = &nat_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
MODULE_DESCRIPTION("Stateless NAT actions");
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index f392ccaaa0d8..8a925c72db5f 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -465,7 +465,6 @@ static struct pernet_operations pedit_net_ops = {
.exit_batch = pedit_exit_net,
.id = &pedit_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7081ec75e696..4e72bc2a0dfb 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -347,7 +347,6 @@ static struct pernet_operations police_net_ops = {
.exit_batch = police_exit_net,
.id = &police_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
static int __init police_init_module(void)
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 3a89f98f17e6..5db358497c9e 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -249,7 +249,6 @@ static struct pernet_operations sample_net_ops = {
.exit_batch = sample_exit_net,
.id = &sample_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
static int __init sample_init_module(void)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e84768ae610a..9618b4a83cee 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -216,7 +216,6 @@ static struct pernet_operations simp_net_ops = {
.exit_batch = simp_exit_net,
.id = &simp_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
MODULE_AUTHOR("Jamal Hadi Salim(2005)");
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 7971510fe61b..ddf69fc01bdf 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -253,7 +253,6 @@ static struct pernet_operations skbedit_net_ops = {
.exit_batch = skbedit_exit_net,
.id = &skbedit_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
MODULE_AUTHOR("Alexander Duyck, <[email protected]>");
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 142a996ac776..bbcbdce732cc 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -279,7 +279,6 @@ static struct pernet_operations skbmod_net_ops = {
.exit_batch = skbmod_exit_net,
.id = &skbmod_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
MODULE_AUTHOR("Jamal Hadi Salim, <[email protected]>");
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index a1c8dd406a04..626dac81a48a 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -339,7 +339,6 @@ static struct pernet_operations tunnel_key_net_ops = {
.exit_batch = tunnel_key_exit_net,
.id = &tunnel_key_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
static int __init tunnel_key_init_module(void)
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 41a66effeb5f..853604685965 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -314,7 +314,6 @@ static struct pernet_operations vlan_net_ops = {
.exit_batch = vlan_exit_net,
.id = &vlan_net_id,
.size = sizeof(struct tc_action_net),
- .async = true,
};
static int __init vlan_init_module(void)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ec5fe8ec0c3e..b66754f52a9f 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1619,7 +1619,6 @@ static struct pernet_operations tcf_net_ops = {
.exit = tcf_net_exit,
.id = &tcf_net_id,
.size = sizeof(struct tcf_net),
- .async = true,
};
static int __init tc_filter_init(void)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 68f9d942bed4..106dae7e4818 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -2133,7 +2133,6 @@ static void __net_exit psched_net_exit(struct net *net)
static struct pernet_operations psched_net_ops = {
.init = psched_net_init,
.exit = psched_net_exit,
- .async = true,
};
static int __init pktsched_init(void)
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 8b3146816519..e2f5a3ee41a7 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -349,8 +349,8 @@ out:
/* Look for any peeled off association from the endpoint that matches the
* given peer address.
*/
-int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
- const union sctp_addr *paddr)
+bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
+ const union sctp_addr *paddr)
{
struct sctp_sockaddr_entry *addr;
struct sctp_bind_addr *bp;
@@ -362,10 +362,10 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
*/
list_for_each_entry(addr, &bp->address_list, list) {
if (sctp_has_association(net, &addr->a, paddr))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/* Do delayed input processing. This is scheduled by sctp_rcv().
diff --git a/net/sctp/input.c b/net/sctp/input.c
index b381d78548ac..ba8a6e6c36fa 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -1010,19 +1010,18 @@ struct sctp_association *sctp_lookup_association(struct net *net,
}
/* Is there an association matching the given local and peer addresses? */
-int sctp_has_association(struct net *net,
- const union sctp_addr *laddr,
- const union sctp_addr *paddr)
+bool sctp_has_association(struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr)
{
- struct sctp_association *asoc;
struct sctp_transport *transport;
- if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
+ if (sctp_lookup_association(net, laddr, paddr, &transport)) {
sctp_transport_put(transport);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
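The two sctp hunks above are a pure int-to-bool conversion: predicates that answer a yes/no question return bool instead of 0/1, and the assignment-in-condition is dropped because the looked-up association itself was never used, only its existence. The shape of the pattern, with lookup() as a hypothetical stand-in for sctp_lookup_association():

struct entry;
static struct entry *lookup(int key);	/* hypothetical stand-in */

static bool entry_exists(int key)
{
	return lookup(key) != NULL;
}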
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 17d0155d9de3..1d9ccc6dab2b 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -450,17 +450,17 @@ int __net_init sctp_proc_init(struct net *net)
net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net);
if (!net->sctp.proc_net_sctp)
return -ENOMEM;
- if (!proc_create("snmp", S_IRUGO, net->sctp.proc_net_sctp,
- &sctp_snmp_seq_fops))
+ if (!proc_create("snmp", 0444, net->sctp.proc_net_sctp,
+ &sctp_snmp_seq_fops))
goto cleanup;
- if (!proc_create("eps", S_IRUGO, net->sctp.proc_net_sctp,
- &sctp_eps_seq_fops))
+ if (!proc_create("eps", 0444, net->sctp.proc_net_sctp,
+ &sctp_eps_seq_fops))
goto cleanup;
- if (!proc_create("assocs", S_IRUGO, net->sctp.proc_net_sctp,
- &sctp_assocs_seq_fops))
+ if (!proc_create("assocs", 0444, net->sctp.proc_net_sctp,
+ &sctp_assocs_seq_fops))
goto cleanup;
- if (!proc_create("remaddr", S_IRUGO, net->sctp.proc_net_sctp,
- &sctp_remaddr_seq_fops))
+ if (!proc_create("remaddr", 0444, net->sctp.proc_net_sctp,
+ &sctp_remaddr_seq_fops))
goto cleanup;
return 0;
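This hunk, like the sunrpc, wext, x25 and xfrm hunks later in the series, replaces symbolic mode macros with plain octal, the form checkpatch prefers because the permission bits are visible at a glance. The equivalences relied on throughout, for reference (the S_IF* file-type bits stay symbolic):

/*
 * S_IRUGO              == 0444   world-readable
 * S_IWUGO              == 0222   world-writable
 * S_IRUSR              == 0400   owner read-only
 * S_IRUSR | S_IWUSR    == 0600   owner read/write
 * S_IRUGO | S_IXUGO    == 0555   read + traverse (directories)
 */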
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 493b817f6a2a..a24cde236330 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1258,8 +1258,10 @@ static int __net_init sctp_defaults_init(struct net *net)
return 0;
+#ifdef CONFIG_PROC_FS
err_init_proc:
cleanup_sctp_mibs(net);
+#endif
err_init_mibs:
sctp_sysctl_net_unregister(net);
err_sysctl_register:
@@ -1283,7 +1285,6 @@ static void __net_exit sctp_defaults_exit(struct net *net)
static struct pernet_operations sctp_defaults_ops = {
.init = sctp_defaults_init,
.exit = sctp_defaults_exit,
- .async = true,
};
static int __net_init sctp_ctrlsock_init(struct net *net)
@@ -1307,7 +1308,6 @@ static void __net_init sctp_ctrlsock_exit(struct net *net)
static struct pernet_operations sctp_ctrlsock_ops = {
.init = sctp_ctrlsock_init,
.exit = sctp_ctrlsock_exit,
- .async = true,
};
/* Initialize the universe into something sensible. */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 26531193fce4..5089dbb96d58 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1375,7 +1375,7 @@ static int create_use_gss_proxy_proc_entry(struct net *net)
struct proc_dir_entry **p = &sn->use_gssp_proc;
sn->use_gss_proxy = -1;
- *p = proc_create_data("use-gss-proxy", S_IFREG|S_IRUSR|S_IWUSR,
+ *p = proc_create_data("use-gss-proxy", S_IFREG | 0600,
sn->proc_net_rpc,
&use_gss_proxy_ops, net);
if (!*p)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 8a7e1c774f9c..c536cc24b3d1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1621,20 +1621,20 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
if (cd->procfs == NULL)
goto out_nomem;
- p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
+ p = proc_create_data("flush", S_IFREG | 0600,
cd->procfs, &cache_flush_operations_procfs, cd);
if (p == NULL)
goto out_nomem;
if (cd->cache_request || cd->cache_parse) {
- p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
- cd->procfs, &cache_file_operations_procfs, cd);
+ p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
+ &cache_file_operations_procfs, cd);
if (p == NULL)
goto out_nomem;
}
if (cd->cache_show) {
- p = proc_create_data("content", S_IFREG|S_IRUSR,
- cd->procfs, &content_file_operations_procfs, cd);
+ p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
+ &content_file_operations_procfs, cd);
if (p == NULL)
goto out_nomem;
}
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
index e980d2a493de..45a033329cd4 100644
--- a/net/sunrpc/debugfs.c
+++ b/net/sunrpc/debugfs.c
@@ -139,7 +139,7 @@ rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
return;
/* make tasks file */
- if (!debugfs_create_file("tasks", S_IFREG | S_IRUSR, clnt->cl_debugfs,
+ if (!debugfs_create_file("tasks", S_IFREG | 0400, clnt->cl_debugfs,
clnt, &tasks_fops))
goto out_err;
@@ -241,7 +241,7 @@ rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
return;
/* make tasks file */
- if (!debugfs_create_file("info", S_IFREG | S_IRUSR, xprt->debugfs,
+ if (!debugfs_create_file("info", S_IFREG | 0400, xprt->debugfs,
xprt, &xprt_info_fops)) {
debugfs_remove_recursive(xprt->debugfs);
xprt->debugfs = NULL;
@@ -317,7 +317,7 @@ inject_fault_dir(struct dentry *topdir)
if (!faultdir)
return NULL;
- if (!debugfs_create_file("disconnect", S_IFREG | S_IRUSR, faultdir,
+ if (!debugfs_create_file("disconnect", S_IFREG | 0400, faultdir,
NULL, &fault_disconnect_fops))
return NULL;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index fc97fc3ed637..0f08934b2cea 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -820,13 +820,13 @@ struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
{
struct dentry *dentry;
struct inode *dir = d_inode(parent);
- umode_t umode = S_IFIFO | S_IRUSR | S_IWUSR;
+ umode_t umode = S_IFIFO | 0600;
int err;
if (pipe->ops->upcall == NULL)
- umode &= ~S_IRUGO;
+ umode &= ~0444;
if (pipe->ops->downcall == NULL)
- umode &= ~S_IWUGO;
+ umode &= ~0222;
inode_lock_nested(dir, I_MUTEX_PARENT);
dentry = __rpc_lookup_create_exclusive(parent, name);
@@ -1035,7 +1035,7 @@ static const struct rpc_filelist authfiles[] = {
[RPCAUTH_info] = {
.name = "info",
.i_fop = &rpc_info_operations,
- .mode = S_IFREG | S_IRUSR,
+ .mode = S_IFREG | 0400,
},
};
@@ -1068,8 +1068,8 @@ struct dentry *rpc_create_client_dir(struct dentry *dentry,
{
struct dentry *ret;
- ret = rpc_mkdir_populate(dentry, name, S_IRUGO | S_IXUGO, NULL,
- rpc_clntdir_populate, rpc_client);
+ ret = rpc_mkdir_populate(dentry, name, 0555, NULL,
+ rpc_clntdir_populate, rpc_client);
if (!IS_ERR(ret)) {
rpc_client->cl_pipedir_objects.pdh_dentry = ret;
rpc_create_pipe_dir_objects(&rpc_client->cl_pipedir_objects);
@@ -1096,17 +1096,17 @@ static const struct rpc_filelist cache_pipefs_files[3] = {
[0] = {
.name = "channel",
.i_fop = &cache_file_operations_pipefs,
- .mode = S_IFREG|S_IRUSR|S_IWUSR,
+ .mode = S_IFREG | 0600,
},
[1] = {
.name = "content",
.i_fop = &content_file_operations_pipefs,
- .mode = S_IFREG|S_IRUSR,
+ .mode = S_IFREG | 0400,
},
[2] = {
.name = "flush",
.i_fop = &cache_flush_operations_pipefs,
- .mode = S_IFREG|S_IRUSR|S_IWUSR,
+ .mode = S_IFREG | 0600,
},
};
@@ -1164,39 +1164,39 @@ enum {
static const struct rpc_filelist files[] = {
[RPCAUTH_lockd] = {
.name = "lockd",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ .mode = S_IFDIR | 0555,
},
[RPCAUTH_mount] = {
.name = "mount",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ .mode = S_IFDIR | 0555,
},
[RPCAUTH_nfs] = {
.name = "nfs",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ .mode = S_IFDIR | 0555,
},
[RPCAUTH_portmap] = {
.name = "portmap",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ .mode = S_IFDIR | 0555,
},
[RPCAUTH_statd] = {
.name = "statd",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ .mode = S_IFDIR | 0555,
},
[RPCAUTH_nfsd4_cb] = {
.name = "nfsd4_cb",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ .mode = S_IFDIR | 0555,
},
[RPCAUTH_cache] = {
.name = "cache",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ .mode = S_IFDIR | 0555,
},
[RPCAUTH_nfsd] = {
.name = "nfsd",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ .mode = S_IFDIR | 0555,
},
[RPCAUTH_gssd] = {
.name = "gssd",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ .mode = S_IFDIR | 0555,
},
};
@@ -1261,7 +1261,7 @@ EXPORT_SYMBOL_GPL(rpc_put_sb_net);
static const struct rpc_filelist gssd_dummy_clnt_dir[] = {
[0] = {
.name = "clntXX",
- .mode = S_IFDIR | S_IRUGO | S_IXUGO,
+ .mode = S_IFDIR | 0555,
},
};
@@ -1310,7 +1310,7 @@ static const struct rpc_filelist gssd_dummy_info_file[] = {
[0] = {
.name = "info",
.i_fop = &rpc_dummy_info_operations,
- .mode = S_IFREG | S_IRUSR,
+ .mode = S_IFREG | 0400,
},
};
@@ -1397,7 +1397,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
sb->s_d_op = &simple_dentry_operations;
sb->s_time_gran = 1;
- inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
+ inode = rpc_get_inode(sb, S_IFDIR | 0555);
sb->s_root = root = d_make_root(inode);
if (!root)
return -ENOMEM;
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index f424539829b7..9aed6fe1bf1a 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -89,7 +89,6 @@ static void __net_exit sysctl_net_exit(struct net *net)
static struct pernet_operations sysctl_pernet_ops = {
.init = sysctl_net_init,
.exit = sysctl_net_exit,
- .async = true,
};
static struct ctl_table_header *net_header;
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 52dfc51ac4d5..5b38f5164281 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -109,7 +109,6 @@ static struct pernet_operations tipc_net_ops = {
.exit = tipc_exit_net,
.id = &tipc_net_id,
.size = sizeof(struct tipc_net),
- .async = true,
};
static int __init tipc_init(void)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 4a95c8c155c6..4fb4327311bb 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -333,8 +333,8 @@ static void tipc_node_write_unlock(struct tipc_node *n)
}
}
-struct tipc_node *tipc_node_create(struct net *net, u32 addr,
- u8 *peer_id, u16 capabilities)
+static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
+ u8 *peer_id, u16 capabilities)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *n, *temp_node;
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 2c13b18426d9..e7d91f5d5cae 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -687,7 +687,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
}
if (!tipc_own_id(net)) {
pr_warn("Failed to set node id, please configure manually\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err;
}
b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP;
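The tipc_udp_enable() change converts a bare return into the function's common unwind path: by this point an earlier allocation exists that the err: label is responsible for releasing, so returning -EINVAL directly would leak it. The general shape, with configured() and the freed resource as illustrative stand-ins:

static bool configured(void);	/* hypothetical check */

static int enable(void)
{
	void *res = kmalloc(64, GFP_KERNEL);
	int err;

	if (!res)
		return -ENOMEM;	/* nothing to undo yet */

	if (!configured()) {
		err = -EINVAL;
		goto err;	/* NOT "return -EINVAL": res would leak */
	}
	return 0;
err:
	kfree(res);
	return err;
}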
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index bc2970a8e7f3..aded82da1aea 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2913,7 +2913,6 @@ static void __net_exit unix_net_exit(struct net *net)
static struct pernet_operations unix_net_ops = {
.init = unix_net_init,
.exit = unix_net_exit,
- .async = true,
};
static int __init af_unix_init(void)
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 63682176c96c..882d97bdc6bf 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -27,6 +27,7 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
err = rdev_stop_ap(rdev, dev);
if (!err) {
+ wdev->conn_owner_nlportid = 0;
wdev->beacon_interval = 0;
memset(&wdev->chandef, 0, sizeof(wdev->chandef));
wdev->ssid_len = 0;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index a48859982a32..2db713d18f71 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -579,6 +579,10 @@ static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy,
{
struct ieee80211_channel *c;
u32 freq, start_freq, end_freq;
+ bool dfs_offload;
+
+ dfs_offload = wiphy_ext_feature_isset(wiphy,
+ NL80211_EXT_FEATURE_DFS_OFFLOAD);
start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
@@ -596,8 +600,9 @@ static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy,
if (c->flags & IEEE80211_CHAN_DISABLED)
return false;
- if ((c->flags & IEEE80211_CHAN_RADAR) &&
- (c->dfs_state != NL80211_DFS_AVAILABLE))
+ if ((c->flags & IEEE80211_CHAN_RADAR) &&
+ (c->dfs_state != NL80211_DFS_AVAILABLE) &&
+ !(c->dfs_state == NL80211_DFS_USABLE && dfs_offload))
return false;
}
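The chan.c change relaxes the availability test when the wiphy advertises NL80211_EXT_FEATURE_DFS_OFFLOAD: a radar channel still in the NL80211_DFS_USABLE state no longer blocks the chandef, since the hardware will run the CAC itself. An equivalent per-channel predicate, restated from the condition above:

static bool chan_blocks_availability(const struct ieee80211_channel *c,
				     bool dfs_offload)
{
	if (!(c->flags & IEEE80211_CHAN_RADAR))
		return false;			/* non-DFS channel */
	if (c->dfs_state == NL80211_DFS_AVAILABLE)
		return false;			/* CAC already completed */
	if (c->dfs_state == NL80211_DFS_USABLE && dfs_offload)
		return false;			/* firmware will run the CAC */
	return true;
}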
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 670aa229168a..a6f3cac8c640 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1340,7 +1340,6 @@ static void __net_exit cfg80211_pernet_exit(struct net *net)
static struct pernet_operations cfg80211_pernet_ops = {
.exit = cfg80211_pernet_exit,
- .async = true,
};
static int __init cfg80211_init(void)
diff --git a/net/wireless/core.h b/net/wireless/core.h
index eaff636169c2..63eb1b5fdd04 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -282,10 +282,10 @@ void cfg80211_bss_age(struct cfg80211_registered_device *rdev,
unsigned long age_secs);
/* IBSS */
-int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct cfg80211_ibss_params *params,
- struct cfg80211_cached_keys *connkeys);
+int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_ibss_params *params,
+ struct cfg80211_cached_keys *connkeys);
void cfg80211_clear_ibss(struct net_device *dev, bool nowext);
int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool nowext);
@@ -303,10 +303,6 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct mesh_setup *setup,
const struct mesh_config *conf);
-int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct mesh_setup *setup,
- const struct mesh_config *conf);
int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev);
int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index a1d10993d08a..d1743e6abc34 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -84,14 +84,15 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
}
EXPORT_SYMBOL(cfg80211_ibss_joined);
-static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct cfg80211_ibss_params *params,
- struct cfg80211_cached_keys *connkeys)
+int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_ibss_params *params,
+ struct cfg80211_cached_keys *connkeys)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
+ ASSERT_RTNL();
ASSERT_WDEV_LOCK(wdev);
if (wdev->ssid_len)
@@ -146,23 +147,6 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
return 0;
}
-int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct cfg80211_ibss_params *params,
- struct cfg80211_cached_keys *connkeys)
-{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err;
-
- ASSERT_RTNL();
-
- wdev_lock(wdev);
- err = __cfg80211_join_ibss(rdev, dev, params, connkeys);
- wdev_unlock(wdev);
-
- return err;
-}
-
static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -224,6 +208,7 @@ int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
if (err)
return err;
+ wdev->conn_owner_nlportid = 0;
__cfg80211_clear_ibss(dev, nowext);
return 0;
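The locked wrapper cfg80211_join_ibss() is deleted here, and its mesh twin below gets the same treatment: nl80211_join_ibss() (later in this series) takes wdev_lock itself so that the join and the recording of conn_owner_nlportid happen under one critical section, and __cfg80211_join_ibss() inherits the ASSERT_RTNL() the wrapper carried. Caller-side shape after the refactor (owner_requested and portid stand in for the nl80211 attributes):

static int join_ibss_locked(struct cfg80211_registered_device *rdev,
			    struct net_device *dev,
			    struct cfg80211_ibss_params *ibss,
			    struct cfg80211_cached_keys *connkeys,
			    bool owner_requested, u32 portid)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	int err;

	wdev_lock(wdev);
	err = __cfg80211_join_ibss(rdev, dev, ibss, connkeys);
	if (!err && owner_requested)	/* NL80211_ATTR_SOCKET_OWNER */
		wdev->conn_owner_nlportid = portid;
	wdev_unlock(wdev);

	return err;
}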
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index b12da6ef3c12..eac5aa1419fc 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -217,21 +217,6 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
return err;
}
-int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct mesh_setup *setup,
- const struct mesh_config *conf)
-{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err;
-
- wdev_lock(wdev);
- err = __cfg80211_join_mesh(rdev, dev, setup, conf);
- wdev_unlock(wdev);
-
- return err;
-}
-
int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
struct cfg80211_chan_def *chandef)
@@ -286,6 +271,7 @@ int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
err = rdev_leave_mesh(rdev, dev);
if (!err) {
+ wdev->conn_owner_nlportid = 0;
wdev->mesh_id_len = 0;
wdev->beacon_interval = 0;
memset(&wdev->chandef, 0, sizeof(wdev->chandef));
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index bbb9907bfa86..12b3edf70a7b 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -872,7 +872,7 @@ void cfg80211_cac_event(struct net_device *netdev,
trace_cfg80211_cac_event(netdev, event);
- if (WARN_ON(!wdev->cac_started))
+ if (WARN_ON(!wdev->cac_started && event != NL80211_RADAR_CAC_STARTED))
return;
if (WARN_ON(!wdev->chandef.chan))
@@ -888,14 +888,17 @@ void cfg80211_cac_event(struct net_device *netdev,
sizeof(struct cfg80211_chan_def));
queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk);
cfg80211_sched_dfs_chan_update(rdev);
- break;
+ /* fall through */
case NL80211_RADAR_CAC_ABORTED:
+ wdev->cac_started = false;
+ break;
+ case NL80211_RADAR_CAC_STARTED:
+ wdev->cac_started = true;
break;
default:
WARN_ON(1);
return;
}
- wdev->cac_started = false;
nl80211_radar_notify(rdev, chandef, event, netdev, gfp);
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a910150f8169..ff28f8feeb09 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -287,6 +287,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG },
[NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 },
[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG },
[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
@@ -4134,6 +4135,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
wdev->chandef = params.chandef;
wdev->ssid_len = params.ssid_len;
memcpy(wdev->ssid, params.ssid, wdev->ssid_len);
+
+ if (info->attrs[NL80211_ATTR_SOCKET_OWNER])
+ wdev->conn_owner_nlportid = info->snd_portid;
}
wdev_unlock(wdev);
@@ -7551,12 +7555,13 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_chan_def chandef;
enum nl80211_dfs_regions dfs_region;
unsigned int cac_time_ms;
int err;
- dfs_region = reg_get_dfs_region(wdev->wiphy);
+ dfs_region = reg_get_dfs_region(wiphy);
if (dfs_region == NL80211_DFS_UNSET)
return -EINVAL;
@@ -7570,17 +7575,20 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
if (wdev->cac_started)
return -EBUSY;
- err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef,
- wdev->iftype);
+ err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype);
if (err < 0)
return err;
if (err == 0)
return -EINVAL;
- if (!cfg80211_chandef_dfs_usable(wdev->wiphy, &chandef))
+ if (!cfg80211_chandef_dfs_usable(wiphy, &chandef))
return -EINVAL;
+ /* CAC start is offloaded to HW and can't be started manually */
+ if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD))
+ return -EOPNOTSUPP;
+
if (!rdev->ops->start_radar_detection)
return -EOPNOTSUPP;
@@ -8204,6 +8212,22 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static int validate_pae_over_nl80211(struct cfg80211_registered_device *rdev,
+ struct genl_info *info)
+{
+ if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
+ GENL_SET_ERR_MSG(info, "SOCKET_OWNER not set");
+ return -EINVAL;
+ }
+
+ if (!rdev->ops->tx_control_port ||
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
struct genl_info *info,
struct cfg80211_crypto_settings *settings,
@@ -8227,6 +8251,15 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
} else
settings->control_port_ethertype = cpu_to_be16(ETH_P_PAE);
+ if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
+ int r = validate_pae_over_nl80211(rdev, info);
+
+ if (r < 0)
+ return r;
+
+ settings->control_port_over_nl80211 = true;
+ }
+
if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) {
void *data;
int len, i;
@@ -8672,12 +8705,26 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
ibss.control_port =
nla_get_flag(info->attrs[NL80211_ATTR_CONTROL_PORT]);
+ if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
+ int r = validate_pae_over_nl80211(rdev, info);
+
+ if (r < 0)
+ return r;
+
+ ibss.control_port_over_nl80211 = true;
+ }
+
ibss.userspace_handles_dfs =
nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS]);
- err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
+ wdev_lock(dev->ieee80211_ptr);
+ err = __cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
if (err)
kzfree(connkeys);
+ else if (info->attrs[NL80211_ATTR_SOCKET_OWNER])
+ dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid;
+ wdev_unlock(dev->ieee80211_ptr);
+
return err;
}
@@ -10083,7 +10130,7 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
} else {
- /* cfg80211_join_mesh() will sort it out */
+ /* __cfg80211_join_mesh() will sort it out */
setup.chandef.chan = NULL;
}
@@ -10121,7 +10168,22 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
setup.userspace_handles_dfs =
nla_get_flag(info->attrs[NL80211_ATTR_HANDLE_DFS]);
- return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
+ if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
+ int r = validate_pae_over_nl80211(rdev, info);
+
+ if (r < 0)
+ return r;
+
+ setup.control_port_over_nl80211 = true;
+ }
+
+ wdev_lock(dev->ieee80211_ptr);
+ err = __cfg80211_join_mesh(rdev, dev, &setup, &cfg);
+ if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER])
+ dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid;
+ wdev_unlock(dev->ieee80211_ptr);
+
+ return err;
}
static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
@@ -12517,6 +12579,68 @@ static int nl80211_external_auth(struct sk_buff *skb, struct genl_info *info)
return rdev_external_auth(rdev, dev, &params);
}
+static int nl80211_tx_control_port(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ const u8 *buf;
+ size_t len;
+ u8 *dest;
+ u16 proto;
+ bool noencrypt;
+ int err;
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211))
+ return -EOPNOTSUPP;
+
+ if (!rdev->ops->tx_control_port)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_FRAME] ||
+ !info->attrs[NL80211_ATTR_MAC] ||
+ !info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) {
+ GENL_SET_ERR_MSG(info, "Frame, MAC or ethertype missing");
+ return -EINVAL;
+ }
+
+ wdev_lock(wdev);
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (wdev->current_bss)
+ break;
+ err = -ENOTCONN;
+ goto out;
+ default:
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ wdev_unlock(wdev);
+
+ buf = nla_data(info->attrs[NL80211_ATTR_FRAME]);
+ len = nla_len(info->attrs[NL80211_ATTR_FRAME]);
+ dest = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ proto = nla_get_u16(info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]);
+ noencrypt =
+ nla_get_flag(info->attrs[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT]);
+
+ return rdev_tx_control_port(rdev, dev, buf, len,
+ dest, cpu_to_be16(proto), noencrypt);
+
+ out:
+ wdev_unlock(wdev);
+ return err;
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -13420,7 +13544,14 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
-
+ {
+ .cmd = NL80211_CMD_CONTROL_PORT_FRAME,
+ .doit = nl80211_tx_control_port,
+ .policy = nl80211_policy,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_family nl80211_fam __ro_after_init = {
@@ -14535,6 +14666,64 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
}
EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
+static int __nl80211_rx_control_port(struct net_device *dev,
+ const u8 *buf, size_t len,
+ const u8 *addr, u16 proto,
+ bool unencrypted, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+ u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid);
+
+ if (!nlportid)
+ return -ENOENT;
+
+ msg = nlmsg_new(100 + len, gfp);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONTROL_PORT_FRAME);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return -ENOBUFS;
+ }
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+ NL80211_ATTR_PAD) ||
+ nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
+ nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) ||
+ (unencrypted && nla_put_flag(msg,
+ NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT)))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
+
+ nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+
+bool cfg80211_rx_control_port(struct net_device *dev,
+ const u8 *buf, size_t len,
+ const u8 *addr, u16 proto, bool unencrypted)
+{
+ int ret;
+
+ trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted);
+ ret = __nl80211_rx_control_port(dev, buf, len, addr, proto,
+ unencrypted, GFP_ATOMIC);
+ trace_cfg80211_return_bool(ret == 0);
+ return ret == 0;
+}
+EXPORT_SYMBOL(cfg80211_rx_control_port);
+
static struct sk_buff *cfg80211_prepare_cqm(struct net_device *dev,
const char *mac, gfp_t gfp)
{
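A hedged sketch of the driver side of the new export: when userspace has negotiated control-port-over-nl80211, a driver hands EAPOL frames to cfg80211_rx_control_port() (added above) and only falls back to the normal rx path if no owning socket is registered. deliver_rx() is hypothetical; the cfg80211 call matches the signature exported above:

static void deliver_rx(struct net_device *dev, const u8 *frame,
		       size_t len);	/* hypothetical normal rx path */

static void rx_eapol(struct net_device *dev, const u8 *frame, size_t len,
		     const u8 *src, bool was_encrypted)
{
	/* ETH_P_PAE (0x888e) is the EAPOL ethertype */
	if (cfg80211_rx_control_port(dev, frame, len, src, ETH_P_PAE,
				     !was_encrypted))
		return;	/* unicast to the owning socket via nl80211 */

	deliver_rx(dev, frame, len);
}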
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 84f23ae015fc..87479a53411b 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -714,6 +714,21 @@ static inline int rdev_mgmt_tx(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int rdev_tx_control_port(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ const void *buf, size_t len,
+ const u8 *dest, __be16 proto,
+ const bool noencrypt)
+{
+ int ret;
+ trace_rdev_tx_control_port(&rdev->wiphy, dev, buf, len,
+ dest, proto, noencrypt);
+ ret = rdev->ops->tx_control_port(&rdev->wiphy, dev, buf, len,
+ dest, proto, noencrypt);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
static inline int
rdev_mgmt_tx_cancel_wait(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, u64 cookie)
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7b42f0bacfd8..16c7e4ef5820 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -5,6 +5,7 @@
* Copyright 2008-2011 Luis R. Rodriguez <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -134,12 +135,12 @@ static void restore_regulatory_settings(bool reset_user);
static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
{
- return rtnl_dereference(cfg80211_regdomain);
+ return rcu_dereference_rtnl(cfg80211_regdomain);
}
const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
{
- return rtnl_dereference(wiphy->regd);
+ return rcu_dereference_rtnl(wiphy->regd);
}
static const char *reg_dfs_region_str(enum nl80211_dfs_regions dfs_region)
@@ -424,23 +425,36 @@ static const struct ieee80211_regdomain *
reg_copy_regd(const struct ieee80211_regdomain *src_regd)
{
struct ieee80211_regdomain *regd;
- int size_of_regd;
+ int size_of_regd, size_of_wmms;
unsigned int i;
+ struct ieee80211_wmm_rule *d_wmm, *s_wmm;
size_of_regd =
sizeof(struct ieee80211_regdomain) +
src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule);
+ size_of_wmms = src_regd->n_wmm_rules *
+ sizeof(struct ieee80211_wmm_rule);
- regd = kzalloc(size_of_regd, GFP_KERNEL);
+ regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
- for (i = 0; i < src_regd->n_reg_rules; i++)
+ d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
+ s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd);
+ memcpy(d_wmm, s_wmm, size_of_wmms);
+
+ for (i = 0; i < src_regd->n_reg_rules; i++) {
memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
sizeof(struct ieee80211_reg_rule));
+ if (!src_regd->reg_rules[i].wmm_rule)
+ continue;
+ regd->reg_rules[i].wmm_rule = d_wmm +
+ (src_regd->reg_rules[i].wmm_rule - s_wmm) /
+ sizeof(struct ieee80211_wmm_rule);
+ }
return regd;
}
@@ -595,6 +609,17 @@ enum fwdb_flags {
FWDB_FLAG_AUTO_BW = BIT(4),
};
+struct fwdb_wmm_ac {
+ u8 ecw;
+ u8 aifsn;
+ __be16 cot;
+} __packed;
+
+struct fwdb_wmm_rule {
+ struct fwdb_wmm_ac client[IEEE80211_NUM_ACS];
+ struct fwdb_wmm_ac ap[IEEE80211_NUM_ACS];
+} __packed;
+
struct fwdb_rule {
u8 len;
u8 flags;
@@ -602,6 +627,7 @@ struct fwdb_rule {
__be32 start, end, max_bw;
/* start of optional data */
__be16 cac_timeout;
+ __be16 wmm_ptr;
} __packed __aligned(4);
#define FWDB_MAGIC 0x52474442
@@ -613,6 +639,31 @@ struct fwdb_header {
struct fwdb_country country[];
} __packed __aligned(4);
+static int ecw2cw(int ecw)
+{
+ return (1 << ecw) - 1;
+}
+
+static bool valid_wmm(struct fwdb_wmm_rule *rule)
+{
+ struct fwdb_wmm_ac *ac = (struct fwdb_wmm_ac *)rule;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS * 2; i++) {
+ u16 cw_min = ecw2cw((ac[i].ecw & 0xf0) >> 4);
+ u16 cw_max = ecw2cw(ac[i].ecw & 0x0f);
+ u8 aifsn = ac[i].aifsn;
+
+ if (cw_min >= cw_max)
+ return false;
+
+ if (aifsn < 1)
+ return false;
+ }
+
+ return true;
+}
+
static bool valid_rule(const u8 *data, unsigned int size, u16 rule_ptr)
{
struct fwdb_rule *rule = (void *)(data + (rule_ptr << 2));
@@ -623,7 +674,18 @@ static bool valid_rule(const u8 *data, unsigned int size, u16 rule_ptr)
/* mandatory fields */
if (rule->len < offsetofend(struct fwdb_rule, max_bw))
return false;
+ if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
+ u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
+ struct fwdb_wmm_rule *wmm;
+
+ if (wmm_ptr + sizeof(struct fwdb_wmm_rule) > size)
+ return false;
+ wmm = (void *)(data + wmm_ptr);
+
+ if (!valid_wmm(wmm))
+ return false;
+ }
return true;
}
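ecw2cw() above decodes the 802.11 exponent encoding of a contention window, cw = 2^ecw - 1, and one byte packs two exponents, which is why valid_wmm() (and set_wmm_rule() below) shift and mask nibbles. A worked example:

/*
 *   ecw byte 0x4a  ->  ecw_min = 4,  ecw_max = 10
 *                  ->  cw_min  = 15, cw_max  = 1023   (valid: 15 < 1023)
 */
static void decode_ecw(u8 ecw, u16 *cw_min, u16 *cw_max)
{
	*cw_min = ecw2cw((ecw & 0xf0) >> 4);	/* high nibble: ECWmin */
	*cw_max = ecw2cw(ecw & 0x0f);		/* low nibble:  ECWmax */
}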
@@ -798,23 +860,118 @@ static bool valid_regdb(const u8 *data, unsigned int size)
return true;
}
+static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
+ struct fwdb_wmm_rule *wmm)
+{
+ unsigned int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ rule->client[i].cw_min =
+ ecw2cw((wmm->client[i].ecw & 0xf0) >> 4);
+ rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f);
+ rule->client[i].aifsn = wmm->client[i].aifsn;
+ rule->client[i].cot = 1000 * be16_to_cpu(wmm->client[i].cot);
+ rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4);
+ rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f);
+ rule->ap[i].aifsn = wmm->ap[i].aifsn;
+ rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
+ }
+}
+
+static int __regdb_query_wmm(const struct fwdb_header *db,
+ const struct fwdb_country *country, int freq,
+ u32 *dbptr, struct ieee80211_wmm_rule *rule)
+{
+ unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
+ struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
+ int i;
+
+ for (i = 0; i < coll->n_rules; i++) {
+ __be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2));
+ unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2;
+ struct fwdb_rule *rrule = (void *)((u8 *)db + rule_ptr);
+ struct fwdb_wmm_rule *wmm;
+ unsigned int wmm_ptr;
+
+ if (rrule->len < offsetofend(struct fwdb_rule, wmm_ptr))
+ continue;
+
+ if (freq >= KHZ_TO_MHZ(be32_to_cpu(rrule->start)) &&
+ freq <= KHZ_TO_MHZ(be32_to_cpu(rrule->end))) {
+ wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
+ wmm = (void *)((u8 *)db + wmm_ptr);
+ set_wmm_rule(rule, wmm);
+ if (dbptr)
+ *dbptr = wmm_ptr;
+ return 0;
+ }
+ }
+
+ return -ENODATA;
+}
+
+int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
+ struct ieee80211_wmm_rule *rule)
+{
+ const struct fwdb_header *hdr = regdb;
+ const struct fwdb_country *country;
+
+ if (IS_ERR(regdb))
+ return PTR_ERR(regdb);
+
+ country = &hdr->country[0];
+ while (country->coll_ptr) {
+ if (alpha2_equal(alpha2, country->alpha2))
+ return __regdb_query_wmm(regdb, country, freq, dbptr,
+ rule);
+
+ country++;
+ }
+
+ return -ENODATA;
+}
+EXPORT_SYMBOL(reg_query_regdb_wmm);
+
+struct wmm_ptrs {
+ struct ieee80211_wmm_rule *rule;
+ u32 ptr;
+};
+
+static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs,
+ u32 wmm_ptr, int n_wmms)
+{
+ int i;
+
+ for (i = 0; i < n_wmms; i++) {
+ if (wmm_ptrs[i].ptr == wmm_ptr)
+ return wmm_ptrs[i].rule;
+ }
+ return NULL;
+}
+
static int regdb_query_country(const struct fwdb_header *db,
const struct fwdb_country *country)
{
unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
struct ieee80211_regdomain *regdom;
- unsigned int size_of_regd;
- unsigned int i;
+ struct ieee80211_regdomain *tmp_rd;
+ unsigned int size_of_regd, i, n_wmms = 0;
+ struct wmm_ptrs *wmm_ptrs;
- size_of_regd =
- sizeof(struct ieee80211_regdomain) +
+ size_of_regd = sizeof(struct ieee80211_regdomain) +
coll->n_rules * sizeof(struct ieee80211_reg_rule);
regdom = kzalloc(size_of_regd, GFP_KERNEL);
if (!regdom)
return -ENOMEM;
+ wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL);
+ if (!wmm_ptrs) {
+ kfree(regdom);
+ return -ENOMEM;
+ }
+
regdom->n_reg_rules = coll->n_rules;
regdom->alpha2[0] = country->alpha2[0];
regdom->alpha2[1] = country->alpha2[1];
@@ -851,7 +1008,38 @@ static int regdb_query_country(const struct fwdb_header *db,
if (rule->len >= offsetofend(struct fwdb_rule, cac_timeout))
rrule->dfs_cac_ms =
1000 * be16_to_cpu(rule->cac_timeout);
+ if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
+ u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
+ struct ieee80211_wmm_rule *wmm_pos =
+ find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms);
+ struct fwdb_wmm_rule *wmm;
+ struct ieee80211_wmm_rule *wmm_rule;
+
+ if (wmm_pos) {
+ rrule->wmm_rule = wmm_pos;
+ continue;
+ }
+ wmm = (void *)((u8 *)db + wmm_ptr);
+ tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) *
+ sizeof(struct ieee80211_wmm_rule),
+ GFP_KERNEL);
+
+ if (!tmp_rd) {
+ kfree(regdom);
+ return -ENOMEM;
+ }
+ regdom = tmp_rd;
+
+ wmm_rule = (struct ieee80211_wmm_rule *)
+ ((u8 *)regdom + size_of_regd + n_wmms *
+ sizeof(struct ieee80211_wmm_rule));
+
+ set_wmm_rule(wmm_rule, wmm);
+ wmm_ptrs[n_wmms].ptr = wmm_ptr;
+ wmm_ptrs[n_wmms++].rule = wmm_rule;
+ }
}
+ kfree(wmm_ptrs);
return reg_schedule_apply(regdom);
}
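reg_copy_regd() and regdb_query_country() above both keep the WMM data in the same allocation as the regdomain, appended after the reg_rules[] array, with each rule's wmm_rule pointer aimed into that tail; regdb_query_country() grows the tail with krealloc() as new, deduplicated WMM blocks are encountered. The resulting layout, as a sketch:

/*
 *   +------------------------------+
 *   | struct ieee80211_regdomain   |
 *   |   .reg_rules[0].wmm_rule ----+--+
 *   |   ...                        |  |
 *   |   .reg_rules[n-1]            |  |
 *   +------------------------------+  |
 *   | struct ieee80211_wmm_rule[]  |<-+   (size_of_wmms bytes)
 *   +------------------------------+
 */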
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 701cfd7acc1b..5df6b33db786 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -1239,17 +1239,38 @@ void cfg80211_autodisconnect_wk(struct work_struct *work)
wdev_lock(wdev);
if (wdev->conn_owner_nlportid) {
- /*
- * Use disconnect_bssid if still connecting and ops->disconnect
- * not implemented. Otherwise we can use cfg80211_disconnect.
- */
- if (rdev->ops->disconnect || wdev->current_bss)
- cfg80211_disconnect(rdev, wdev->netdev,
- WLAN_REASON_DEAUTH_LEAVING, true);
- else
- cfg80211_mlme_deauth(rdev, wdev->netdev,
- wdev->disconnect_bssid, NULL, 0,
- WLAN_REASON_DEAUTH_LEAVING, false);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ cfg80211_leave_ibss(rdev, wdev->netdev, false);
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ cfg80211_stop_ap(rdev, wdev->netdev, false);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ cfg80211_leave_mesh(rdev, wdev->netdev);
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ /*
+ * Use disconnect_bssid if still connecting and
+ * ops->disconnect not implemented. Otherwise we can
+ * use cfg80211_disconnect.
+ */
+ if (rdev->ops->disconnect || wdev->current_bss)
+ cfg80211_disconnect(rdev, wdev->netdev,
+ WLAN_REASON_DEAUTH_LEAVING,
+ true);
+ else
+ cfg80211_mlme_deauth(rdev, wdev->netdev,
+ wdev->disconnect_bssid,
+ NULL, 0,
+ WLAN_REASON_DEAUTH_LEAVING,
+ false);
+ break;
+ default:
+ break;
+ }
}
wdev_unlock(wdev);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 5152938b358d..a64291ae52a6 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1882,6 +1882,32 @@ TRACE_EVENT(rdev_mgmt_tx,
BOOL_TO_STR(__entry->dont_wait_for_ack))
);
+TRACE_EVENT(rdev_tx_control_port,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ const u8 *buf, size_t len, const u8 *dest, __be16 proto,
+ bool unencrypted),
+ TP_ARGS(wiphy, netdev, buf, len, dest, proto, unencrypted),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(dest)
+ __field(__be16, proto)
+ __field(bool, unencrypted)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(dest, dest);
+ __entry->proto = proto;
+ __entry->unencrypted = unencrypted;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ","
+ " proto: 0x%x, unencrypted: %s",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(dest),
+ be16_to_cpu(__entry->proto),
+ BOOL_TO_STR(__entry->unencrypted))
+);
+
TRACE_EVENT(rdev_set_noack_map,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
u16 noack_map),
@@ -2600,6 +2626,27 @@ TRACE_EVENT(cfg80211_mgmt_tx_status,
WDEV_PR_ARG, __entry->cookie, BOOL_TO_STR(__entry->ack))
);
+TRACE_EVENT(cfg80211_rx_control_port,
+ TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len,
+ const u8 *addr, u16 proto, bool unencrypted),
+ TP_ARGS(netdev, buf, len, addr, proto, unencrypted),
+ TP_STRUCT__entry(
+ NETDEV_ENTRY
+ MAC_ENTRY(addr)
+ __field(u16, proto)
+ __field(bool, unencrypted)
+ ),
+ TP_fast_assign(
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(addr, addr);
+ __entry->proto = proto;
+ __entry->unencrypted = unencrypted;
+ ),
+ TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s",
+ NETDEV_PR_ARG, MAC_PR_ARG(addr),
+ __entry->proto, BOOL_TO_STR(__entry->unencrypted))
+);
+
TRACE_EVENT(cfg80211_cqm_rssi_notify,
TP_PROTO(struct net_device *netdev,
enum nl80211_cqm_rssi_threshold_event rssi_event,
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index bc7064486b15..5e677dac2a0c 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -347,13 +347,13 @@ void wireless_nlevent_flush(void)
struct sk_buff *skb;
struct net *net;
- ASSERT_RTNL();
-
+ down_read(&net_rwsem);
for_each_net(net) {
while ((skb = skb_dequeue(&net->wext_nlevents)))
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
GFP_KERNEL);
}
+ up_read(&net_rwsem);
}
EXPORT_SYMBOL_GPL(wireless_nlevent_flush);
@@ -390,7 +390,6 @@ static void __net_exit wext_pernet_exit(struct net *net)
static struct pernet_operations wext_pernet_ops = {
.init = wext_pernet_init,
.exit = wext_pernet_exit,
- .async = true,
};
static int __init wireless_nlevent_init(void)
@@ -411,9 +410,7 @@ subsys_initcall(wireless_nlevent_init);
/* Process events generated by the wireless layer or the driver. */
static void wireless_nlevent_process(struct work_struct *work)
{
- rtnl_lock();
wireless_nlevent_flush();
- rtnl_unlock();
}
static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process);
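wireless_nlevent_flush() no longer asserts the RTNL: walking the namespace list is protected by the dedicated net_rwsem (the selinux hunk below makes the same substitution), so the work handler can drop its rtnl_lock()/rtnl_unlock() pair entirely. The canonical iteration pattern after the change:

#include <net/net_namespace.h>

static void walk_all_namespaces(void (*fn)(struct net *net))
{
	struct net *net;

	down_read(&net_rwsem);
	for_each_net(net)
		fn(net);
	up_read(&net_rwsem);
}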
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index 5511f989ef47..b4c464594a5e 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -142,7 +142,7 @@ static const struct file_operations wireless_seq_fops = {
int __net_init wext_proc_init(struct net *net)
{
/* Create /proc/net/wireless entry */
- if (!proc_create("wireless", S_IRUGO, net->proc_net,
+ if (!proc_create("wireless", 0444, net->proc_net,
&wireless_seq_fops))
return -ENOMEM;
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 0917f047f2cf..64b415e93f6a 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -212,16 +212,16 @@ int __init x25_proc_init(void)
if (!proc_mkdir("x25", init_net.proc_net))
return -ENOMEM;
- if (!proc_create("x25/route", S_IRUGO, init_net.proc_net,
- &x25_seq_route_fops))
+ if (!proc_create("x25/route", 0444, init_net.proc_net,
+ &x25_seq_route_fops))
goto out;
- if (!proc_create("x25/socket", S_IRUGO, init_net.proc_net,
- &x25_seq_socket_fops))
+ if (!proc_create("x25/socket", 0444, init_net.proc_net,
+ &x25_seq_socket_fops))
goto out;
- if (!proc_create("x25/forward", S_IRUGO, init_net.proc_net,
- &x25_seq_forward_fops))
+ if (!proc_create("x25/forward", 0444, init_net.proc_net,
+ &x25_seq_forward_fops))
goto out;
return 0;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 1472c0857975..44fc54dc013c 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -9,6 +9,7 @@
*/
#include <linux/bottom_half.h>
+#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -31,7 +32,7 @@ struct xfrm_trans_cb {
#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
-static struct kmem_cache *secpath_cachep __read_mostly;
+static struct kmem_cache *secpath_cachep __ro_after_init;
static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];
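Both xfrm cache pointers are assigned exactly once during subsystem init and never again, which is what makes __ro_after_init applicable: unlike __read_mostly, which only groups rarely-written data for cache behaviour, __ro_after_init has the kernel write-protect the variable once boot-time init completes, so a later stray store faults. A sketch of the idiom:

static struct kmem_cache *example_cachep __ro_after_init;

static int __init example_init(void)
{
	/* the only write; the page becomes read-only after init */
	example_cachep = kmem_cache_create("example", 64, 0,
					   SLAB_HWCACHE_ALIGN, NULL);
	return example_cachep ? 0 : -ENOMEM;
}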
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cb3bb9ae4407..0e065db6c7c0 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -51,7 +51,7 @@ static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
__read_mostly;
-static struct kmem_cache *xfrm_dst_cache __read_mostly;
+static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static __read_mostly seqcount_t xfrm_policy_hash_generation;
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
@@ -1743,7 +1743,7 @@ static void xfrm_pcpu_work_fn(struct work_struct *work)
void xfrm_policy_cache_flush(void)
{
struct xfrm_dst *old;
- bool found = 0;
+ bool found = false;
int cpu;
might_sleep();
@@ -2985,7 +2985,6 @@ static void __net_exit xfrm_net_exit(struct net *net)
static struct pernet_operations __net_initdata xfrm_net_ops = {
.init = xfrm_net_init,
.exit = xfrm_net_exit,
- .async = true,
};
void __init xfrm_init(void)
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 6d5f85f4e672..ed06903cd84d 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -79,7 +79,7 @@ static const struct file_operations xfrm_statistics_seq_fops = {
int __net_init xfrm_proc_init(struct net *net)
{
- if (!proc_create("xfrm_stat", S_IRUGO, net->proc_net,
+ if (!proc_create("xfrm_stat", 0444, net->proc_net,
&xfrm_statistics_seq_fops))
return -ENOMEM;
return 0;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index e92b8c019c88..080035f056d9 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -3253,7 +3253,6 @@ static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
static struct pernet_operations xfrm_user_net_ops = {
.init = xfrm_user_net_init,
.exit_batch = xfrm_user_net_exit,
- .async = true,
};
static int __init xfrm_user_init(void)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index b4d7b6242a40..8644d864e3c1 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -6743,7 +6743,6 @@ static void __net_exit selinux_nf_unregister(struct net *net)
static struct pernet_operations selinux_net_ops = {
.init = selinux_nf_register,
.exit = selinux_nf_unregister,
- .async = true,
};
static int __init selinux_nf_ip_init(void)
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 1f173a7a4daa..a0b465316292 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -47,10 +47,10 @@ static inline void selinux_xfrm_notify_policyload(void)
{
struct net *net;
- rtnl_lock();
+ down_read(&net_rwsem);
for_each_net(net)
rt_genid_bump_all(net);
- rtnl_unlock();
+ up_read(&net_rwsem);
}
#else
static inline int selinux_xfrm_enabled(void)
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
index 3f29c03162ca..e36d17835d4f 100644
--- a/security/smack/smack_netfilter.c
+++ b/security/smack/smack_netfilter.c
@@ -89,7 +89,6 @@ static void __net_exit smack_nf_unregister(struct net *net)
static struct pernet_operations smack_net_ops = {
.init = smack_nf_register,
.exit = smack_nf_unregister,
- .async = true,
};
static int __init smack_nf_ip_init(void)
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
index ae96d0350d7e..68c91023cdb9 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
@@ -481,7 +481,7 @@
255
]
],
- "cmdUnderTest": "for i in `seq 1 32`; do cmd=\"action pass index $i \"; args=\"$args$cmd\"; done && $TC actions add $args",
+ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action pass index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
"expExitCode": "0",
"verifyCmd": "$TC actions list action gact",
"matchPattern": "^[ \t]+index [0-9]+ ref",
@@ -505,7 +505,7 @@
255
]
],
- "cmdUnderTest": "for i in `seq 1 32`; do cmd=\"action continue index $i cookie aabbccddeeff112233445566778800a1 \"; args=\"$args$cmd\"; done && $TC actions add $args",
+ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action continue index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
"expExitCode": "0",
"verifyCmd": "$TC actions list action gact",
"matchPattern": "^[ \t]+index [0-9]+ ref",
@@ -528,13 +528,13 @@
1,
255
],
- "for i in `seq 1 32`; do cmd=\"action continue index $i \"; args=\"$args$cmd\"; done && $TC actions add $args"
+ "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action continue index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
],
- "cmdUnderTest": "for i in `seq 1 32`; do cmd=\"action gact index $i \"; args=\"$args$cmd\"; done && $TC actions del $args",
+ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action gact index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
"expExitCode": "0",
"verifyCmd": "$TC actions list action gact",
"matchPattern": "^[ \t]+index [0-9]+ ref",
"matchCount": "0",
"teardown": []
}
-]
\ No newline at end of file
+]