-rw-r--r--  Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-controller.yaml | 1
-rw-r--r--  Documentation/netlink/specs/devlink.yaml | 457
-rw-r--r--  Documentation/netlink/specs/ovs_vport.yaml | 18
-rw-r--r--  Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst | 6
-rw-r--r--  Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst | 313
-rw-r--r--  Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst | 1
-rw-r--r--  Documentation/networking/devlink/mlx5.rst | 182
-rw-r--r--  Documentation/networking/phy.rst | 4
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 9
-rw-r--r--  drivers/bluetooth/btbcm.c | 5
-rw-r--r--  drivers/bluetooth/btintel.c | 198
-rw-r--r--  drivers/bluetooth/btintel.h | 3
-rw-r--r--  drivers/bluetooth/btmtk.c | 133
-rw-r--r--  drivers/bluetooth/btmtk.h | 42
-rw-r--r--  drivers/bluetooth/btmtkuart.c | 1
-rw-r--r--  drivers/bluetooth/btnxpuart.c | 39
-rw-r--r--  drivers/bluetooth/btqca.c | 13
-rw-r--r--  drivers/bluetooth/btqca.h | 12
-rw-r--r--  drivers/bluetooth/btrtl.c | 233
-rw-r--r--  drivers/bluetooth/btrtl.h | 13
-rw-r--r--  drivers/bluetooth/btusb.c | 366
-rw-r--r--  drivers/bluetooth/hci_h5.c | 2
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 3
-rw-r--r--  drivers/bluetooth/hci_nokia.c | 6
-rw-r--r--  drivers/bluetooth/hci_qca.c | 164
-rw-r--r--  drivers/net/Kconfig | 1
-rw-r--r--  drivers/net/bonding/bond_debugfs.c | 15
-rw-r--r--  drivers/net/bonding/bond_main.c | 32
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6060.c | 45
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.c | 2
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.c | 2
-rw-r--r--  drivers/net/dsa/rzn1_a5psw.c | 242
-rw-r--r--  drivers/net/dsa/rzn1_a5psw.h | 8
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 3
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 3
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 4
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 50
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 6
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 187
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 560
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c | 668
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h | 17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 121
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c | 164
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h | 13
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ddp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 8
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h | 6
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c | 37
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 21
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 237
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 65
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c | 49
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 6
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 40
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 45
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_uart.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 25
-rw-r--r--  drivers/net/ethernet/sun/ldmvsw.c | 3
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 3
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 3
-rw-r--r--  drivers/net/macsec.c | 17
-rw-r--r--  drivers/net/pcs/pcs-lynx.c | 2
-rw-r--r--  drivers/net/phy/mediatek-ge-soc.c | 435
-rw-r--r--  drivers/net/phy/phy-core.c | 2
-rw-r--r--  drivers/net/phy/phylink.c | 3
-rw-r--r--  drivers/net/usb/r8152.c | 3
-rw-r--r--  drivers/net/vmxnet3/Makefile | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 236
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 23
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 43
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_xdp.c | 419
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_xdp.h | 47
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 13
-rw-r--r--  drivers/net/wireguard/netlink.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/spi.c | 2
-rw-r--r--  drivers/nfc/virtual_ncidev.c | 13
-rw-r--r--  include/linux/memcontrol.h | 9
-rw-r--r--  include/linux/mlx5/device.h | 69
-rw-r--r--  include/linux/mlx5/driver.h | 1
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 46
-rw-r--r--  include/linux/phy.h | 4
-rw-r--r--  include/net/bluetooth/bluetooth.h | 11
-rw-r--r--  include/net/bluetooth/hci.h | 11
-rw-r--r--  include/net/bluetooth/hci_core.h | 99
-rw-r--r--  include/net/bluetooth/hci_sync.h | 5
-rw-r--r--  include/net/bluetooth/mgmt.h | 2
-rw-r--r--  include/net/bluetooth/sco.h | 2
-rw-r--r--  include/net/dropreason.h | 6
-rw-r--r--  include/net/genetlink.h | 76
-rw-r--r--  include/net/inet_common.h | 2
-rw-r--r--  include/net/inet_connection_sock.h | 7
-rw-r--r--  include/net/inet_sock.h | 92
-rw-r--r--  include/net/ip6_fib.h | 64
-rw-r--r--  include/net/ipv6.h | 4
-rw-r--r--  include/net/mana/mana.h | 87
-rw-r--r--  include/net/netns/nftables.h | 2
-rw-r--r--  include/net/route.h | 2
-rw-r--r--  include/net/tcp.h | 2
-rw-r--r--  include/uapi/linux/openvswitch.h | 2
-rw-r--r--  mm/vmpressure.c | 8
-rw-r--r--  net/bluetooth/af_bluetooth.c | 53
-rw-r--r--  net/bluetooth/amp.h | 1
-rw-r--r--  net/bluetooth/bnep/sock.c | 10
-rw-r--r--  net/bluetooth/coredump.c | 3
-rw-r--r--  net/bluetooth/hci_conn.c | 684
-rw-r--r--  net/bluetooth/hci_core.c | 34
-rw-r--r--  net/bluetooth/hci_debugfs.c | 3
-rw-r--r--  net/bluetooth/hci_event.c | 201
-rw-r--r--  net/bluetooth/hci_request.c | 21
-rw-r--r--  net/bluetooth/hci_sock.c | 77
-rw-r--r--  net/bluetooth/hci_sync.c | 263
-rw-r--r--  net/bluetooth/hidp/sock.c | 10
-rw-r--r--  net/bluetooth/iso.c | 134
-rw-r--r--  net/bluetooth/l2cap_sock.c | 29
-rw-r--r--  net/bluetooth/mgmt.c | 27
-rw-r--r--  net/bluetooth/msft.c | 412
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 13
-rw-r--r--  net/bluetooth/sco.c | 32
-rw-r--r--  net/core/dev.c | 5
-rw-r--r--  net/core/netdev-genl.c | 17
-rw-r--r--  net/core/sock.c | 2
-rw-r--r--  net/dccp/ipv4.c | 4
-rw-r--r--  net/devlink/dev.c | 29
-rw-r--r--  net/devlink/devl_internal.h | 44
-rw-r--r--  net/devlink/health.c | 42
-rw-r--r--  net/devlink/leftover.c | 425
-rw-r--r--  net/devlink/netlink.c | 112
-rw-r--r--  net/devlink/netlink_gen.c | 424
-rw-r--r--  net/devlink/netlink_gen.h | 52
-rw-r--r--  net/ethtool/channels.c | 2
-rw-r--r--  net/ethtool/coalesce.c | 6
-rw-r--r--  net/ethtool/debug.c | 2
-rw-r--r--  net/ethtool/eee.c | 2
-rw-r--r--  net/ethtool/eeprom.c | 9
-rw-r--r--  net/ethtool/features.c | 2
-rw-r--r--  net/ethtool/fec.c | 2
-rw-r--r--  net/ethtool/linkinfo.c | 2
-rw-r--r--  net/ethtool/linkmodes.c | 2
-rw-r--r--  net/ethtool/linkstate.c | 2
-rw-r--r--  net/ethtool/mm.c | 2
-rw-r--r--  net/ethtool/module.c | 5
-rw-r--r--  net/ethtool/netlink.c | 33
-rw-r--r--  net/ethtool/netlink.h | 2
-rw-r--r--  net/ethtool/pause.c | 5
-rw-r--r--  net/ethtool/phc_vclocks.c | 2
-rw-r--r--  net/ethtool/plca.c | 4
-rw-r--r--  net/ethtool/privflags.c | 2
-rw-r--r--  net/ethtool/pse-pd.c | 6
-rw-r--r--  net/ethtool/rings.c | 5
-rw-r--r--  net/ethtool/rss.c | 3
-rw-r--r--  net/ethtool/stats.c | 5
-rw-r--r--  net/ethtool/strset.c | 2
-rw-r--r--  net/ethtool/tsinfo.c | 2
-rw-r--r--  net/ethtool/tunnels.c | 2
-rw-r--r--  net/ethtool/wol.c | 5
-rw-r--r--  net/ieee802154/nl802154.c | 4
-rw-r--r--  net/ipv4/af_inet.c | 62
-rw-r--r--  net/ipv4/cipso_ipv4.c | 4
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/inet_diag.c | 22
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 7
-rw-r--r--  net/ipv4/ip_sockglue.c | 405
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 2
-rw-r--r--  net/ipv4/nexthop.c | 6
-rw-r--r--  net/ipv4/ping.c | 7
-rw-r--r--  net/ipv4/raw.c | 26
-rw-r--r--  net/ipv4/route.c | 8
-rw-r--r--  net/ipv4/tcp.c | 12
-rw-r--r--  net/ipv4/tcp_fastopen.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 22
-rw-r--r--  net/ipv4/tcp_ipv4.c | 5
-rw-r--r--  net/ipv4/tcp_minisocks.c | 3
-rw-r--r--  net/ipv4/tcp_output.c | 14
-rw-r--r--  net/ipv4/tcp_timer.c | 41
-rw-r--r--  net/ipv4/udp.c | 7
-rw-r--r--  net/ipv4/udp_tunnel_core.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 18
-rw-r--r--  net/ipv6/datagram.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 55
-rw-r--r--  net/ipv6/ip6_output.c | 5
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 12
-rw-r--r--  net/ipv6/raw.c | 16
-rw-r--r--  net/ipv6/route.c | 6
-rw-r--r--  net/ipv6/seg6_local.c | 108
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/l2tp/l2tp_ip.c | 2
-rw-r--r--  net/mptcp/pm_netlink.c | 30
-rw-r--r--  net/mptcp/protocol.c | 202
-rw-r--r--  net/mptcp/protocol.h | 15
-rw-r--r--  net/mptcp/sockopt.c | 77
-rw-r--r--  net/ncsi/ncsi-netlink.c | 2
-rw-r--r--  net/ncsi/ncsi-netlink.h | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 4
-rw-r--r--  net/netlink/af_netlink.c | 90
-rw-r--r--  net/netlink/af_netlink.h | 22
-rw-r--r--  net/netlink/diag.c | 10
-rw-r--r--  net/netlink/genetlink.c | 119
-rw-r--r--  net/nfc/netlink.c | 4
-rw-r--r--  net/openvswitch/actions.c | 42
-rw-r--r--  net/openvswitch/conntrack.c | 5
-rw-r--r--  net/openvswitch/datapath.c | 45
-rw-r--r--  net/openvswitch/drop.h | 41
-rw-r--r--  net/openvswitch/flow_netlink.c | 10
-rw-r--r--  net/openvswitch/meter.c | 10
-rw-r--r--  net/rds/rdma_transport.h | 1
-rw-r--r--  net/rds/rds.h | 3
-rw-r--r--  net/rds/tcp.h | 1
-rw-r--r--  net/sctp/input.c | 2
-rw-r--r--  net/sctp/protocol.c | 2
-rw-r--r--  net/sctp/socket.c | 2
-rw-r--r--  net/tipc/netlink_compat.c | 4
-rw-r--r--  net/tipc/node.c | 4
-rw-r--r--  net/tipc/socket.c | 2
-rw-r--r--  net/tipc/udp_media.c | 2
-rw-r--r--  tools/net/ynl/generated/devlink-user.c | 2208
-rw-r--r--  tools/net/ynl/generated/devlink-user.h | 1772
-rw-r--r--  tools/net/ynl/lib/ynl.py | 15
-rwxr-xr-x  tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh | 1
-rw-r--r--  tools/testing/selftests/net/Makefile | 1
-rwxr-xr-x  tools/testing/selftests/net/fib_tests.sh | 72
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_locked_port.sh | 36
-rwxr-xr-x  tools/testing/selftests/net/openvswitch/openvswitch.sh | 102
-rw-r--r--  tools/testing/selftests/net/openvswitch/ovs-dpctl.py | 22
-rwxr-xr-x  tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh | 1213
274 files changed, 13778 insertions, 3838 deletions
diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
index 56cbb42b5aea..2735c6a4f336 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
@@ -19,6 +19,7 @@ properties:
- qcom,qca2066-bt
- qcom,qca6174-bt
- qcom,qca9377-bt
+ - qcom,wcn3988-bt
- qcom,wcn3990-bt
- qcom,wcn3991-bt
- qcom,wcn3998-bt
@@ -111,6 +112,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,wcn3988-bt
- qcom,wcn3990-bt
- qcom,wcn3991-bt
- qcom,wcn3998-bt
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index 6b0d359367da..9f6a5ccbcefe 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -66,6 +66,7 @@ properties:
- mii
- gmii
- sgmii
+ - psgmii
- qsgmii
- qusgmii
- tbi
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index f6df0b3fd502..d1ebcd927149 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -6,6 +6,16 @@ protocol: genetlink-legacy
doc: Partial family for Devlink.
+definitions:
+ -
+ type: enum
+ name: sb-pool-type
+ entries:
+ -
+ name: ingress
+ -
+ name: egress
+
attribute-sets:
-
name: devlink
@@ -25,6 +35,46 @@ attribute-sets:
# TODO: fill in the attributes in between
-
+ name: sb-index
+ type: u32
+ value: 11
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: sb-pool-index
+ type: u16
+ value: 17
+
+ -
+ name: sb-pool-type
+ type: u8
+ enum: sb-pool-type
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: sb-tc-index
+ type: u16
+ value: 22
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: param-name
+ type: string
+ value: 81
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: region-name
+ type: string
+ value: 88
+
+ # TODO: fill in the attributes in between
+
+ -
name: info-driver-name
type: string
value: 98
@@ -56,9 +106,34 @@ attribute-sets:
# TODO: fill in the attributes in between
-
+ name: health-reporter-name
+ type: string
+ value: 115
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: trap-name
+ type: string
+ value: 130
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: trap-group-name
+ type: string
+ value: 135
+
+ -
name: reload-failed
type: u8
- value: 136
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: trap-policer-id
+ type: u32
+ value: 142
# TODO: fill in the attributes in between
@@ -103,6 +178,21 @@ attribute-sets:
type: nest
multi-attr: true
nested-attributes: dl-reload-act-stats
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: rate-node-name
+ type: string
+ value: 168
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: linecard-index
+ type: u32
+ value: 171
+
-
name: dl-dev-stats
subset-of: devlink
@@ -188,6 +278,195 @@ operations:
dump:
reply: *get-reply
+ -
+ name: port-get
+ doc: Get devlink port instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ value: 5
+ attributes: &port-id-attrs
+ - bus-name
+ - dev-name
+ - port-index
+ reply:
+ value: 7
+ attributes: *port-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply:
+ value: 3 # due to a bug, port dump returns DEVLINK_CMD_NEW
+ attributes: *port-id-attrs
+
+ # TODO: fill in the operations in between
+
+ -
+ name: sb-get
+ doc: Get shared buffer instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ value: 11
+ attributes: &sb-id-attrs
+ - bus-name
+ - dev-name
+ - sb-index
+ reply: &sb-get-reply
+ value: 11
+ attributes: *sb-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *sb-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: sb-pool-get
+ doc: Get shared buffer pool instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ value: 15
+ attributes: &sb-pool-id-attrs
+ - bus-name
+ - dev-name
+ - sb-index
+ - sb-pool-index
+ reply: &sb-pool-get-reply
+ value: 15
+ attributes: *sb-pool-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *sb-pool-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: sb-port-pool-get
+ doc: Get shared buffer port-pool combinations and threshold.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ value: 19
+ attributes: &sb-port-pool-id-attrs
+ - bus-name
+ - dev-name
+ - port-index
+ - sb-index
+ - sb-pool-index
+ reply: &sb-port-pool-get-reply
+ value: 19
+ attributes: *sb-port-pool-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *sb-port-pool-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: sb-tc-pool-bind-get
+ doc: Get shared buffer port-TC to pool bindings and threshold.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ value: 23
+ attributes: &sb-tc-pool-bind-id-attrs
+ - bus-name
+ - dev-name
+ - port-index
+ - sb-index
+ - sb-pool-type
+ - sb-tc-index
+ reply: &sb-tc-pool-bind-get-reply
+ value: 23
+ attributes: *sb-tc-pool-bind-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *sb-tc-pool-bind-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: param-get
+ doc: Get param instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ value: 38
+ attributes: &param-id-attrs
+ - bus-name
+ - dev-name
+ - param-name
+ reply: &param-get-reply
+ value: 38
+ attributes: *param-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *param-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: region-get
+ doc: Get region instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit-port-optional
+ post: devlink-nl-post-doit
+ request:
+ value: 42
+ attributes: &region-id-attrs
+ - bus-name
+ - dev-name
+ - port-index
+ - region-name
+ reply: &region-get-reply
+ value: 42
+ attributes: *region-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *region-get-reply
+
# TODO: fill in the operations in between
-
@@ -216,3 +495,179 @@ operations:
- info-version-stored
dump:
reply: *info-get-reply
+
+ -
+ name: health-reporter-get
+ doc: Get health reporter instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit-port-optional
+ post: devlink-nl-post-doit
+ request:
+ attributes: &health-reporter-id-attrs
+ - bus-name
+ - dev-name
+ - port-index
+ - health-reporter-name
+ reply: &health-reporter-get-reply
+ attributes: *health-reporter-id-attrs
+ dump:
+ request:
+ attributes: *port-id-attrs
+ reply: *health-reporter-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: trap-get
+ doc: Get trap instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ value: 61
+ attributes: &trap-id-attrs
+ - bus-name
+ - dev-name
+ - trap-name
+ reply: &trap-get-reply
+ value: 61
+ attributes: *trap-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *trap-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: trap-group-get
+ doc: Get trap group instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ value: 65
+ attributes: &trap-group-id-attrs
+ - bus-name
+ - dev-name
+ - trap-group-name
+ reply: &trap-group-get-reply
+ value: 65
+ attributes: *trap-group-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *trap-group-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: trap-policer-get
+ doc: Get trap policer instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ value: 69
+ attributes: &trap-policer-id-attrs
+ - bus-name
+ - dev-name
+ - trap-policer-id
+ reply: &trap-policer-get-reply
+ value: 69
+ attributes: *trap-policer-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *trap-policer-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: rate-get
+ doc: Get rate instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ value: 74
+ attributes: &rate-id-attrs
+ - bus-name
+ - dev-name
+ - port-index
+ - rate-node-name
+ reply: &rate-get-reply
+ value: 74
+ attributes: *rate-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *rate-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: linecard-get
+ doc: Get line card instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ value: 78
+ attributes: &linecard-id-attrs
+ - bus-name
+ - dev-name
+ - linecard-index
+ reply: &linecard-get-reply
+ value: 78
+ attributes: *linecard-id-attrs
+ dump:
+ request:
+ attributes: *dev-id-attrs
+ reply: *linecard-get-reply
+
+ # TODO: fill in the operations in between
+
+ -
+ name: selftests-get
+ doc: Get device selftest instances.
+ attribute-set: devlink
+ dont-validate:
+ - strict
+ - dump
+
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ value: 82
+ attributes: *dev-id-attrs
+ reply: &selftests-get-reply
+ value: 82
+ attributes: *dev-id-attrs
+ dump:
+ reply: *selftests-get-reply
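[Editor's note] The spec entries added above are what the regenerated userspace code later in this diff (tools/net/ynl/generated/devlink-user.c/.h) and the YNL Python library consume. As a rough, hedged sketch (not part of the patch) of driving the newly spec'ed shared-buffer operations from the YAML alone, the sys.path handling, spec path and the pci/0000:08:00.0 identifiers below are assumptions for illustration only::

    #!/usr/bin/env python3
    # Minimal sketch: exercise the devlink ops added to devlink.yaml via YNL.
    # Assumes it is run from the root of a kernel source tree; the device
    # name is a placeholder.
    import sys

    sys.path.append("tools/net/ynl")   # location of the 'lib' package (assumption)
    from lib import YnlFamily          # same import the in-tree cli.py uses

    ynl = YnlFamily("Documentation/netlink/specs/devlink.yaml")

    # Dump shared-buffer instances across all devlink devices.
    for sb in ynl.dump("sb-get", {}):
        print(sb)

    # Fetch a single shared-buffer pool on one device.
    pool = ynl.do("sb-pool-get", {
        "bus-name": "pci",
        "dev-name": "0000:08:00.0",
        "sb-index": 0,
        "sb-pool-index": 0,
    })
    print(pool)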
diff --git a/Documentation/netlink/specs/ovs_vport.yaml b/Documentation/netlink/specs/ovs_vport.yaml
index 17336455bec1..ef298b001445 100644
--- a/Documentation/netlink/specs/ovs_vport.yaml
+++ b/Documentation/netlink/specs/ovs_vport.yaml
@@ -82,6 +82,10 @@ attribute-sets:
enum-name: ovs-vport-attr
attributes:
-
+ name: unspec
+ type: unused
+ value: 0
+ -
name: port-no
type: u32
-
@@ -121,6 +125,20 @@ operations:
name-prefix: ovs-vport-cmd-
list:
-
+ name: new
+ doc: Create a new OVS vport
+ attribute-set: vport
+ fixed-header: ovs-header
+ do:
+ request:
+ attributes:
+ - name
+ - type
+ - upcall-pid
+ - dp-ifindex
+ - ifindex
+ - options
+ -
name: get
doc: Get / dump OVS vport configuration and state
value: 3
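[Editor's note] The ovs_vport spec now describes the ``new`` command purely in YAML, and the ovs-dpctl.py selftest updated later in this diff grows matching support. As a hedged sketch of talking to the same family through the YNL Python library (here only dumping the vports of one existing datapath), the datapath ifindex, the sys.path handling and the loaded openvswitch module are assumptions::

    #!/usr/bin/env python3
    # Minimal sketch: dump OVS vports of one datapath using ovs_vport.yaml.
    # The ifindex 3 is a placeholder for an existing OVS datapath.
    import sys

    sys.path.append("tools/net/ynl")
    from lib import YnlFamily

    ynl = YnlFamily("Documentation/netlink/specs/ovs_vport.yaml")

    # dp-ifindex lives in the ovs-header fixed header; YNL takes it from
    # the same attribute dict as the regular attributes.
    for vport in ynl.dump("get", {"dp-ifindex": 3}):
        print(vport)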
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
index a395df9c2751..008e560e12b5 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
@@ -683,6 +683,12 @@ the software port.
time protocol.
- Error
+ * - `ptp_cq[i]_late_cqe`
+ - Number of times a CQE has been delivered on the PTP timestamping CQ when
+ the CQE was not expected since a certain amount of time had elapsed where
+ the device typically ensures not posting the CQE.
+ - Error
+
.. [#ring_global] The corresponding ring and global counters do not share the
same name (i.e. do not follow the common naming scheme).
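[Editor's note] Like the other counters documented in counters.rst, the new ``ptp_cq[i]_late_cqe`` entries surface through ``ethtool -S`` per-channel statistics. A small, hedged helper (the interface name ``eth0`` is a placeholder) that filters just these entries::

    #!/usr/bin/env python3
    # Hedged helper: print the mlx5 PTP late-CQE counters from `ethtool -S`.
    import subprocess

    out = subprocess.run(["ethtool", "-S", "eth0"],
                         capture_output=True, text=True, check=True).stdout
    for line in out.splitlines():
        if "late_cqe" in line:
            print(line.strip())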
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst
deleted file mode 100644
index a4edf908b707..000000000000
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst
+++ /dev/null
@@ -1,313 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-.. include:: <isonum.txt>
-
-=======
-Devlink
-=======
-
-:Copyright: |copy| 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-
-Contents
-========
-
-- `Info`_
-- `Parameters`_
-- `Health reporters`_
-
-Info
-====
-
-The devlink info reports the running and stored firmware versions on device.
-It also prints the device PSID which represents the HCA board type ID.
-
-User command example::
-
- $ devlink dev info pci/0000:00:06.0
- pci/0000:00:06.0:
- driver mlx5_core
- versions:
- fixed:
- fw.psid MT_0000000009
- running:
- fw.version 16.26.0100
- stored:
- fw.version 16.26.0100
-
-Parameters
-==========
-
-flow_steering_mode: Device flow steering mode
----------------------------------------------
-The flow steering mode parameter controls the flow steering mode of the driver.
-Two modes are supported:
-
-1. 'dmfs' - Device managed flow steering.
-2. 'smfs' - Software/Driver managed flow steering.
-
-In DMFS mode, the HW steering entities are created and managed through the
-Firmware.
-In SMFS mode, the HW steering entities are created and managed though by
-the driver directly into hardware without firmware intervention.
-
-SMFS mode is faster and provides better rule insertion rate compared to default DMFS mode.
-
-User command examples:
-
-- Set SMFS flow steering mode::
-
- $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "smfs" cmode runtime
-
-- Read device flow steering mode::
-
- $ devlink dev param show pci/0000:06:00.0 name flow_steering_mode
- pci/0000:06:00.0:
- name flow_steering_mode type driver-specific
- values:
- cmode runtime value smfs
-
-enable_roce: RoCE enablement state
-----------------------------------
-If the device supports RoCE disablement, RoCE enablement state controls device
-support for RoCE capability. Otherwise, the control occurs in the driver stack.
-When RoCE is disabled at the driver level, only raw ethernet QPs are supported.
-
-To change RoCE enablement state, a user must change the driverinit cmode value
-and run devlink reload.
-
-User command examples:
-
-- Disable RoCE::
-
- $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
- $ devlink dev reload pci/0000:06:00.0
-
-- Read RoCE enablement state::
-
- $ devlink dev param show pci/0000:06:00.0 name enable_roce
- pci/0000:06:00.0:
- name enable_roce type generic
- values:
- cmode driverinit value true
-
-esw_port_metadata: Eswitch port metadata state
-----------------------------------------------
-When applicable, disabling eswitch metadata can increase packet rate
-up to 20% depending on the use case and packet sizes.
-
-Eswitch port metadata state controls whether to internally tag packets with
-metadata. Metadata tagging must be enabled for multi-port RoCE, failover
-between representors and stacked devices.
-By default metadata is enabled on the supported devices in E-switch.
-Metadata is applicable only for E-switch in switchdev mode and
-users may disable it when NONE of the below use cases will be in use:
-
-1. HCA is in Dual/multi-port RoCE mode.
-2. VF/SF representor bonding (Usually used for Live migration)
-3. Stacked devices
-
-When metadata is disabled, the above use cases will fail to initialize if
-users try to enable them.
-
-- Show eswitch port metadata::
-
- $ devlink dev param show pci/0000:06:00.0 name esw_port_metadata
- pci/0000:06:00.0:
- name esw_port_metadata type driver-specific
- values:
- cmode runtime value true
-
-- Disable eswitch port metadata::
-
- $ devlink dev param set pci/0000:06:00.0 name esw_port_metadata value false cmode runtime
-
-- Change eswitch mode to switchdev mode where after choosing the metadata value::
-
- $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
-
-hairpin_num_queues: Number of hairpin queues
---------------------------------------------
-We refer to a TC NIC rule that involves forwarding as "hairpin".
-
-Hairpin queues are mlx5 hardware specific implementation for hardware
-forwarding of such packets.
-
-- Show the number of hairpin queues::
-
- $ devlink dev param show pci/0000:06:00.0 name hairpin_num_queues
- pci/0000:06:00.0:
- name hairpin_num_queues type driver-specific
- values:
- cmode driverinit value 2
-
-- Change the number of hairpin queues::
-
- $ devlink dev param set pci/0000:06:00.0 name hairpin_num_queues value 4 cmode driverinit
-
-hairpin_queue_size: Size of the hairpin queues
-----------------------------------------------
-Control the size of the hairpin queues.
-
-- Show the size of the hairpin queues::
-
- $ devlink dev param show pci/0000:06:00.0 name hairpin_queue_size
- pci/0000:06:00.0:
- name hairpin_queue_size type driver-specific
- values:
- cmode driverinit value 1024
-
-- Change the size (in packets) of the hairpin queues::
-
- $ devlink dev param set pci/0000:06:00.0 name hairpin_queue_size value 512 cmode driverinit
-
-Health reporters
-================
-
-tx reporter
------------
-The tx reporter is responsible for reporting and recovering of the following two error scenarios:
-
-- tx timeout
- Report on kernel tx timeout detection.
- Recover by searching lost interrupts.
-- tx error completion
- Report on error tx completion.
- Recover by flushing the tx queue and reset it.
-
-tx reporter also support on demand diagnose callback, on which it provides
-real time information of its send queues status.
-
-User commands examples:
-
-- Diagnose send queues status::
-
- $ devlink health diagnose pci/0000:82:00.0 reporter tx
-
-.. note::
- This command has valid output only when interface is up, otherwise the command has empty output.
-
-- Show number of tx errors indicated, number of recover flows ended successfully,
- is autorecover enabled and graceful period from last recover::
-
- $ devlink health show pci/0000:82:00.0 reporter tx
-
-rx reporter
------------
-The rx reporter is responsible for reporting and recovering of the following two error scenarios:
-
-- rx queues' initialization (population) timeout
- Population of rx queues' descriptors on ring initialization is done
- in napi context via triggering an irq. In case of a failure to get
- the minimum amount of descriptors, a timeout would occur, and
- descriptors could be recovered by polling the EQ (Event Queue).
-- rx completions with errors (reported by HW on interrupt context)
- Report on rx completion error.
- Recover (if needed) by flushing the related queue and reset it.
-
-rx reporter also supports on demand diagnose callback, on which it
-provides real time information of its receive queues' status.
-
-- Diagnose rx queues' status and corresponding completion queue::
-
- $ devlink health diagnose pci/0000:82:00.0 reporter rx
-
-NOTE: This command has valid output only when interface is up. Otherwise, the command has empty output.
-
-- Show number of rx errors indicated, number of recover flows ended successfully,
- is autorecover enabled, and graceful period from last recover::
-
- $ devlink health show pci/0000:82:00.0 reporter rx
-
-fw reporter
------------
-The fw reporter implements `diagnose` and `dump` callbacks.
-It follows symptoms of fw error such as fw syndrome by triggering
-fw core dump and storing it into the dump buffer.
-The fw reporter diagnose command can be triggered any time by the user to check
-current fw status.
-
-User commands examples:
-
-- Check fw heath status::
-
- $ devlink health diagnose pci/0000:82:00.0 reporter fw
-
-- Read FW core dump if already stored or trigger new one::
-
- $ devlink health dump show pci/0000:82:00.0 reporter fw
-
-.. note::
- This command can run only on the PF which has fw tracer ownership,
- running it on other PF or any VF will return "Operation not permitted".
-
-fw fatal reporter
------------------
-The fw fatal reporter implements `dump` and `recover` callbacks.
-It follows fatal errors indications by CR-space dump and recover flow.
-The CR-space dump uses vsc interface which is valid even if the FW command
-interface is not functional, which is the case in most FW fatal errors.
-The recover function runs recover flow which reloads the driver and triggers fw
-reset if needed.
-On firmware error, the health buffer is dumped into the dmesg. The log
-level is derived from the error's severity (given in health buffer).
-
-User commands examples:
-
-- Run fw recover flow manually::
-
- $ devlink health recover pci/0000:82:00.0 reporter fw_fatal
-
-- Read FW CR-space dump if already stored or trigger new one::
-
- $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
-
-.. note::
- This command can run only on PF.
-
-vnic reporter
--------------
-The vnic reporter implements only the `diagnose` callback.
-It is responsible for querying the vnic diagnostic counters from fw and displaying
-them in realtime.
-
-Description of the vnic counters:
-
-- total_q_under_processor_handle
- number of queues in an error state due to
- an async error or errored command.
-- send_queue_priority_update_flow
- number of QP/SQ priority/SL update events.
-- cq_overrun
- number of times CQ entered an error state due to an overflow.
-- async_eq_overrun
- number of times an EQ mapped to async events was overrun.
- comp_eq_overrun number of times an EQ mapped to completion events was
- overrun.
-- quota_exceeded_command
- number of commands issued and failed due to quota exceeded.
-- invalid_command
- number of commands issued and failed dues to any reason other than quota
- exceeded.
-- nic_receive_steering_discard
- number of packets that completed RX flow
- steering but were discarded due to a mismatch in flow table.
-- generated_pkt_steering_fail
- number of packets generated by the VNIC experiencing unexpected steering
- failure (at any point in steering flow).
-- handled_pkt_steering_fail
- number of packets handled by the VNIC experiencing unexpected steering
- failure (at any point in steering flow owned by the VNIC, including the FDB
- for the eswitch owner).
-
-User commands examples:
-
-- Diagnose PF/VF vnic counters::
-
- $ devlink health diagnose pci/0000:82:00.1 reporter vnic
-
-- Diagnose representor vnic counters (performed by supplying devlink port of the
- representor, which can be obtained via devlink port command)::
-
- $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic
-
-.. note::
- This command can run over all interfaces such as PF/VF and representor ports.
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
index 3fdcd6b61ccf..581a91caa579 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/index.rst
@@ -13,7 +13,6 @@ Contents:
:maxdepth: 2
kconfig
- devlink
switchdev
tracepoints
counters
diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
index 202798d6501e..702f204a3dbd 100644
--- a/Documentation/networking/devlink/mlx5.rst
+++ b/Documentation/networking/devlink/mlx5.rst
@@ -18,6 +18,11 @@ Parameters
* - ``enable_roce``
- driverinit
- Type: Boolean
+
+ If the device supports RoCE disablement, RoCE enablement state controls
+ device support for RoCE capability. Otherwise, the control occurs in the
+ driver stack. When RoCE is disabled at the driver level, only raw
+ ethernet QPs are supported.
* - ``io_eq_size``
- driverinit
- The range is between 64 and 4096.
@@ -48,6 +53,9 @@ parameters.
* ``smfs`` Software managed flow steering. In SMFS mode, the HW
steering entities are created and manage through the driver without
firmware intervention.
+
+ SMFS mode is faster and provides better rule insertion rate compared to
+ default DMFS mode.
* - ``fdb_large_groups``
- u32
- driverinit
@@ -71,7 +79,24 @@ parameters.
deprecated.
Default: disabled
+ * - ``esw_port_metadata``
+ - Boolean
+ - runtime
+ - When applicable, disabling eswitch metadata can increase packet rate up
+ to 20% depending on the use case and packet sizes.
+
+ Eswitch port metadata state controls whether to internally tag packets
+ with metadata. Metadata tagging must be enabled for multi-port RoCE,
+ failover between representors and stacked devices. By default metadata is
+ enabled on the supported devices in E-switch. Metadata is applicable only
+ for E-switch in switchdev mode and users may disable it when NONE of the
+ below use cases will be in use:
+ 1. HCA is in Dual/multi-port RoCE mode.
+ 2. VF/SF representor bonding (Usually used for Live migration)
+ 3. Stacked devices
+ When metadata is disabled, the above use cases will fail to initialize if
+ users try to enable them.
* - ``hairpin_num_queues``
- u32
- driverinit
@@ -104,3 +129,160 @@ The ``mlx5`` driver reports the following versions
* - ``fw.version``
- stored, running
- Three digit major.minor.subminor firmware version number.
+
+Health reporters
+================
+
+tx reporter
+-----------
+The tx reporter is responsible for reporting and recovering of the following three error scenarios:
+
+- tx timeout
+ Report on kernel tx timeout detection.
+ Recover by searching lost interrupts.
+- tx error completion
+ Report on error tx completion.
+ Recover by flushing the tx queue and reset it.
+- tx PTP port timestamping CQ unhealthy
+ Report too many CQEs never delivered on port ts CQ.
+ Recover by flushing and re-creating all PTP channels.
+
+tx reporter also support on demand diagnose callback, on which it provides
+real time information of its send queues status.
+
+User commands examples:
+
+- Diagnose send queues status::
+
+ $ devlink health diagnose pci/0000:82:00.0 reporter tx
+
+.. note::
+ This command has valid output only when interface is up, otherwise the command has empty output.
+
+- Show number of tx errors indicated, number of recover flows ended successfully,
+ is autorecover enabled and graceful period from last recover::
+
+ $ devlink health show pci/0000:82:00.0 reporter tx
+
+rx reporter
+-----------
+The rx reporter is responsible for reporting and recovering of the following two error scenarios:
+
+- rx queues' initialization (population) timeout
+ Population of rx queues' descriptors on ring initialization is done
+ in napi context via triggering an irq. In case of a failure to get
+ the minimum amount of descriptors, a timeout would occur, and
+ descriptors could be recovered by polling the EQ (Event Queue).
+- rx completions with errors (reported by HW on interrupt context)
+ Report on rx completion error.
+ Recover (if needed) by flushing the related queue and reset it.
+
+rx reporter also supports on demand diagnose callback, on which it
+provides real time information of its receive queues' status.
+
+- Diagnose rx queues' status and corresponding completion queue::
+
+ $ devlink health diagnose pci/0000:82:00.0 reporter rx
+
+.. note::
+ This command has valid output only when interface is up. Otherwise, the command has empty output.
+
+- Show number of rx errors indicated, number of recover flows ended successfully,
+ is autorecover enabled, and graceful period from last recover::
+
+ $ devlink health show pci/0000:82:00.0 reporter rx
+
+fw reporter
+-----------
+The fw reporter implements `diagnose` and `dump` callbacks.
+It follows symptoms of fw error such as fw syndrome by triggering
+fw core dump and storing it into the dump buffer.
+The fw reporter diagnose command can be triggered any time by the user to check
+current fw status.
+
+User commands examples:
+
+- Check fw heath status::
+
+ $ devlink health diagnose pci/0000:82:00.0 reporter fw
+
+- Read FW core dump if already stored or trigger new one::
+
+ $ devlink health dump show pci/0000:82:00.0 reporter fw
+
+.. note::
+ This command can run only on the PF which has fw tracer ownership,
+ running it on other PF or any VF will return "Operation not permitted".
+
+fw fatal reporter
+-----------------
+The fw fatal reporter implements `dump` and `recover` callbacks.
+It follows fatal errors indications by CR-space dump and recover flow.
+The CR-space dump uses vsc interface which is valid even if the FW command
+interface is not functional, which is the case in most FW fatal errors.
+The recover function runs recover flow which reloads the driver and triggers fw
+reset if needed.
+On firmware error, the health buffer is dumped into the dmesg. The log
+level is derived from the error's severity (given in health buffer).
+
+User commands examples:
+
+- Run fw recover flow manually::
+
+ $ devlink health recover pci/0000:82:00.0 reporter fw_fatal
+
+- Read FW CR-space dump if already stored or trigger new one::
+
+ $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
+
+.. note::
+ This command can run only on PF.
+
+vnic reporter
+-------------
+The vnic reporter implements only the `diagnose` callback.
+It is responsible for querying the vnic diagnostic counters from fw and displaying
+them in realtime.
+
+Description of the vnic counters:
+
+- total_q_under_processor_handle
+ number of queues in an error state due to
+ an async error or errored command.
+- send_queue_priority_update_flow
+ number of QP/SQ priority/SL update events.
+- cq_overrun
+ number of times CQ entered an error state due to an overflow.
+- async_eq_overrun
+ number of times an EQ mapped to async events was overrun.
+ comp_eq_overrun number of times an EQ mapped to completion events was
+ overrun.
+- quota_exceeded_command
+ number of commands issued and failed due to quota exceeded.
+- invalid_command
+ number of commands issued and failed dues to any reason other than quota
+ exceeded.
+- nic_receive_steering_discard
+ number of packets that completed RX flow
+ steering but were discarded due to a mismatch in flow table.
+- generated_pkt_steering_fail
+ number of packets generated by the VNIC experiencing unexpected steering
+ failure (at any point in steering flow).
+- handled_pkt_steering_fail
+ number of packets handled by the VNIC experiencing unexpected steering
+ failure (at any point in steering flow owned by the VNIC, including the FDB
+ for the eswitch owner).
+
+User commands examples:
+
+- Diagnose PF/VF vnic counters::
+
+ $ devlink health diagnose pci/0000:82:00.1 reporter vnic
+
+- Diagnose representor vnic counters (performed by supplying devlink port of the
+ representor, which can be obtained via devlink port command)::
+
+ $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic
+
+.. note::
+ This command can run over all interfaces such as PF/VF and representor ports.
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index b7ac4c64cf67..1283240d7620 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -323,6 +323,10 @@ Some of the interface modes are described below:
contrast with the 1000BASE-X phy mode used for Clause 38 and 39 PMDs, this
interface mode has different autonegotiation and only supports full duplex.
+``PHY_INTERFACE_MODE_PSGMII``
+ This is the Penta SGMII mode, it is similar to QSGMII but it combines 5
+ SGMII lines into a single link compared to 4 on QSGMII.
+
Pause frames / flow control
===========================
diff --git a/MAINTAINERS b/MAINTAINERS
index d984c9a7b12c..f150561a396e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14658,7 +14658,7 @@ F: drivers/rtc/rtc-ntxec.c
F: include/linux/mfd/ntxec.h
NETRONOME ETHERNET DRIVERS
-M: Simon Horman <[email protected]>
+M: Louis Peens <[email protected]>
R: Jakub Kicinski <[email protected]>
S: Maintained
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index cddae6f4b00f..d3538bd83fb3 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -159,7 +159,7 @@ static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
- struct drbd_genlmsghdr *d_in = info->userhdr;
+ struct drbd_genlmsghdr *d_in = genl_info_userhdr(info);
const u8 cmd = info->genlhdr->cmd;
int err;
@@ -1396,8 +1396,9 @@ static void drbd_suspend_al(struct drbd_device *device)
static bool should_set_defaults(struct genl_info *info)
{
- unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
- return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
+ struct drbd_genlmsghdr *dh = genl_info_userhdr(info);
+
+ return 0 != (dh->flags & DRBD_GENL_F_SET_DEFAULTS);
}
static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
@@ -4276,7 +4277,7 @@ static void device_to_info(struct device_info *info,
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
- struct drbd_genlmsghdr *dh = info->userhdr;
+ struct drbd_genlmsghdr *dh = genl_info_userhdr(info);
enum drbd_ret_code retcode;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index de2ea589aa49..0a5445ac5e1b 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -24,6 +24,7 @@
#define BDADDR_BCM20702A1 (&(bdaddr_t) {{0x00, 0x00, 0xa0, 0x02, 0x70, 0x20}})
#define BDADDR_BCM2076B1 (&(bdaddr_t) {{0x79, 0x56, 0x00, 0xa0, 0x76, 0x20}})
#define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
+#define BDADDR_BCM43430A1 (&(bdaddr_t) {{0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa}})
#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
#define BDADDR_BCM4334B0 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb0, 0x34, 0x43}})
@@ -115,6 +116,9 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
*
* The address 43:43:A0:12:1F:AC indicates a BCM43430A0 controller
* with no configured address.
+ *
+ * The address AA:AA:AA:AA:AA:AA indicates a BCM43430A1 controller
+ * with no configured address.
*/
if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM20702A1) ||
@@ -124,6 +128,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
!bacmp(&bda->bdaddr, BDADDR_BCM4334B0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4345C5) ||
!bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM43430A1) ||
!bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
/* Try falling back to BDADDR EFI variable */
if (btbcm_set_bdaddr_from_efi(hdev) != 0) {
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index d9349ba48281..633e8d9bf58f 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -10,6 +10,7 @@
#include <linux/firmware.h>
#include <linux/regmap.h>
#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -27,6 +28,11 @@
#define BTINTEL_PPAG_NAME "PPAG"
+enum {
+ DSM_SET_WDISABLE2_DELAY = 1,
+ DSM_SET_RESET_METHOD = 3,
+};
+
/* structure to store the PPAG data read from ACPI table */
struct btintel_ppag {
u32 domain;
@@ -49,6 +55,10 @@ static struct {
u32 fw_build_num;
} coredump_info;
+static const guid_t btintel_guid_dsm =
+ GUID_INIT(0xaa10f4e0, 0x81ac, 0x4233,
+ 0xab, 0xf6, 0x3b, 0x2a, 0xc5, 0x0e, 0x28, 0xd9);
+
int btintel_check_bdaddr(struct hci_dev *hdev)
{
struct hci_rp_read_bd_addr *bda;
@@ -470,6 +480,7 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x18: /* Slr */
case 0x19: /* Slr-F */
case 0x1b: /* Mgr */
+ case 0x1c: /* Gale Peak (GaP) */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -2444,6 +2455,116 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
kfree_skb(skb);
}
+static int btintel_acpi_reset_method(struct hci_dev *hdev)
+{
+ int ret = 0;
+ acpi_status status;
+ union acpi_object *p, *ref;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = acpi_evaluate_object(ACPI_HANDLE(GET_HCIDEV_DEV(hdev)), "_PRR", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ bt_dev_err(hdev, "Failed to run _PRR method");
+ ret = -ENODEV;
+ return ret;
+ }
+ p = buffer.pointer;
+
+ if (p->package.count != 1 || p->type != ACPI_TYPE_PACKAGE) {
+ bt_dev_err(hdev, "Invalid arguments");
+ ret = -EINVAL;
+ goto exit_on_error;
+ }
+
+ ref = &p->package.elements[0];
+ if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) {
+ bt_dev_err(hdev, "Invalid object type: 0x%x", ref->type);
+ ret = -EINVAL;
+ goto exit_on_error;
+ }
+
+ status = acpi_evaluate_object(ref->reference.handle, "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ bt_dev_err(hdev, "Failed to run_RST method");
+ ret = -ENODEV;
+ goto exit_on_error;
+ }
+
+exit_on_error:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static void btintel_set_dsm_reset_method(struct hci_dev *hdev,
+ struct intel_version_tlv *ver_tlv)
+{
+ struct btintel_data *data = hci_get_priv(hdev);
+ acpi_handle handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev));
+ u8 reset_payload[4] = {0x01, 0x00, 0x01, 0x00};
+ union acpi_object *obj, argv4;
+ enum {
+ RESET_TYPE_WDISABLE2,
+ RESET_TYPE_VSEC
+ };
+
+ handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev));
+
+ if (!handle) {
+ bt_dev_dbg(hdev, "No support for bluetooth device in ACPI firmware");
+ return;
+ }
+
+ if (!acpi_has_method(handle, "_PRR")) {
+ bt_dev_err(hdev, "No support for _PRR ACPI method");
+ return;
+ }
+
+ switch (ver_tlv->cnvi_top & 0xfff) {
+ case 0x910: /* GalePeak2 */
+ reset_payload[2] = RESET_TYPE_VSEC;
+ break;
+ default:
+ /* WDISABLE2 is the default reset method */
+ reset_payload[2] = RESET_TYPE_WDISABLE2;
+
+ if (!acpi_check_dsm(handle, &btintel_guid_dsm, 0,
+ BIT(DSM_SET_WDISABLE2_DELAY))) {
+ bt_dev_err(hdev, "No dsm support to set reset delay");
+ return;
+ }
+ argv4.integer.type = ACPI_TYPE_INTEGER;
+ /* delay required to toggle BT power */
+ argv4.integer.value = 160;
+ obj = acpi_evaluate_dsm(handle, &btintel_guid_dsm, 0,
+ DSM_SET_WDISABLE2_DELAY, &argv4);
+ if (!obj) {
+ bt_dev_err(hdev, "Failed to call dsm to set reset delay");
+ return;
+ }
+ ACPI_FREE(obj);
+ }
+
+ bt_dev_info(hdev, "DSM reset method type: 0x%02x", reset_payload[2]);
+
+ if (!acpi_check_dsm(handle, &btintel_guid_dsm, 0,
+ DSM_SET_RESET_METHOD)) {
+ bt_dev_warn(hdev, "No support for dsm to set reset method");
+ return;
+ }
+ argv4.buffer.type = ACPI_TYPE_BUFFER;
+ argv4.buffer.length = sizeof(reset_payload);
+ argv4.buffer.pointer = reset_payload;
+
+ obj = acpi_evaluate_dsm(handle, &btintel_guid_dsm, 0,
+ DSM_SET_RESET_METHOD, &argv4);
+ if (!obj) {
+ bt_dev_err(hdev, "Failed to call dsm to set reset method");
+ return;
+ }
+ ACPI_FREE(obj);
+ data->acpi_reset_method = btintel_acpi_reset_method;
+}
+
static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@@ -2528,6 +2649,7 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x18:
case 0x19:
case 0x1b:
+ case 0x1c:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -2742,6 +2864,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x18:
case 0x19:
case 0x1b:
+ case 0x1c:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -2757,6 +2880,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
+ btintel_set_dsm_reset_method(hdev, &ver_tlv);
err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
btintel_register_devcoredump_support(hdev);
@@ -2824,6 +2948,80 @@ int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name)
}
EXPORT_SYMBOL_GPL(btintel_configure_setup);
+static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct intel_tlv *tlv = (void *)&skb->data[5];
+
+ /* The first event is always an event type TLV */
+ if (tlv->type != INTEL_TLV_TYPE_ID)
+ goto recv_frame;
+
+ switch (tlv->val[0]) {
+ case INTEL_TLV_SYSTEM_EXCEPTION:
+ case INTEL_TLV_FATAL_EXCEPTION:
+ case INTEL_TLV_DEBUG_EXCEPTION:
+ case INTEL_TLV_TEST_EXCEPTION:
+ /* Generate devcoredump from exception */
+ if (!hci_devcd_init(hdev, skb->len)) {
+ hci_devcd_append(hdev, skb);
+ hci_devcd_complete(hdev);
+ } else {
+ bt_dev_err(hdev, "Failed to generate devcoredump");
+ kfree_skb(skb);
+ }
+ return 0;
+ default:
+ bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]);
+ }
+
+recv_frame:
+ return hci_recv_frame(hdev, skb);
+}
+
+int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
+
+ if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
+ hdr->plen > 0) {
+ const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
+ unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
+
+ if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
+ switch (skb->data[2]) {
+ case 0x02:
+ /* When switching to the operational firmware
+ * the device sends a vendor specific event
+ * indicating that the bootup completed.
+ */
+ btintel_bootup(hdev, ptr, len);
+ break;
+ case 0x06:
+ /* When the firmware loading completes the
+ * device sends out a vendor specific event
+ * indicating the result of the firmware
+ * loading.
+ */
+ btintel_secure_send_result(hdev, ptr, len);
+ break;
+ }
+ }
+
+ /* Handle all diagnostics events separately. May still call
+ * hci_recv_frame.
+ */
+ if (len >= sizeof(diagnostics_hdr) &&
+ memcmp(&skb->data[2], diagnostics_hdr,
+ sizeof(diagnostics_hdr)) == 0) {
+ return btintel_diagnostics(hdev, skb);
+ }
+ }
+
+ return hci_recv_frame(hdev, skb);
+}
+EXPORT_SYMBOL_GPL(btintel_recv_event);
+
void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len)
{
const struct intel_bootup *evt = ptr;
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index d6a1dc8d8a82..2ed646609dee 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -166,12 +166,14 @@ enum {
INTEL_BROKEN_SHUTDOWN_LED,
INTEL_ROM_LEGACY,
INTEL_ROM_LEGACY_NO_WBS_SUPPORT,
+ INTEL_ACPI_RESET_ACTIVE,
__INTEL_NUM_FLAGS,
};
struct btintel_data {
DECLARE_BITMAP(flags, __INTEL_NUM_FLAGS);
+ int (*acpi_reset_method)(struct hci_dev *hdev);
};
#define btintel_set_flag(hdev, nr) \
@@ -220,6 +222,7 @@ int btintel_read_boot_params(struct hci_dev *hdev,
int btintel_download_firmware(struct hci_dev *dev, struct intel_version *ver,
const struct firmware *fw, u32 *boot_param);
int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name);
+int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb);
void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len);
void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len);
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 809762d64fc6..aaabb732082c 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -53,10 +53,61 @@ struct btmtk_section_map {
};
} __packed;
+static void btmtk_coredump(struct hci_dev *hdev)
+{
+ int err;
+
+ err = __hci_cmd_send(hdev, 0xfd5b, 0, NULL);
+ if (err < 0)
+ bt_dev_err(hdev, "Coredump failed (%d)", err);
+}
+
+static void btmtk_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmediatek_data *data = hci_get_priv(hdev);
+ char buf[80];
+
+ snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
+ data->dev_id);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Firmware Version: 0x%X\n",
+ data->cd_info.fw_version);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Driver: %s\n",
+ data->cd_info.driver_name);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Vendor: MediaTek\n");
+ skb_put_data(skb, buf, strlen(buf));
+}
+
+static void btmtk_coredump_notify(struct hci_dev *hdev, int state)
+{
+ struct btmediatek_data *data = hci_get_priv(hdev);
+
+ switch (state) {
+ case HCI_DEVCOREDUMP_IDLE:
+ data->cd_info.state = HCI_DEVCOREDUMP_IDLE;
+ break;
+ case HCI_DEVCOREDUMP_ACTIVE:
+ data->cd_info.state = HCI_DEVCOREDUMP_ACTIVE;
+ break;
+ case HCI_DEVCOREDUMP_TIMEOUT:
+ case HCI_DEVCOREDUMP_ABORT:
+ case HCI_DEVCOREDUMP_DONE:
+ data->cd_info.state = HCI_DEVCOREDUMP_IDLE;
+ btmtk_reset_sync(hdev);
+ break;
+ }
+}
+
int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
{
struct btmtk_hci_wmt_params wmt_params;
+ struct btmtk_patch_header *hdr;
struct btmtk_global_desc *globaldesc = NULL;
struct btmtk_section_map *sectionmap;
const struct firmware *fw;
@@ -75,9 +126,13 @@ int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
fw_ptr = fw->data;
fw_bin_ptr = fw_ptr;
+ hdr = (struct btmtk_patch_header *)fw_ptr;
globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
section_num = le32_to_cpu(globaldesc->section_num);
+ bt_dev_info(hdev, "HW/SW Version: 0x%04x%04x, Build Time: %s",
+ le16_to_cpu(hdr->hwver), le16_to_cpu(hdr->swver), hdr->datetime);
+
for (i = 0; i < section_num; i++) {
first_block = 1;
fw_ptr = fw_bin_ptr;
@@ -280,6 +335,83 @@ int btmtk_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(btmtk_set_bdaddr);
+void btmtk_reset_sync(struct hci_dev *hdev)
+{
+ struct btmediatek_data *reset_work = hci_get_priv(hdev);
+ int err;
+
+ hci_dev_lock(hdev);
+
+ err = hci_cmd_sync_queue(hdev, reset_work->reset_sync, NULL, NULL);
+ if (err)
+ bt_dev_err(hdev, "failed to reset (%d)", err);
+
+ hci_dev_unlock(hdev);
+}
+EXPORT_SYMBOL_GPL(btmtk_reset_sync);
+
+int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
+ u32 fw_version)
+{
+ struct btmediatek_data *data = hci_get_priv(hdev);
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ return -EOPNOTSUPP;
+
+ data->cd_info.fw_version = fw_version;
+ data->cd_info.state = HCI_DEVCOREDUMP_IDLE;
+ data->cd_info.driver_name = name;
+
+ return hci_devcd_register(hdev, btmtk_coredump, btmtk_coredump_hdr,
+ btmtk_coredump_notify);
+}
+EXPORT_SYMBOL_GPL(btmtk_register_coredump);
+
+int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmediatek_data *data = hci_get_priv(hdev);
+ int err;
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ return 0;
+
+ switch (data->cd_info.state) {
+ case HCI_DEVCOREDUMP_IDLE:
+ err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
+ if (err < 0)
+ break;
+ data->cd_info.cnt = 0;
+
+		/* The coredump is expected to complete within 5 seconds */
+ schedule_delayed_work(&hdev->dump.dump_timeout,
+ msecs_to_jiffies(5000));
+ fallthrough;
+ case HCI_DEVCOREDUMP_ACTIVE:
+ default:
+ err = hci_devcd_append(hdev, skb);
+ if (err < 0)
+ break;
+ data->cd_info.cnt++;
+
+		/* MediaTek coredump data is expected to span more than MTK_COREDUMP_NUM packets */
+ if (data->cd_info.cnt > MTK_COREDUMP_NUM &&
+ skb->len > MTK_COREDUMP_END_LEN)
+ if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
+ MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) {
+ bt_dev_info(hdev, "Mediatek coredump end");
+ hci_devcd_complete(hdev);
+ }
+
+ break;
+ }
+
+ if (err < 0)
+ kfree_skb(skb);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btmtk_process_coredump);
+
MODULE_AUTHOR("Sean Wang <[email protected]>");
MODULE_AUTHOR("Mark Chen <[email protected]>");
MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION);
@@ -289,3 +421,4 @@ MODULE_FIRMWARE(FIRMWARE_MT7622);
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
MODULE_FIRMWARE(FIRMWARE_MT7961);
+MODULE_FIRMWARE(FIRMWARE_MT7925);
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
index 2a88ea8e475e..56f5502baadf 100644
--- a/drivers/bluetooth/btmtk.h
+++ b/drivers/bluetooth/btmtk.h
@@ -5,6 +5,7 @@
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
+#define FIRMWARE_MT7925 "mediatek/mt7925/BT_RAM_CODE_MT7925_1_1_hdr.bin"
#define HCI_EV_WMT 0xe4
#define HCI_WMT_MAX_EVENT_SIZE 64
@@ -21,6 +22,11 @@
#define MT7921_DLSTATUS 0x7c053c10
#define BT_DL_STATE BIT(1)
+#define MTK_COREDUMP_SIZE (1024 * 1000)
+#define MTK_COREDUMP_END "coredump end"
+#define MTK_COREDUMP_END_LEN (sizeof(MTK_COREDUMP_END))
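+/* sizeof() counts the terminating NUL, so matches compare MTK_COREDUMP_END_LEN - 1 bytes */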
+#define MTK_COREDUMP_NUM 255
+
enum {
BTMTK_WMT_PATCH_DWNLD = 0x1,
BTMTK_WMT_TEST = 0x2,
@@ -119,6 +125,21 @@ struct btmtk_hci_wmt_params {
u32 *status;
};
+typedef int (*btmtk_reset_sync_func_t)(struct hci_dev *, void *);
+
+struct btmtk_coredump_info {
+ const char *driver_name;
+ u32 fw_version;
+ u16 cnt;
+ int state;
+};
+
+struct btmediatek_data {
+ u32 dev_id;
+ btmtk_reset_sync_func_t reset_sync;
+ struct btmtk_coredump_info cd_info;
+};
+
typedef int (*wmt_cmd_sync_func_t)(struct hci_dev *,
struct btmtk_hci_wmt_params *);
@@ -131,6 +152,13 @@ int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync);
+
+void btmtk_reset_sync(struct hci_dev *hdev);
+
+int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
+ u32 fw_version);
+
+int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb);
#else
static inline int btmtk_set_bdaddr(struct hci_dev *hdev,
@@ -151,4 +179,18 @@ static int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
return -EOPNOTSUPP;
}
+static void btmtk_reset_sync(struct hci_dev *hdev)
+{
+}
+
+static int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
+ u32 fw_version)
+{
+ return -EOPNOTSUPP;
+}
+
+static int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
#endif
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 7680c67cdb35..935feab815d9 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 52ef44688d38..ee6f6c872a34 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -29,16 +29,25 @@
#define BTNXPUART_CHECK_BOOT_SIGNATURE 3
#define BTNXPUART_SERDEV_OPEN 4
-#define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin"
-#define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin"
-#define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin"
-#define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin"
-#define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se"
-#define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin"
+#define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin"
+#define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin"
+#define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin"
+#define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin"
+#define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se"
+#define FIRMWARE_AW693 "nxp/uartaw693_bt.bin"
+#define FIRMWARE_SECURE_AW693 "nxp/uartaw693_bt.bin.se"
+#define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin"
#define CHIP_ID_W9098 0x5c03
#define CHIP_ID_IW416 0x7201
#define CHIP_ID_IW612 0x7601
+#define CHIP_ID_AW693 0x8200
+
+#define FW_SECURE_MASK 0xc0
+#define FW_OPEN 0x00
+#define FW_AUTH_ILLEGAL 0x40
+#define FW_AUTH_PLAIN 0x80
+#define FW_AUTH_ENC 0xc0
#define HCI_NXP_PRI_BAUDRATE 115200
#define HCI_NXP_SEC_BAUDRATE 3000000
@@ -665,6 +674,9 @@ static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name)
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
int err = 0;
+ if (!fw_name)
+ return -ENOENT;
+
if (!strlen(nxpdev->fw_name)) {
snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "%s", fw_name);
@@ -812,7 +824,8 @@ free_skb:
return 0;
}
-static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid)
+static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
+ u8 loader_ver)
{
char *fw_name = NULL;
@@ -826,6 +839,14 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid)
case CHIP_ID_IW612:
fw_name = FIRMWARE_IW612;
break;
+ case CHIP_ID_AW693:
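+		/* The secure-boot field of loader_ver selects the plain or signed (.se) firmware image */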
+ if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
+ fw_name = FIRMWARE_AW693;
+ else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
+ fw_name = FIRMWARE_SECURE_AW693;
+ else
+ bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
+ break;
default:
bt_dev_err(hdev, "Unknown chip signature %04x", chipid);
break;
@@ -838,13 +859,15 @@ static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb)
struct v3_start_ind *req = skb_pull_data(skb, sizeof(*req));
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
u16 chip_id;
+ u8 loader_ver;
if (!process_boot_signature(nxpdev))
goto free_skb;
chip_id = le16_to_cpu(req->chip_id);
+ loader_ver = req->loader_ver;
if (!nxp_request_firmware(hdev, nxp_get_fw_name_from_chipid(hdev,
- chip_id)))
+ chip_id, loader_ver)))
nxp_send_ack(NXP_ACK_V3, hdev);
free_skb:
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index e7e58a956d15..6f2187fab55f 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -594,14 +594,20 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Firmware files to download are based on ROM version.
* ROM version is derived from last two bytes of soc_ver.
*/
- rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
+ if (soc_type == QCA_WCN3988)
+ rom_ver = ((soc_ver & 0x00000f00) >> 0x05) | (soc_ver & 0x0000000f);
+ else
+ rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
if (soc_type == QCA_WCN6750)
qca_send_patch_config_cmd(hdev);
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- if (qca_is_wcn399x(soc_type)) {
+ if (soc_type == QCA_WCN3988) {
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/apbtfw%02x.tlv", rom_ver);
+ } else if (qca_is_wcn399x(soc_type)) {
snprintf(config.fwname, sizeof(config.fwname),
"qca/crbtfw%02x.tlv", rom_ver);
} else if (soc_type == QCA_QCA6390) {
@@ -636,6 +642,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
if (firmware_name)
snprintf(config.fwname, sizeof(config.fwname),
"qca/%s", firmware_name);
+ else if (soc_type == QCA_WCN3988)
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/apnv%02x.bin", rom_ver);
else if (qca_is_wcn399x(soc_type)) {
if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
snprintf(config.fwname, sizeof(config.fwname),
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index b884095bcd9d..fc6cf314eb0e 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -142,6 +142,7 @@ enum qca_btsoc_type {
QCA_INVALID = -1,
QCA_AR3002,
QCA_ROME,
+ QCA_WCN3988,
QCA_WCN3990,
QCA_WCN3998,
QCA_WCN3991,
@@ -162,8 +163,15 @@ int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
- return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3991 ||
- soc_type == QCA_WCN3998;
+ switch (soc_type) {
+ case QCA_WCN3988:
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+ case QCA_WCN3998:
+ return true;
+ default:
+ return false;
+ }
}
static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type)
{
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index d978e7cea873..84c2c2e1122f 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -32,6 +32,8 @@
#define RTL_ROM_LMP_8851B 0x8851
#define RTL_CONFIG_MAGIC 0x8723ab55
+#define RTL_VSC_OP_COREDUMP 0xfcff
+
#define IC_MATCH_FL_LMPSUBV (1 << 0)
#define IC_MATCH_FL_HCIREV (1 << 1)
#define IC_MATCH_FL_HCIVER (1 << 2)
@@ -81,6 +83,7 @@ struct id_table {
bool has_msft_ext;
char *fw_name;
char *cfg_name;
+ char *hw_info;
};
struct btrtl_device_info {
@@ -101,22 +104,25 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8723A, 0xb, 0x6, HCI_USB),
.config_needed = false,
.has_rom_version = false,
- .fw_name = "rtl_bt/rtl8723a_fw.bin",
- .cfg_name = NULL },
+ .fw_name = "rtl_bt/rtl8723a_fw",
+ .cfg_name = NULL,
+ .hw_info = "rtl8723au" },
/* 8723BS */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xb, 0x6, HCI_UART),
.config_needed = true,
.has_rom_version = true,
- .fw_name = "rtl_bt/rtl8723bs_fw.bin",
- .cfg_name = "rtl_bt/rtl8723bs_config" },
+ .fw_name = "rtl_bt/rtl8723bs_fw",
+ .cfg_name = "rtl_bt/rtl8723bs_config",
+ .hw_info = "rtl8723bs" },
/* 8723B */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xb, 0x6, HCI_USB),
.config_needed = false,
.has_rom_version = true,
- .fw_name = "rtl_bt/rtl8723b_fw.bin",
- .cfg_name = "rtl_bt/rtl8723b_config" },
+ .fw_name = "rtl_bt/rtl8723b_fw",
+ .cfg_name = "rtl_bt/rtl8723b_config",
+ .hw_info = "rtl8723bu" },
/* 8723CS-CG */
{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE |
@@ -126,8 +132,9 @@ static const struct id_table ic_id_table[] = {
.hci_bus = HCI_UART,
.config_needed = true,
.has_rom_version = true,
- .fw_name = "rtl_bt/rtl8723cs_cg_fw.bin",
- .cfg_name = "rtl_bt/rtl8723cs_cg_config" },
+ .fw_name = "rtl_bt/rtl8723cs_cg_fw",
+ .cfg_name = "rtl_bt/rtl8723cs_cg_config",
+ .hw_info = "rtl8723cs-cg" },
/* 8723CS-VF */
{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE |
@@ -137,8 +144,9 @@ static const struct id_table ic_id_table[] = {
.hci_bus = HCI_UART,
.config_needed = true,
.has_rom_version = true,
- .fw_name = "rtl_bt/rtl8723cs_vf_fw.bin",
- .cfg_name = "rtl_bt/rtl8723cs_vf_config" },
+ .fw_name = "rtl_bt/rtl8723cs_vf_fw",
+ .cfg_name = "rtl_bt/rtl8723cs_vf_config",
+ .hw_info = "rtl8723cs-vf" },
/* 8723CS-XX */
{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE |
@@ -148,139 +156,157 @@ static const struct id_table ic_id_table[] = {
.hci_bus = HCI_UART,
.config_needed = true,
.has_rom_version = true,
- .fw_name = "rtl_bt/rtl8723cs_xx_fw.bin",
- .cfg_name = "rtl_bt/rtl8723cs_xx_config" },
+ .fw_name = "rtl_bt/rtl8723cs_xx_fw",
+ .cfg_name = "rtl_bt/rtl8723cs_xx_config",
+ .hw_info = "rtl8723cs" },
/* 8723D */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_USB),
.config_needed = true,
.has_rom_version = true,
- .fw_name = "rtl_bt/rtl8723d_fw.bin",
- .cfg_name = "rtl_bt/rtl8723d_config" },
+ .fw_name = "rtl_bt/rtl8723d_fw",
+ .cfg_name = "rtl_bt/rtl8723d_config",
+ .hw_info = "rtl8723du" },
/* 8723DS */
{ IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_UART),
.config_needed = true,
.has_rom_version = true,
- .fw_name = "rtl_bt/rtl8723ds_fw.bin",
- .cfg_name = "rtl_bt/rtl8723ds_config" },
+ .fw_name = "rtl_bt/rtl8723ds_fw",
+ .cfg_name = "rtl_bt/rtl8723ds_config",
+ .hw_info = "rtl8723ds" },
/* 8821A */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xa, 0x6, HCI_USB),
.config_needed = false,
.has_rom_version = true,
- .fw_name = "rtl_bt/rtl8821a_fw.bin",
- .cfg_name = "rtl_bt/rtl8821a_config" },
+ .fw_name = "rtl_bt/rtl8821a_fw",
+ .cfg_name = "rtl_bt/rtl8821a_config",
+ .hw_info = "rtl8821au" },
/* 8821C */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_USB),
.config_needed = false,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8821c_fw.bin",
- .cfg_name = "rtl_bt/rtl8821c_config" },
+ .fw_name = "rtl_bt/rtl8821c_fw",
+ .cfg_name = "rtl_bt/rtl8821c_config",
+ .hw_info = "rtl8821cu" },
/* 8821CS */
{ IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_UART),
.config_needed = true,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8821cs_fw.bin",
- .cfg_name = "rtl_bt/rtl8821cs_config" },
+ .fw_name = "rtl_bt/rtl8821cs_fw",
+ .cfg_name = "rtl_bt/rtl8821cs_config",
+ .hw_info = "rtl8821cs" },
/* 8761A */
{ IC_INFO(RTL_ROM_LMP_8761A, 0xa, 0x6, HCI_USB),
.config_needed = false,
.has_rom_version = true,
- .fw_name = "rtl_bt/rtl8761a_fw.bin",
- .cfg_name = "rtl_bt/rtl8761a_config" },
+ .fw_name = "rtl_bt/rtl8761a_fw",
+ .cfg_name = "rtl_bt/rtl8761a_config",
+ .hw_info = "rtl8761au" },
/* 8761B */
{ IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_UART),
.config_needed = false,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8761b_fw.bin",
- .cfg_name = "rtl_bt/rtl8761b_config" },
+ .fw_name = "rtl_bt/rtl8761b_fw",
+ .cfg_name = "rtl_bt/rtl8761b_config",
+ .hw_info = "rtl8761btv" },
/* 8761BU */
{ IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_USB),
.config_needed = false,
.has_rom_version = true,
- .fw_name = "rtl_bt/rtl8761bu_fw.bin",
- .cfg_name = "rtl_bt/rtl8761bu_config" },
+ .fw_name = "rtl_bt/rtl8761bu_fw",
+ .cfg_name = "rtl_bt/rtl8761bu_config",
+ .hw_info = "rtl8761bu" },
/* 8822C with UART interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0x8, HCI_UART),
.config_needed = true,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8822cs_fw.bin",
- .cfg_name = "rtl_bt/rtl8822cs_config" },
+ .fw_name = "rtl_bt/rtl8822cs_fw",
+ .cfg_name = "rtl_bt/rtl8822cs_config",
+ .hw_info = "rtl8822cs" },
/* 8822C with UART interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART),
.config_needed = true,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8822cs_fw.bin",
- .cfg_name = "rtl_bt/rtl8822cs_config" },
+ .fw_name = "rtl_bt/rtl8822cs_fw",
+ .cfg_name = "rtl_bt/rtl8822cs_config",
+ .hw_info = "rtl8822cs" },
/* 8822C with USB interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_USB),
.config_needed = false,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8822cu_fw.bin",
- .cfg_name = "rtl_bt/rtl8822cu_config" },
+ .fw_name = "rtl_bt/rtl8822cu_fw",
+ .cfg_name = "rtl_bt/rtl8822cu_config",
+ .hw_info = "rtl8822cu" },
/* 8822B */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xb, 0x7, HCI_USB),
.config_needed = true,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8822b_fw.bin",
- .cfg_name = "rtl_bt/rtl8822b_config" },
+ .fw_name = "rtl_bt/rtl8822b_fw",
+ .cfg_name = "rtl_bt/rtl8822b_config",
+ .hw_info = "rtl8822bu" },
/* 8852A */
{ IC_INFO(RTL_ROM_LMP_8852A, 0xa, 0xb, HCI_USB),
.config_needed = false,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8852au_fw.bin",
- .cfg_name = "rtl_bt/rtl8852au_config" },
+ .fw_name = "rtl_bt/rtl8852au_fw",
+ .cfg_name = "rtl_bt/rtl8852au_config",
+ .hw_info = "rtl8852au" },
/* 8852B with UART interface */
{ IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_UART),
.config_needed = true,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8852bs_fw.bin",
- .cfg_name = "rtl_bt/rtl8852bs_config" },
+ .fw_name = "rtl_bt/rtl8852bs_fw",
+ .cfg_name = "rtl_bt/rtl8852bs_config",
+ .hw_info = "rtl8852bs" },
/* 8852B */
{ IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_USB),
.config_needed = false,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8852bu_fw.bin",
- .cfg_name = "rtl_bt/rtl8852bu_config" },
+ .fw_name = "rtl_bt/rtl8852bu_fw",
+ .cfg_name = "rtl_bt/rtl8852bu_config",
+ .hw_info = "rtl8852bu" },
/* 8852C */
{ IC_INFO(RTL_ROM_LMP_8852A, 0xc, 0xc, HCI_USB),
.config_needed = false,
.has_rom_version = true,
.has_msft_ext = true,
- .fw_name = "rtl_bt/rtl8852cu_fw.bin",
- .cfg_name = "rtl_bt/rtl8852cu_config" },
+ .fw_name = "rtl_bt/rtl8852cu_fw",
+ .cfg_name = "rtl_bt/rtl8852cu_config",
+ .hw_info = "rtl8852cu" },
/* 8851B */
{ IC_INFO(RTL_ROM_LMP_8851B, 0xb, 0xc, HCI_USB),
.config_needed = false,
.has_rom_version = true,
.has_msft_ext = false,
- .fw_name = "rtl_bt/rtl8851bu_fw.bin",
- .cfg_name = "rtl_bt/rtl8851bu_config" },
+ .fw_name = "rtl_bt/rtl8851bu_fw",
+ .cfg_name = "rtl_bt/rtl8851bu_config",
+ .hw_info = "rtl8851bu" },
};
static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
@@ -590,6 +616,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
unsigned char **_buf)
{
static const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
+ struct btrealtek_data *coredump_info = hci_get_priv(hdev);
struct rtl_epatch_header *epatch_info;
unsigned char *buf;
int i, len;
@@ -705,8 +732,10 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data;
num_patches = le16_to_cpu(epatch_info->num_patches);
+
BT_DBG("fw_version=%x, num_patches=%d",
le32_to_cpu(epatch_info->fw_version), num_patches);
+ coredump_info->rtl_dump.fw_version = le32_to_cpu(epatch_info->fw_version);
/* After the rtl_epatch_header there is a funky patch metadata section.
* Assuming 2 patches, the layout is:
@@ -903,6 +932,53 @@ out:
return ret;
}
+static void btrtl_coredump(struct hci_dev *hdev)
+{
+ static const u8 param[] = { 0x00, 0x00 };
+
+ __hci_cmd_send(hdev, RTL_VSC_OP_COREDUMP, sizeof(param), param);
+}
+
+static void btrtl_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btrealtek_data *coredump_info = hci_get_priv(hdev);
+ char buf[80];
+
+ if (coredump_info->rtl_dump.controller)
+ snprintf(buf, sizeof(buf), "Controller Name: %s\n",
+ coredump_info->rtl_dump.controller);
+ else
+ snprintf(buf, sizeof(buf), "Controller Name: Unknown\n");
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Firmware Version: 0x%X\n",
+ coredump_info->rtl_dump.fw_version);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Driver: %s\n", coredump_info->rtl_dump.driver_name);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Vendor: Realtek\n");
+ skb_put_data(skb, buf, strlen(buf));
+}
+
+static int btrtl_register_devcoredump_support(struct hci_dev *hdev)
+{
+ int err;
+
+ err = hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL);
+
+ return err;
+}
+
+void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name)
+{
+ struct btrealtek_data *coredump_info = hci_get_priv(hdev);
+
+ coredump_info->rtl_dump.driver_name = driver_name;
+}
+EXPORT_SYMBOL_GPL(btrtl_set_driver_name);
+
static bool rtl_has_chip_type(u16 lmp_subver)
{
switch (lmp_subver) {
@@ -964,15 +1040,16 @@ EXPORT_SYMBOL_GPL(btrtl_free);
struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
const char *postfix)
{
+ struct btrealtek_data *coredump_info = hci_get_priv(hdev);
struct btrtl_device_info *btrtl_dev;
struct sk_buff *skb;
struct hci_rp_read_local_version *resp;
+ struct hci_command_hdr *cmd;
+ char fw_name[40];
char cfg_name[40];
u16 hci_rev, lmp_subver;
u8 hci_ver, lmp_ver, chip_type = 0;
int ret;
- u16 opcode;
- u8 cmd[2];
u8 reg_val[2];
btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
@@ -1041,15 +1118,14 @@ next:
btrtl_dev->drop_fw = false;
if (btrtl_dev->drop_fw) {
- opcode = hci_opcode_pack(0x3f, 0x66);
- cmd[0] = opcode & 0xff;
- cmd[1] = opcode >> 8;
-
- skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
+ skb = bt_skb_alloc(sizeof(*cmd), GFP_KERNEL);
if (!skb)
goto err_free;
- skb_put_data(skb, cmd, sizeof(cmd));
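+		/* Build vendor command 0xfc66 with no parameters as a bare HCI command header */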
+ cmd = skb_put(skb, HCI_COMMAND_HDR_SIZE);
+ cmd->opcode = cpu_to_le16(0xfc66);
+ cmd->plen = 0;
+
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
ret = hdev->send(hdev, skb);
@@ -1079,8 +1155,26 @@ next:
goto err_free;
}
- btrtl_dev->fw_len = rtl_load_file(hdev, btrtl_dev->ic_info->fw_name,
- &btrtl_dev->fw_data);
+ if (!btrtl_dev->ic_info->fw_name) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
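+	/* Prefer the "_v2" firmware image on RTL8852C, then fall back to the plain .bin name */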
+ btrtl_dev->fw_len = -EIO;
+ if (lmp_subver == RTL_ROM_LMP_8852A && hci_rev == 0x000c) {
+ snprintf(fw_name, sizeof(fw_name), "%s_v2.bin",
+ btrtl_dev->ic_info->fw_name);
+ btrtl_dev->fw_len = rtl_load_file(hdev, fw_name,
+ &btrtl_dev->fw_data);
+ }
+
+ if (btrtl_dev->fw_len < 0) {
+ snprintf(fw_name, sizeof(fw_name), "%s.bin",
+ btrtl_dev->ic_info->fw_name);
+ btrtl_dev->fw_len = rtl_load_file(hdev, fw_name,
+ &btrtl_dev->fw_data);
+ }
+
if (btrtl_dev->fw_len < 0) {
rtl_dev_err(hdev, "firmware file %s not found",
btrtl_dev->ic_info->fw_name);
@@ -1113,6 +1207,9 @@ next:
if (btrtl_dev->ic_info->has_msft_ext)
hci_set_msft_opcode(hdev, 0xFCF0);
+ if (btrtl_dev->ic_info)
+ coredump_info->rtl_dump.controller = btrtl_dev->ic_info->hw_info;
+
return btrtl_dev;
err_free:
@@ -1125,6 +1222,8 @@ EXPORT_SYMBOL_GPL(btrtl_initialize);
int btrtl_download_firmware(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev)
{
+ int err = 0;
+
/* Match a set of subver values that correspond to stock firmware,
* which is not compatible with standard btusb.
* If matched, upload an alternative firmware that does conform to
@@ -1133,12 +1232,14 @@ int btrtl_download_firmware(struct hci_dev *hdev,
*/
if (!btrtl_dev->ic_info) {
rtl_dev_info(hdev, "assuming no firmware upload needed");
- return 0;
+ err = 0;
+ goto done;
}
switch (btrtl_dev->ic_info->lmp_subver) {
case RTL_ROM_LMP_8723A:
- return btrtl_setup_rtl8723a(hdev, btrtl_dev);
+ err = btrtl_setup_rtl8723a(hdev, btrtl_dev);
+ break;
case RTL_ROM_LMP_8723B:
case RTL_ROM_LMP_8821A:
case RTL_ROM_LMP_8761A:
@@ -1146,11 +1247,18 @@ int btrtl_download_firmware(struct hci_dev *hdev,
case RTL_ROM_LMP_8852A:
case RTL_ROM_LMP_8703B:
case RTL_ROM_LMP_8851B:
- return btrtl_setup_rtl8723b(hdev, btrtl_dev);
+ err = btrtl_setup_rtl8723b(hdev, btrtl_dev);
+ break;
default:
rtl_dev_info(hdev, "assuming no firmware upload needed");
- return 0;
+ break;
}
+
+done:
+ if (!err)
+ err = btrtl_register_devcoredump_support(hdev);
+
+ return err;
}
EXPORT_SYMBOL_GPL(btrtl_download_firmware);
@@ -1180,6 +1288,10 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
if (btrtl_dev->project_id == CHIP_ID_8852C)
btrealtek_set_flag(hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP);
+ if (btrtl_dev->project_id == CHIP_ID_8852A ||
+ btrtl_dev->project_id == CHIP_ID_8852C)
+ set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks);
+
hci_set_aosp_capable(hdev);
break;
default:
@@ -1398,4 +1510,5 @@ MODULE_FIRMWARE("rtl_bt/rtl8852bs_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw_v2.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
index adb4c2c9abc5..a2d9d34f9fb0 100644
--- a/drivers/bluetooth/btrtl.h
+++ b/drivers/bluetooth/btrtl.h
@@ -109,8 +109,16 @@ enum {
__REALTEK_NUM_FLAGS,
};
+struct rtl_dump_info {
+ const char *driver_name;
+ char *controller;
+ u32 fw_version;
+};
+
struct btrealtek_data {
DECLARE_BITMAP(flags, __REALTEK_NUM_FLAGS);
+
+ struct rtl_dump_info rtl_dump;
};
#define btrealtek_set_flag(hdev, nr) \
@@ -139,6 +147,7 @@ int btrtl_get_uart_settings(struct hci_dev *hdev,
struct btrtl_device_info *btrtl_dev,
unsigned int *controller_baudrate,
u32 *device_baudrate, bool *flow_control);
+void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name);
#else
@@ -182,4 +191,8 @@ static inline int btrtl_get_uart_settings(struct hci_dev *hdev,
return -ENOENT;
}
+static inline void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name)
+{
+}
+
#endif
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 764d176e9735..1bb3b09013b0 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -476,6 +476,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0035), .driver_info = BTUSB_INTEL_COMBINED },
+ { USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED |
BTUSB_INTEL_NO_WBS_SUPPORT |
@@ -625,9 +626,24 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0e4), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0f1), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0f6), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -860,10 +876,26 @@ static void btusb_intel_cmd_timeout(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
+ struct btintel_data *intel_data = hci_get_priv(hdev);
if (++data->cmd_timeout_cnt < 5)
return;
+ if (intel_data->acpi_reset_method) {
+ if (test_and_set_bit(INTEL_ACPI_RESET_ACTIVE, intel_data->flags)) {
+ bt_dev_err(hdev, "acpi: last reset failed ? Not resetting again");
+ return;
+ }
+
+ bt_dev_err(hdev, "Initiating acpi reset method");
+ /* If ACPI reset method fails, lets try with legacy GPIO
+ * toggling
+ */
+		if (!intel_data->acpi_reset_method(hdev))
+			return;
+ }
+
if (!reset_gpio) {
btusb_reset(hdev);
return;
@@ -887,10 +919,49 @@ static void btusb_intel_cmd_timeout(struct hci_dev *hdev)
gpiod_set_value_cansleep(reset_gpio, 0);
}
+#define RTK_DEVCOREDUMP_CODE_MEMDUMP 0x01
+#define RTK_DEVCOREDUMP_CODE_HW_ERR 0x02
+#define RTK_DEVCOREDUMP_CODE_CMD_TIMEOUT 0x03
+
+#define RTK_SUB_EVENT_CODE_COREDUMP 0x34
+
+struct rtk_dev_coredump_hdr {
+ u8 type;
+ u8 code;
+ u8 reserved[2];
+} __packed;
+
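+/* Wrap the dump payload with a small header recording what triggered it */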
+static inline void btusb_rtl_alloc_devcoredump(struct hci_dev *hdev,
+ struct rtk_dev_coredump_hdr *hdr, u8 *buf, u32 len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len + sizeof(*hdr), GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb_put_data(skb, hdr, sizeof(*hdr));
+ if (len)
+ skb_put_data(skb, buf, len);
+
+ if (!hci_devcd_init(hdev, skb->len)) {
+ hci_devcd_append(hdev, skb);
+ hci_devcd_complete(hdev);
+ } else {
+ bt_dev_err(hdev, "RTL: Failed to generate devcoredump");
+ kfree_skb(skb);
+ }
+}
+
static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
+ struct rtk_dev_coredump_hdr hdr = {
+ .type = RTK_DEVCOREDUMP_CODE_CMD_TIMEOUT,
+ };
+
+ btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
if (++data->cmd_timeout_cnt < 5)
return;
@@ -917,6 +988,18 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
gpiod_set_value_cansleep(reset_gpio, 0);
}
+static void btusb_rtl_hw_error(struct hci_dev *hdev, u8 code)
+{
+ struct rtk_dev_coredump_hdr hdr = {
+ .type = RTK_DEVCOREDUMP_CODE_HW_ERR,
+ .code = code,
+ };
+
+ bt_dev_err(hdev, "RTL: hw err, trigger devcoredump (%d)", code);
+
+ btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
+}
+
static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -2409,79 +2492,6 @@ static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer,
return btusb_recv_bulk(data, buffer, count);
}
-static int btusb_intel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct intel_tlv *tlv = (void *)&skb->data[5];
-
- /* The first event is always an event type TLV */
- if (tlv->type != INTEL_TLV_TYPE_ID)
- goto recv_frame;
-
- switch (tlv->val[0]) {
- case INTEL_TLV_SYSTEM_EXCEPTION:
- case INTEL_TLV_FATAL_EXCEPTION:
- case INTEL_TLV_DEBUG_EXCEPTION:
- case INTEL_TLV_TEST_EXCEPTION:
- /* Generate devcoredump from exception */
- if (!hci_devcd_init(hdev, skb->len)) {
- hci_devcd_append(hdev, skb);
- hci_devcd_complete(hdev);
- } else {
- bt_dev_err(hdev, "Failed to generate devcoredump");
- kfree_skb(skb);
- }
- return 0;
- default:
- bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]);
- }
-
-recv_frame:
- return hci_recv_frame(hdev, skb);
-}
-
-static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_event_hdr *hdr = (void *)skb->data;
- const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
-
- if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
- hdr->plen > 0) {
- const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
- unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
-
- if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
- switch (skb->data[2]) {
- case 0x02:
- /* When switching to the operational firmware
- * the device sends a vendor specific event
- * indicating that the bootup completed.
- */
- btintel_bootup(hdev, ptr, len);
- break;
- case 0x06:
- /* When the firmware loading completes the
- * device sends out a vendor specific event
- * indicating the result of the firmware
- * loading.
- */
- btintel_secure_send_result(hdev, ptr, len);
- break;
- }
- }
-
- /* Handle all diagnostics events separately. May still call
- * hci_recv_frame.
- */
- if (len >= sizeof(diagnostics_hdr) &&
- memcmp(&skb->data[2], diagnostics_hdr,
- sizeof(diagnostics_hdr)) == 0) {
- return btusb_intel_diagnostics(hdev, skb);
- }
- }
-
- return hci_recv_frame(hdev, skb);
-}
-
static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
{
struct urb *urb;
@@ -2562,6 +2572,25 @@ static int btusb_setup_realtek(struct hci_dev *hdev)
return ret;
}
+static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ if (skb->data[0] == HCI_VENDOR_PKT && skb->data[2] == RTK_SUB_EVENT_CODE_COREDUMP) {
+ struct rtk_dev_coredump_hdr hdr = {
+ .code = RTK_DEVCOREDUMP_CODE_MEMDUMP,
+ };
+
+ bt_dev_dbg(hdev, "RTL: received coredump vendor evt, len %u",
+ skb->len);
+
+ btusb_rtl_alloc_devcoredump(hdev, &hdr, skb->data, skb->len);
+ kfree_skb(skb);
+
+ return 0;
+ }
+
+ return hci_recv_frame(hdev, skb);
+}
+
/* UHW CR mapping */
#define MTK_BT_MISC 0x70002510
#define MTK_BT_SUBSYS_RST 0x70002610
@@ -2571,8 +2600,9 @@ static int btusb_setup_realtek(struct hci_dev *hdev)
#define MTK_EP_RST_OPT 0x74011890
#define MTK_EP_RST_IN_OUT_OPT 0x00010001
#define MTK_BT_RST_DONE 0x00000100
-#define MTK_BT_RESET_WAIT_MS 100
-#define MTK_BT_RESET_NUM_TRIES 10
+#define MTK_BT_RESET_REG_CONNV3 0x70028610
+#define MTK_BT_READ_DEV_ID 0x70010200
+
static void btusb_mtk_wmt_recv(struct urb *urb)
{
@@ -2943,6 +2973,88 @@ static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id)
return btusb_mtk_reg_read(data, reg, id);
}
+static u32 btusb_mtk_reset_done(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ u32 val = 0;
+
+ btusb_mtk_uhw_reg_read(data, MTK_BT_MISC, &val);
+
+ return val & MTK_BT_RST_DONE;
+}
+
+static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct btmediatek_data *mediatek;
+ u32 val;
+ int err;
+
+	/* MediaTek-specific Bluetooth reset mechanism via USB */
+ if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
+ bt_dev_err(hdev, "last reset failed? Not resetting again");
+ return -EBUSY;
+ }
+
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ return err;
+
+ btusb_stop_traffic(data);
+ usb_kill_anchored_urbs(&data->tx_anchor);
+ mediatek = hci_get_priv(hdev);
+
+ if (mediatek->dev_id == 0x7925) {
+ btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
+ val |= (1 << 5);
+ btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
+ btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
+ val &= 0xFFFF00FF;
+ val |= (1 << 13);
+ btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
+ btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, 0x00010001);
+ btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
+ val |= (1 << 0);
+ btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
+ btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF);
+ btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val);
+ btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF);
+ btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val);
+ msleep(100);
+ } else {
+ /* It's Device EndPoint Reset Option Register */
+ bt_dev_dbg(hdev, "Initiating reset mechanism via uhw");
+ btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
+ btusb_mtk_uhw_reg_read(data, MTK_BT_WDT_STATUS, &val);
+
+ /* Reset the bluetooth chip via USB interface. */
+ btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 1);
+ btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF);
+ btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val);
+ btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF);
+ btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val);
+		/* MT7921 needs a 20ms delay between toggling the reset bit */
+ msleep(20);
+ btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 0);
+ btusb_mtk_uhw_reg_read(data, MTK_BT_SUBSYS_RST, &val);
+ }
+
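+	/* Poll the reset-done bit every 20ms, for up to 1 second */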
+ err = readx_poll_timeout(btusb_mtk_reset_done, hdev, val,
+ val & MTK_BT_RST_DONE, 20000, 1000000);
+ if (err < 0)
+ bt_dev_err(hdev, "Reset timeout");
+
+	btusb_mtk_id_get(data, MTK_BT_READ_DEV_ID, &val);
+ if (!val)
+ bt_dev_err(hdev, "Can't get device id, subsys reset fail.");
+
+ usb_queue_reset_device(data->intf);
+
+ clear_bit(BTUSB_HW_RESET_ACTIVE, &data->flags);
+
+ return err;
+}
+
static int btusb_mtk_setup(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -2953,10 +3065,11 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
struct sk_buff *skb;
const char *fwname;
int err, status;
- u32 dev_id;
+ u32 dev_id = 0;
char fw_bin_name[64];
u32 fw_version = 0;
u8 param;
+ struct btmediatek_data *mediatek;
calltime = ktime_get();
@@ -2966,7 +3079,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
return err;
}
- if (!dev_id) {
+ if (!dev_id || dev_id != 0x7663) {
err = btusb_mtk_id_get(data, 0x70010200, &dev_id);
if (err < 0) {
bt_dev_err(hdev, "Failed to get device id (%d)", err);
@@ -2979,6 +3092,14 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
}
}
+ mediatek = hci_get_priv(hdev);
+ mediatek->dev_id = dev_id;
+ mediatek->reset_sync = btusb_mtk_reset;
+
+ err = btmtk_register_coredump(hdev, btusb_driver.name, fw_version);
+ if (err < 0)
+ bt_dev_err(hdev, "Failed to register coredump (%d)", err);
+
switch (dev_id) {
case 0x7663:
fwname = FIRMWARE_MT7663;
@@ -2988,9 +3109,16 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
break;
case 0x7922:
case 0x7961:
- snprintf(fw_bin_name, sizeof(fw_bin_name),
- "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
- dev_id & 0xffff, (fw_version & 0xff) + 1);
+ case 0x7925:
+ if (dev_id == 0x7925)
+ snprintf(fw_bin_name, sizeof(fw_bin_name),
+ "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+ dev_id & 0xffff, dev_id & 0xffff, (fw_version & 0xff) + 1);
+ else
+ snprintf(fw_bin_name, sizeof(fw_bin_name),
+ "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
+ dev_id & 0xffff, (fw_version & 0xff) + 1);
+
err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
btusb_mtk_hci_wmt_sync);
if (err < 0) {
@@ -3128,67 +3256,11 @@ static int btusb_mtk_shutdown(struct hci_dev *hdev)
return 0;
}
-static void btusb_mtk_cmd_timeout(struct hci_dev *hdev)
-{
- struct btusb_data *data = hci_get_drvdata(hdev);
- u32 val;
- int err, retry = 0;
-
- /* It's MediaTek specific bluetooth reset mechanism via USB */
- if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
- bt_dev_err(hdev, "last reset failed? Not resetting again");
- return;
- }
-
- err = usb_autopm_get_interface(data->intf);
- if (err < 0)
- return;
-
- btusb_stop_traffic(data);
- usb_kill_anchored_urbs(&data->tx_anchor);
-
- /* It's Device EndPoint Reset Option Register */
- bt_dev_dbg(hdev, "Initiating reset mechanism via uhw");
- btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
- btusb_mtk_uhw_reg_read(data, MTK_BT_WDT_STATUS, &val);
-
- /* Reset the bluetooth chip via USB interface. */
- btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 1);
- btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF);
- btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val);
- btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF);
- btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val);
- /* MT7921 need to delay 20ms between toggle reset bit */
- msleep(20);
- btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 0);
- btusb_mtk_uhw_reg_read(data, MTK_BT_SUBSYS_RST, &val);
-
- /* Poll the register until reset is completed */
- do {
- btusb_mtk_uhw_reg_read(data, MTK_BT_MISC, &val);
- if (val & MTK_BT_RST_DONE) {
- bt_dev_dbg(hdev, "Bluetooth Reset Successfully");
- break;
- }
-
- bt_dev_dbg(hdev, "Polling Bluetooth Reset CR");
- retry++;
- msleep(MTK_BT_RESET_WAIT_MS);
- } while (retry < MTK_BT_RESET_NUM_TRIES);
-
- btusb_mtk_id_get(data, 0x70010200, &val);
- if (!val)
- bt_dev_err(hdev, "Can't get device id, subsys reset fail.");
-
- usb_queue_reset_device(data->intf);
-
- clear_bit(BTUSB_HW_RESET_ACTIVE, &data->flags);
-}
-
static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btusb_data *data = hci_get_drvdata(hdev);
u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
+ struct sk_buff *skb_cd;
switch (handle) {
case 0xfc6f: /* Firmware dump from device */
@@ -3196,6 +3268,15 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
* suspend and thus disable auto-suspend.
*/
usb_disable_autosuspend(data->udev);
+
+		/* Forward the diagnostic packet to the userspace daemon for
+		 * backward compatibility, and clone an extra copy of it for
+		 * the in-kernel coredump support.
+		 */
+ skb_cd = skb_clone(skb, GFP_ATOMIC);
+ if (skb_cd)
+ btmtk_process_coredump(hdev, skb_cd);
+
fallthrough;
case 0x05ff: /* Firmware debug logging 1 */
case 0x05fe: /* Firmware debug logging 2 */
@@ -4196,11 +4277,16 @@ static int btusb_probe(struct usb_interface *intf,
priv_size += sizeof(struct btintel_data);
/* Override the rx handlers */
- data->recv_event = btusb_recv_event_intel;
+ data->recv_event = btintel_recv_event;
data->recv_bulk = btusb_recv_bulk_intel;
} else if (id->driver_info & BTUSB_REALTEK) {
/* Allocate extra space for Realtek device */
priv_size += sizeof(struct btrealtek_data);
+
+ data->recv_event = btusb_recv_event_realtek;
+ } else if (id->driver_info & BTUSB_MEDIATEK) {
+ /* Allocate extra space for Mediatek device */
+ priv_size += sizeof(struct btmediatek_data);
}
data->recv_acl = hci_recv_frame;
@@ -4307,7 +4393,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->setup = btusb_mtk_setup;
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
- hdev->cmd_timeout = btusb_mtk_cmd_timeout;
+ hdev->cmd_timeout = btmtk_reset_sync;
hdev->set_bdaddr = btmtk_set_bdaddr;
set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
@@ -4364,9 +4450,11 @@ static int btusb_probe(struct usb_interface *intf,
if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) &&
(id->driver_info & BTUSB_REALTEK)) {
+ btrtl_set_driver_name(hdev, btusb_driver.name);
hdev->setup = btusb_setup_realtek;
hdev->shutdown = btrtl_shutdown_realtek;
hdev->cmd_timeout = btusb_rtl_cmd_timeout;
+ hdev->hw_error = btusb_rtl_hw_error;
/* Realtek devices need to set remote wakeup on auto-suspend */
set_bit(BTUSB_WAKEUP_AUTOSUSPEND, &data->flags);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index fefc37b98b4a..71e748a9477e 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -11,7 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index efdda2c3fce8..a76eb98c0047 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -770,7 +770,8 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
break;
case HCIUARTGETPROTO:
- if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_SET, &hu->flags) &&
+ test_bit(HCI_UART_PROTO_READY, &hu->flags))
err = hu->proto->id;
else
err = -EUNATCH;
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 05f7f6de6863..97da0b2bfd17 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -734,7 +734,11 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev)
return err;
}
- clk_prepare_enable(sysclk);
+ err = clk_prepare_enable(sysclk);
+ if (err) {
+ dev_err(dev, "could not enable sysclk: %d", err);
+ return err;
+ }
btdev->sysclk_speed = clk_get_rate(sysclk);
clk_disable_unprepare(sysclk);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index e30c979535b1..011822519602 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -25,7 +25,7 @@
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -117,9 +117,7 @@ enum qca_memdump_states {
QCA_MEMDUMP_TIMEOUT,
};
-struct qca_memdump_data {
- char *memdump_buf_head;
- char *memdump_buf_tail;
+struct qca_memdump_info {
u32 current_seq_no;
u32 received_dump;
u32 ram_dump_size;
@@ -160,13 +158,15 @@ struct qca_data {
struct work_struct ws_tx_vote_off;
struct work_struct ctrl_memdump_evt;
struct delayed_work ctrl_memdump_timeout;
- struct qca_memdump_data *qca_memdump;
+ struct qca_memdump_info *qca_memdump;
unsigned long flags;
struct completion drop_ev_comp;
wait_queue_head_t suspend_wait_q;
enum qca_memdump_states memdump_state;
struct mutex hci_memdump_lock;
+ u16 fw_version;
+ u16 controller_id;
/* For debugging purpose */
u64 ibs_sent_wacks;
u64 ibs_sent_slps;
@@ -233,6 +233,7 @@ static void qca_regulator_disable(struct qca_serdev *qcadev);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
static void qca_controller_memdump(struct work_struct *work);
+static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb);
static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
{
@@ -980,6 +981,28 @@ static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
return hci_recv_frame(hdev, skb);
}
+static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct qca_data *qca = hu->priv;
+ char buf[80];
+
+ snprintf(buf, sizeof(buf), "Controller Name: 0x%x\n",
+ qca->controller_id);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Firmware Version: 0x%x\n",
+ qca->fw_version);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Vendor:Qualcomm\n");
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Driver: %s\n",
+ hu->serdev->dev.driver->name);
+ skb_put_data(skb, buf, strlen(buf));
+}
+
static void qca_controller_memdump(struct work_struct *work)
{
struct qca_data *qca = container_of(work, struct qca_data,
@@ -987,13 +1010,11 @@ static void qca_controller_memdump(struct work_struct *work)
struct hci_uart *hu = qca->hu;
struct sk_buff *skb;
struct qca_memdump_event_hdr *cmd_hdr;
- struct qca_memdump_data *qca_memdump = qca->qca_memdump;
+ struct qca_memdump_info *qca_memdump = qca->qca_memdump;
struct qca_dump_size *dump;
- char *memdump_buf;
- char nullBuff[QCA_DUMP_PACKET_SIZE] = { 0 };
u16 seq_no;
- u32 dump_size;
u32 rx_size;
+ int ret = 0;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
@@ -1009,7 +1030,7 @@ static void qca_controller_memdump(struct work_struct *work)
}
if (!qca_memdump) {
- qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
+ qca_memdump = kzalloc(sizeof(struct qca_memdump_info),
GFP_ATOMIC);
if (!qca_memdump) {
mutex_unlock(&qca->hci_memdump_lock);
@@ -1035,44 +1056,49 @@ static void qca_controller_memdump(struct work_struct *work)
set_bit(QCA_IBS_DISABLED, &qca->flags);
set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
dump = (void *) skb->data;
- dump_size = __le32_to_cpu(dump->dump_size);
- if (!(dump_size)) {
+ qca_memdump->ram_dump_size = __le32_to_cpu(dump->dump_size);
+ if (!(qca_memdump->ram_dump_size)) {
bt_dev_err(hu->hdev, "Rx invalid memdump size");
kfree(qca_memdump);
kfree_skb(skb);
- qca->qca_memdump = NULL;
mutex_unlock(&qca->hci_memdump_lock);
return;
}
- bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
- dump_size);
queue_delayed_work(qca->workqueue,
&qca->ctrl_memdump_timeout,
- msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)
- );
-
- skb_pull(skb, sizeof(dump_size));
- memdump_buf = vmalloc(dump_size);
- qca_memdump->ram_dump_size = dump_size;
- qca_memdump->memdump_buf_head = memdump_buf;
- qca_memdump->memdump_buf_tail = memdump_buf;
- }
+ msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
+ skb_pull(skb, sizeof(qca_memdump->ram_dump_size));
+ qca_memdump->current_seq_no = 0;
+ qca_memdump->received_dump = 0;
+ ret = hci_devcd_init(hu->hdev, qca_memdump->ram_dump_size);
+ bt_dev_info(hu->hdev, "hci_devcd_init Return:%d",
+ ret);
+ if (ret < 0) {
+ kfree(qca->qca_memdump);
+ qca->qca_memdump = NULL;
+ qca->memdump_state = QCA_MEMDUMP_COLLECTED;
+ cancel_delayed_work(&qca->ctrl_memdump_timeout);
+ clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
+ mutex_unlock(&qca->hci_memdump_lock);
+ return;
+ }
+
+ bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
+ qca_memdump->ram_dump_size);
- memdump_buf = qca_memdump->memdump_buf_tail;
+ }
/* If sequence no 0 is missed then there is no point in
* accepting the other sequences.
*/
- if (!memdump_buf) {
+ if (!test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
bt_dev_err(hu->hdev, "QCA: Discarding other packets");
kfree(qca_memdump);
kfree_skb(skb);
- qca->qca_memdump = NULL;
mutex_unlock(&qca->hci_memdump_lock);
return;
}
-
/* There could be chance of missing some packets from
* the controller. In such cases let us store the dummy
* packets in the buffer.
@@ -1082,8 +1108,8 @@ static void qca_controller_memdump(struct work_struct *work)
* bits, so skip this checking for missing packet.
*/
while ((seq_no > qca_memdump->current_seq_no + 1) &&
- (soc_type != QCA_QCA6390) &&
- seq_no != QCA_LAST_SEQUENCE_NUM) {
+ (soc_type != QCA_QCA6390) &&
+ seq_no != QCA_LAST_SEQUENCE_NUM) {
bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
qca_memdump->current_seq_no);
rx_size = qca_memdump->received_dump;
@@ -1094,43 +1120,38 @@ static void qca_controller_memdump(struct work_struct *work)
qca_memdump->received_dump);
break;
}
- memcpy(memdump_buf, nullBuff, QCA_DUMP_PACKET_SIZE);
- memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
+ hci_devcd_append_pattern(hu->hdev, 0x00,
+ QCA_DUMP_PACKET_SIZE);
qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
qca_memdump->current_seq_no++;
}
- rx_size = qca_memdump->received_dump + skb->len;
+ rx_size = qca_memdump->received_dump + skb->len;
if (rx_size <= qca_memdump->ram_dump_size) {
if ((seq_no != QCA_LAST_SEQUENCE_NUM) &&
- (seq_no != qca_memdump->current_seq_no))
+ (seq_no != qca_memdump->current_seq_no)) {
bt_dev_err(hu->hdev,
"QCA memdump unexpected packet %d",
seq_no);
+ }
bt_dev_dbg(hu->hdev,
"QCA memdump packet %d with length %d",
seq_no, skb->len);
- memcpy(memdump_buf, (unsigned char *)skb->data,
- skb->len);
- memdump_buf = memdump_buf + skb->len;
- qca_memdump->memdump_buf_tail = memdump_buf;
- qca_memdump->current_seq_no = seq_no + 1;
- qca_memdump->received_dump += skb->len;
+ hci_devcd_append(hu->hdev, skb);
+ qca_memdump->current_seq_no += 1;
+ qca_memdump->received_dump = rx_size;
} else {
bt_dev_err(hu->hdev,
- "QCA memdump received %d, no space for packet %d",
- qca_memdump->received_dump, seq_no);
+ "QCA memdump received no space for packet %d",
+ qca_memdump->current_seq_no);
}
- qca->qca_memdump = qca_memdump;
- kfree_skb(skb);
+
if (seq_no == QCA_LAST_SEQUENCE_NUM) {
bt_dev_info(hu->hdev,
- "QCA memdump Done, received %d, total %d",
- qca_memdump->received_dump,
- qca_memdump->ram_dump_size);
- memdump_buf = qca_memdump->memdump_buf_head;
- dev_coredumpv(&hu->serdev->dev, memdump_buf,
- qca_memdump->received_dump, GFP_KERNEL);
+ "QCA memdump Done, received %d, total %d",
+ qca_memdump->received_dump,
+ qca_memdump->ram_dump_size);
+ hci_devcd_complete(hu->hdev);
cancel_delayed_work(&qca->ctrl_memdump_timeout);
kfree(qca->qca_memdump);
qca->qca_memdump = NULL;
@@ -1541,8 +1562,8 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
mutex_lock(&qca->hci_memdump_lock);
if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
+ hci_devcd_abort(hu->hdev);
if (qca->qca_memdump) {
- vfree(qca->qca_memdump->memdump_buf_head);
kfree(qca->qca_memdump);
qca->qca_memdump = NULL;
}
@@ -1706,6 +1727,17 @@ static int qca_power_on(struct hci_dev *hdev)
return ret;
}
+static void hci_coredump_qca(struct hci_dev *hdev)
+{
+ static const u8 param[] = { 0x26 };
+ struct sk_buff *skb;
+
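+	/* Vendor command used to trigger a controller memory dump */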
+ skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb))
+ bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb));
+ kfree_skb(skb);
+}
+
static int qca_setup(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
@@ -1820,6 +1852,9 @@ out:
hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
else
hu->hdev->set_bdaddr = qca_set_bdaddr;
+ qca->fw_version = le16_to_cpu(ver.patch_ver);
+ qca->controller_id = le16_to_cpu(ver.rom_ver);
+ hci_devcd_register(hdev, hci_coredump_qca, qca_dmp_hdr, NULL);
return ret;
}
@@ -1839,6 +1874,17 @@ static const struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
+static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = {
+ .soc_type = QCA_WCN3988,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 15000 },
+ { "vddxo", 80000 },
+ { "vddrf", 300000 },
+ { "vddch0", 450000 },
+ },
+ .num_vregs = 4,
+};
+
static const struct qca_device_data qca_soc_data_wcn3990 __maybe_unused = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
@@ -2363,6 +2409,7 @@ static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
{ .compatible = "qcom,qca9377-bt" },
+ { .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988},
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
@@ -2384,6 +2431,18 @@ static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match);
#endif
+#ifdef CONFIG_DEV_COREDUMP
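+/* Invoked by the driver core when a coredump is requested on the serdev device */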
+static void hciqca_coredump(struct device *dev)
+{
+ struct serdev_device *serdev = to_serdev_device(dev);
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ struct hci_uart *hu = &qcadev->serdev_hu;
+ struct hci_dev *hdev = hu->hdev;
+
+ if (hdev->dump.coredump)
+ hdev->dump.coredump(hdev);
+}
+#endif
static struct serdev_device_driver qca_serdev_driver = {
.probe = qca_serdev_probe,
@@ -2394,6 +2453,9 @@ static struct serdev_device_driver qca_serdev_driver = {
.acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match),
.shutdown = qca_serdev_shutdown,
.pm = &qca_pm_ops,
+#ifdef CONFIG_DEV_COREDUMP
+ .coredump = hciqca_coredump,
+#endif
},
};
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4953c1494723..be43cd08027b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -571,6 +571,7 @@ config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
depends on PAGE_SIZE_LESS_THAN_64KB
+ select PAGE_POOL
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 594094526648..b19492a7f6ad 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -49,9 +49,6 @@ DEFINE_SHOW_ATTRIBUTE(bond_debug_rlb_hash);
void bond_debug_register(struct bonding *bond)
{
- if (!bonding_debug_root)
- return;
-
bond->debug_dir =
debugfs_create_dir(bond->dev->name, bonding_debug_root);
@@ -61,9 +58,6 @@ void bond_debug_register(struct bonding *bond)
void bond_debug_unregister(struct bonding *bond)
{
- if (!bonding_debug_root)
- return;
-
debugfs_remove_recursive(bond->debug_dir);
}
@@ -71,9 +65,6 @@ void bond_debug_reregister(struct bonding *bond)
{
struct dentry *d;
- if (!bonding_debug_root)
- return;
-
d = debugfs_rename(bonding_debug_root, bond->debug_dir,
bonding_debug_root, bond->dev->name);
if (!IS_ERR(d)) {
@@ -84,11 +75,11 @@ void bond_debug_reregister(struct bonding *bond)
}
}
-void bond_create_debugfs(void)
+void __init bond_create_debugfs(void)
{
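+	/* debugfs_create_dir() never returns NULL; failures come back as ERR_PTR values */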
bonding_debug_root = debugfs_create_dir("bonding", NULL);
- if (!bonding_debug_root)
+ if (IS_ERR(bonding_debug_root))
pr_warn("Warning: Cannot create bonding directory in debugfs\n");
}
@@ -113,7 +104,7 @@ void bond_debug_reregister(struct bonding *bond)
{
}
-void bond_create_debugfs(void)
+void __init bond_create_debugfs(void)
{
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d26c69d84c1e..f398bec78457 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5046,19 +5046,7 @@ static void bond_set_slave_arr(struct bonding *bond,
static void bond_reset_slave_arr(struct bonding *bond)
{
- struct bond_up_slave *usable, *all;
-
- usable = rtnl_dereference(bond->usable_slaves);
- if (usable) {
- RCU_INIT_POINTER(bond->usable_slaves, NULL);
- kfree_rcu(usable, rcu);
- }
-
- all = rtnl_dereference(bond->all_slaves);
- if (all) {
- RCU_INIT_POINTER(bond->all_slaves, NULL);
- kfree_rcu(all, rcu);
- }
+ bond_set_slave_arr(bond, NULL, NULL);
}
/* Build the usable slaves array in control path for modes that use xmit-hash
@@ -5875,8 +5863,7 @@ static void bond_destructor(struct net_device *bond_dev)
if (bond->wq)
destroy_workqueue(bond->wq);
- if (bond->rr_tx_counter)
- free_percpu(bond->rr_tx_counter);
+ free_percpu(bond->rr_tx_counter);
}
void bond_setup(struct net_device *bond_dev)
@@ -5951,7 +5938,6 @@ void bond_setup(struct net_device *bond_dev)
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct bond_up_slave *usable, *all;
struct list_head *iter;
struct slave *slave;
@@ -5962,17 +5948,7 @@ static void bond_uninit(struct net_device *bond_dev)
__bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
- usable = rtnl_dereference(bond->usable_slaves);
- if (usable) {
- RCU_INIT_POINTER(bond->usable_slaves, NULL);
- kfree_rcu(usable, rcu);
- }
-
- all = rtnl_dereference(bond->all_slaves);
- if (all) {
- RCU_INIT_POINTER(bond->all_slaves, NULL);
- kfree_rcu(all, rcu);
- }
+ bond_set_slave_arr(bond, NULL, NULL);
list_del(&bond->bond_list);
@@ -5981,7 +5957,7 @@ static void bond_uninit(struct net_device *bond_dev)
/*------------------------- Module initialization ---------------------------*/
-static int bond_check_params(struct bond_params *params)
+static int __init bond_check_params(struct bond_params *params)
{
int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
struct bond_opt_value newval;
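Two of the bond_main.c cleanups rely on the same convention: free_percpu(), like kfree(), is a no-op when passed NULL, so the caller can drop its "if (ptr)" guard, and the duplicated slave-array teardown collapses into a single bond_set_slave_arr(bond, NULL, NULL) call. The userspace sketch below shows the NULL-tolerant-free convention with invented names; it illustrates the idiom and is not driver code.

#include <stdio.h>
#include <stdlib.h>

/* Free routine that tolerates NULL, in the spirit of kfree()/free_percpu(). */
static void counter_free(long *counter)
{
    if (!counter)
        return;
    free(counter);
}

struct bond_like {
    long *rr_tx_counter;    /* may or may not have been allocated */
};

static void bond_like_destroy(struct bond_like *b)
{
    /* No "if (b->rr_tx_counter)" needed: the free helper handles NULL. */
    counter_free(b->rr_tx_counter);
    b->rr_tx_counter = NULL;
}

int main(void)
{
    struct bond_like b = { .rr_tx_counter = NULL };

    bond_like_destroy(&b);              /* safe even when nothing was allocated */
    b.rr_tx_counter = malloc(sizeof(long));
    bond_like_destroy(&b);
    printf("done\n");
    return 0;
}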
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0bb59da24922..2805135a7205 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -803,7 +803,7 @@ static const struct attribute_group bonding_group = {
/* Initialize sysfs. This sets up the bonding_masters file in
* /sys/class/net.
*/
-int bond_create_sysfs(struct bond_net *bn)
+int __net_init bond_create_sysfs(struct bond_net *bn)
{
int ret;
@@ -836,7 +836,7 @@ int bond_create_sysfs(struct bond_net *bn)
}
/* Remove /sys/class/net/bonding_masters. */
-void bond_destroy_sysfs(struct bond_net *bn)
+void __net_exit bond_destroy_sysfs(struct bond_net *bn)
{
netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
}
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index fdda62d6eb16..294312b58e4f 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -247,11 +247,56 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
return reg_write(priv, addr, regnum, val);
}
+static void mv88e6060_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ unsigned long *interfaces = config->supported_interfaces;
+ struct mv88e6060_priv *priv = ds->priv;
+ int addr = REG_PORT(port);
+ int ret;
+
+ ret = reg_read(priv, addr, PORT_STATUS);
+ if (ret < 0) {
+ dev_err(ds->dev,
+ "port %d: unable to read status register: %pe\n",
+ port, ERR_PTR(ret));
+ return;
+ }
+
+ /* If the port is configured in SNI mode (acts as a 10Mbps PHY),
+ * it should have phy-mode = "sni", but that doesn't yet exist, so
+ * forcibly fail validation until the need arises to introduce it.
+ */
+ if (!(ret & PORT_STATUS_PORTMODE)) {
+ dev_warn(ds->dev, "port %d: SNI mode not supported\n", port);
+ return;
+ }
+
+ config->mac_capabilities = MAC_100 | MAC_10 | MAC_SYM_PAUSE;
+
+ if (port >= 4) {
+ /* Ports 4 and 5 can support MII, REVMII and REVRMII modes */
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVRMII, interfaces);
+ }
+ if (port <= 4) {
+ /* Ports 0 to 3 have internal PHYs, and port 4 can optionally
+ * use an internal PHY.
+ */
+ /* Internal PHY */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ /* Default phylib interface mode */
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ }
+}
+
static const struct dsa_switch_ops mv88e6060_switch_ops = {
.get_tag_protocol = mv88e6060_get_tag_protocol,
.setup = mv88e6060_setup,
.phy_read = mv88e6060_phy_read,
.phy_write = mv88e6060_phy_write,
+ .phylink_get_caps = mv88e6060_phylink_get_caps,
};
static int mv88e6060_probe(struct mdio_device *mdiodev)
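The new phylink_get_caps() callback advertises what each port can do by setting bits in a supported-interfaces bitmap plus a mac_capabilities mask. The toy program below models the same idea with a plain unsigned long bitmap and invented mode and capability names, only to show how the per-port branches above translate into bits; it does not use the real phylink types.

#include <stdio.h>

/* Invented stand-ins for phy_interface_t values and MAC capabilities. */
enum toy_interface { TOY_MII, TOY_REVMII, TOY_REVRMII, TOY_INTERNAL, TOY_GMII };

#define TOY_MAC_10    (1u << 0)
#define TOY_MAC_100   (1u << 1)
#define TOY_MAC_PAUSE (1u << 2)

struct toy_config {
    unsigned long supported_interfaces;
    unsigned int mac_capabilities;
};

static void toy_get_caps(int port, struct toy_config *config)
{
    config->mac_capabilities = TOY_MAC_100 | TOY_MAC_10 | TOY_MAC_PAUSE;

    if (port >= 4) {    /* MII-capable uplink ports */
        config->supported_interfaces |= 1ul << TOY_MII;
        config->supported_interfaces |= 1ul << TOY_REVMII;
        config->supported_interfaces |= 1ul << TOY_REVRMII;
    }
    if (port <= 4) {    /* ports with (optional) internal PHYs */
        config->supported_interfaces |= 1ul << TOY_INTERNAL;
        config->supported_interfaces |= 1ul << TOY_GMII;
    }
}

int main(void)
{
    struct toy_config cfg = { 0 };

    toy_get_caps(4, &cfg);
    printf("port 4 interfaces: %#lx, caps: %#x\n",
           cfg.supported_interfaces, cfg.mac_capabilities);
    return 0;
}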
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 4310e7793e58..292e6d087e8b 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -276,7 +276,7 @@ MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
static struct mdio_driver realtek_mdio_driver = {
.mdiodrv.driver = {
.name = "realtek-mdio",
- .of_match_table = of_match_ptr(realtek_mdio_of_match),
+ .of_match_table = realtek_mdio_of_match,
},
.probe = realtek_mdio_probe,
.remove = realtek_mdio_remove,
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index c2bd8bb6c9c2..ff13563059c5 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -556,7 +556,7 @@ MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
static struct platform_driver realtek_smi_driver = {
.driver = {
.name = "realtek-smi",
- .of_match_table = of_match_ptr(realtek_smi_of_match),
+ .of_match_table = realtek_smi_of_match,
},
.probe = realtek_smi_probe,
.remove = realtek_smi_remove,
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index c37d2e537230..9167e83fbceb 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -331,13 +331,9 @@ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
A5PSW_MCAST_DEF_MASK};
int i;
- if (set)
- a5psw->bridged_ports |= BIT(port);
- else
- a5psw->bridged_ports &= ~BIT(port);
-
for (i = 0; i < ARRAY_SIZE(offsets); i++)
- a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
+ a5psw_reg_rmw(a5psw, offsets[i], BIT(port),
+ set ? BIT(port) : 0);
}
static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
@@ -365,6 +361,8 @@ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
a5psw->br_dev = bridge.dev;
a5psw_port_set_standalone(a5psw, port, false);
+ a5psw->bridged_ports |= BIT(port);
+
return 0;
}
@@ -373,6 +371,8 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
{
struct a5psw *a5psw = ds->priv;
+ a5psw->bridged_ports &= ~BIT(port);
+
a5psw_port_set_standalone(a5psw, port, true);
/* No more ports bridged */
@@ -380,9 +380,63 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
a5psw->br_dev = NULL;
}
+static int a5psw_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+a5psw_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct a5psw *a5psw = ds->priv;
+ u32 val;
+
+ /* If a port is set as standalone, we do not want to be able to
+ * configure flooding or learning, which would result in joining the
+ * single bridge supported by the switch. This can happen when a port
+ * leaves the bridge, in which case the DSA core will try to "clear"
+ * all flags for the standalone port (i.e. enable flooding, disable
+ * learning). In that case, do not fail but do not apply the flags either.
+ */
+ if (!(a5psw->bridged_ports & BIT(port)))
+ return 0;
+
+ if (flags.mask & BR_LEARNING) {
+ val = flags.val & BR_LEARNING ? 0 : A5PSW_INPUT_LEARN_DIS(port);
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN,
+ A5PSW_INPUT_LEARN_DIS(port), val);
+ }
+
+ if (flags.mask & BR_FLOOD) {
+ val = flags.val & BR_FLOOD ? BIT(port) : 0;
+ a5psw_reg_rmw(a5psw, A5PSW_UCAST_DEF_MASK, BIT(port), val);
+ }
+
+ if (flags.mask & BR_MCAST_FLOOD) {
+ val = flags.val & BR_MCAST_FLOOD ? BIT(port) : 0;
+ a5psw_reg_rmw(a5psw, A5PSW_MCAST_DEF_MASK, BIT(port), val);
+ }
+
+ if (flags.mask & BR_BCAST_FLOOD) {
+ val = flags.val & BR_BCAST_FLOOD ? BIT(port) : 0;
+ a5psw_reg_rmw(a5psw, A5PSW_BCAST_DEF_MASK, BIT(port), val);
+ }
+
+ return 0;
+}
+
static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
bool learning_enabled, rx_enabled, tx_enabled;
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct a5psw *a5psw = ds->priv;
switch (state) {
@@ -396,12 +450,12 @@ static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
case BR_STATE_LEARNING:
rx_enabled = false;
tx_enabled = false;
- learning_enabled = true;
+ learning_enabled = dp->learning;
break;
case BR_STATE_FORWARDING:
rx_enabled = true;
tx_enabled = true;
- learning_enabled = true;
+ learning_enabled = dp->learning;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
@@ -585,6 +639,146 @@ out_unlock:
return ret;
}
+static int a5psw_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ u32 mask = BIT(port + A5PSW_VLAN_VERI_SHIFT) |
+ BIT(port + A5PSW_VLAN_DISC_SHIFT);
+ u32 val = vlan_filtering ? mask : 0;
+ struct a5psw *a5psw = ds->priv;
+
+ /* Disable/enable vlan tagging */
+ a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE_ENA, BIT(port),
+ vlan_filtering ? BIT(port) : 0);
+
+ /* Disable/enable vlan input filtering */
+ a5psw_reg_rmw(a5psw, A5PSW_VLAN_VERIFY, mask, val);
+
+ return 0;
+}
+
+static int a5psw_find_vlan_entry(struct a5psw *a5psw, u16 vid)
+{
+ u32 vlan_res;
+ int i;
+
+ /* Find the resolution table entry matching this VLAN id */
+ for (i = 0; i < A5PSW_VLAN_COUNT; i++) {
+ vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i));
+ if (FIELD_GET(A5PSW_VLAN_RES_VLANID, vlan_res) == vid)
+ return i;
+ }
+
+ return -1;
+}
+
+static int a5psw_new_vlan_res_entry(struct a5psw *a5psw, u16 newvid)
+{
+ u32 vlan_res;
+ int i;
+
+ /* Find a free VLAN entry */
+ for (i = 0; i < A5PSW_VLAN_COUNT; i++) {
+ vlan_res = a5psw_reg_readl(a5psw, A5PSW_VLAN_RES(i));
+ if (!(FIELD_GET(A5PSW_VLAN_RES_PORTMASK, vlan_res))) {
+ vlan_res = FIELD_PREP(A5PSW_VLAN_RES_VLANID, newvid);
+ a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(i), vlan_res);
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static void a5psw_port_vlan_tagged_cfg(struct a5psw *a5psw,
+ unsigned int vlan_res_id, int port,
+ bool set)
+{
+ u32 mask = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_RD_TAGMASK |
+ BIT(port);
+ u32 vlan_res_off = A5PSW_VLAN_RES(vlan_res_id);
+ u32 val = A5PSW_VLAN_RES_WR_TAGMASK, reg;
+
+ if (set)
+ val |= BIT(port);
+
+ /* Toggle tag mask read */
+ a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK);
+ reg = a5psw_reg_readl(a5psw, vlan_res_off);
+ a5psw_reg_writel(a5psw, vlan_res_off, A5PSW_VLAN_RES_RD_TAGMASK);
+
+ reg &= ~mask;
+ reg |= val;
+ a5psw_reg_writel(a5psw, vlan_res_off, reg);
+}
+
+static void a5psw_port_vlan_cfg(struct a5psw *a5psw, unsigned int vlan_res_id,
+ int port, bool set)
+{
+ u32 mask = A5PSW_VLAN_RES_WR_TAGMASK | BIT(port);
+ u32 reg = A5PSW_VLAN_RES_WR_PORTMASK;
+
+ if (set)
+ reg |= BIT(port);
+
+ a5psw_reg_rmw(a5psw, A5PSW_VLAN_RES(vlan_res_id), mask, reg);
+}
+
+static int a5psw_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool tagged = !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct a5psw *a5psw = ds->priv;
+ u16 vid = vlan->vid;
+ int vlan_res_id;
+
+ dev_dbg(a5psw->dev, "Add VLAN %d on port %d, %s, %s\n",
+ vid, port, tagged ? "tagged" : "untagged",
+ pvid ? "PVID" : "no PVID");
+
+ vlan_res_id = a5psw_find_vlan_entry(a5psw, vid);
+ if (vlan_res_id < 0) {
+ vlan_res_id = a5psw_new_vlan_res_entry(a5psw, vid);
+ if (vlan_res_id < 0)
+ return -ENOSPC;
+ }
+
+ a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, true);
+ if (tagged)
+ a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, true);
+
+ /* Configure the port to tag with the corresponding VID, but do not
+ * enable it yet: port tagging only takes effect once VLAN filtering
+ * is enabled
+ */
+ if (pvid)
+ a5psw_reg_writel(a5psw, A5PSW_SYSTEM_TAGINFO(port), vid);
+
+ return 0;
+}
+
+static int a5psw_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct a5psw *a5psw = ds->priv;
+ u16 vid = vlan->vid;
+ int vlan_res_id;
+
+ dev_dbg(a5psw->dev, "Removing VLAN %d on port %d\n", vid, port);
+
+ vlan_res_id = a5psw_find_vlan_entry(a5psw, vid);
+ if (vlan_res_id < 0)
+ return -EINVAL;
+
+ a5psw_port_vlan_cfg(a5psw, vlan_res_id, port, false);
+ a5psw_port_vlan_tagged_cfg(a5psw, vlan_res_id, port, false);
+
+ return 0;
+}
+
static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
{
u32 reg_lo, reg_hi;
@@ -702,6 +896,27 @@ static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
ctrl_stats->MACControlFramesReceived = stat;
}
+static void a5psw_vlan_setup(struct a5psw *a5psw, int port)
+{
+ u32 reg;
+
+ /* Enable TAG always mode for the port; this is actually controlled
+ * by the VLAN_IN_MODE_ENA field, which will be used for PVID insertion
+ */
+ reg = A5PSW_VLAN_IN_MODE_TAG_ALWAYS;
+ reg <<= A5PSW_VLAN_IN_MODE_PORT_SHIFT(port);
+ a5psw_reg_rmw(a5psw, A5PSW_VLAN_IN_MODE, A5PSW_VLAN_IN_MODE_PORT(port),
+ reg);
+
+ /* Set transparent mode for output frame manipulation; the behavior
+ * then depends on the VLAN_RES configuration mode
+ */
+ reg = A5PSW_VLAN_OUT_MODE_TRANSPARENT;
+ reg <<= A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port);
+ a5psw_reg_rmw(a5psw, A5PSW_VLAN_OUT_MODE,
+ A5PSW_VLAN_OUT_MODE_PORT(port), reg);
+}
+
static int a5psw_setup(struct dsa_switch *ds)
{
struct a5psw *a5psw = ds->priv;
@@ -776,6 +991,8 @@ static int a5psw_setup(struct dsa_switch *ds)
/* Enable standalone mode for user ports */
if (dsa_port_is_user(dp))
a5psw_port_set_standalone(a5psw, port, true);
+
+ a5psw_vlan_setup(a5psw, port);
}
return 0;
@@ -801,8 +1018,13 @@ static const struct dsa_switch_ops a5psw_switch_ops = {
.set_ageing_time = a5psw_set_ageing_time,
.port_bridge_join = a5psw_port_bridge_join,
.port_bridge_leave = a5psw_port_bridge_leave,
+ .port_pre_bridge_flags = a5psw_port_pre_bridge_flags,
+ .port_bridge_flags = a5psw_port_bridge_flags,
.port_stp_state_set = a5psw_port_stp_state_set,
.port_fast_age = a5psw_port_fast_age,
+ .port_vlan_filtering = a5psw_port_vlan_filtering,
+ .port_vlan_add = a5psw_port_vlan_add,
+ .port_vlan_del = a5psw_port_vlan_del,
.port_fdb_add = a5psw_port_fdb_add,
.port_fdb_del = a5psw_port_fdb_del,
.port_fdb_dump = a5psw_port_fdb_dump,
@@ -992,6 +1214,8 @@ static int a5psw_probe(struct platform_device *pdev)
if (IS_ERR(a5psw->base))
return PTR_ERR(a5psw->base);
+ a5psw->bridged_ports = BIT(A5PSW_CPU_PORT);
+
ret = a5psw_pcs_get(a5psw);
if (ret)
return ret;
@@ -1090,7 +1314,7 @@ MODULE_DEVICE_TABLE(of, a5psw_of_mtable);
static struct platform_driver a5psw_driver = {
.driver = {
.name = "rzn1_a5psw",
- .of_match_table = of_match_ptr(a5psw_of_mtable),
+ .of_match_table = a5psw_of_mtable,
},
.probe = a5psw_probe,
.remove = a5psw_remove,
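Most of the new a5psw bridge-flag and VLAN code is built on one primitive: a read-modify-write of a memory-mapped register, done through a5psw_reg_rmw() with a per-port BIT(port) mask. A minimal userspace model of that helper, operating on a plain array instead of MMIO, is sketched below; the register offsets and names are made up.

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

static uint32_t regs[16];   /* stand-in for the switch register file */

static uint32_t reg_readl(unsigned int off)
{
    return regs[off];
}

static void reg_writel(unsigned int off, uint32_t val)
{
    regs[off] = val;
}

/* Read-modify-write: clear the bits in @mask, then set the bits in @val. */
static void reg_rmw(unsigned int off, uint32_t mask, uint32_t val)
{
    uint32_t reg = reg_readl(off);

    reg &= ~mask;
    reg |= val;
    reg_writel(off, reg);
}

int main(void)
{
    unsigned int port = 2;

    /* Enable then disable unicast flooding for one port, as the
     * bridge-flags handler does, without touching the other ports.
     */
    reg_rmw(0, BIT(port), BIT(port));
    printf("after set:   %#x\n", reg_readl(0));
    reg_rmw(0, BIT(port), 0);
    printf("after clear: %#x\n", reg_readl(0));
    return 0;
}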
diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h
index b869192eef3f..d54acedac194 100644
--- a/drivers/net/dsa/rzn1_a5psw.h
+++ b/drivers/net/dsa/rzn1_a5psw.h
@@ -51,7 +51,9 @@
#define A5PSW_VLAN_IN_MODE_TAG_ALWAYS 0x2
#define A5PSW_VLAN_OUT_MODE 0x2C
-#define A5PSW_VLAN_OUT_MODE_PORT(port) (GENMASK(1, 0) << ((port) * 2))
+#define A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port) ((port) * 2)
+#define A5PSW_VLAN_OUT_MODE_PORT(port) (GENMASK(1, 0) << \
+ A5PSW_VLAN_OUT_MODE_PORT_SHIFT(port))
#define A5PSW_VLAN_OUT_MODE_DIS 0x0
#define A5PSW_VLAN_OUT_MODE_STRIP 0x1
#define A5PSW_VLAN_OUT_MODE_TAG_THROUGH 0x2
@@ -60,7 +62,7 @@
#define A5PSW_VLAN_IN_MODE_ENA 0x30
#define A5PSW_VLAN_TAG_ID 0x34
-#define A5PSW_SYSTEM_TAGINFO(port) (0x200 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_SYSTEM_TAGINFO(port) (0x200 + 4 * (port))
#define A5PSW_AUTH_PORT(port) (0x240 + 4 * (port))
#define A5PSW_AUTH_PORT_AUTHORIZED BIT(0)
@@ -69,7 +71,7 @@
#define A5PSW_VLAN_RES_WR_PORTMASK BIT(30)
#define A5PSW_VLAN_RES_WR_TAGMASK BIT(29)
#define A5PSW_VLAN_RES_RD_TAGMASK BIT(28)
-#define A5PSW_VLAN_RES_ID GENMASK(16, 5)
+#define A5PSW_VLAN_RES_VLANID GENMASK(16, 5)
#define A5PSW_VLAN_RES_PORTMASK GENMASK(4, 0)
#define A5PSW_RXMATCH_CONFIG(port) (0x3e80 + 4 * (port))
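The header rework above leans on the GENMASK()/FIELD_PREP()/FIELD_GET() idiom: a field such as A5PSW_VLAN_RES_VLANID is described once as a contiguous bit mask and then packed or extracted through that mask. The self-contained sketch below reimplements just enough of those helpers (assuming compile-time-constant 32-bit masks and a compiler with __builtin_ctz) to show how the VLAN id and port mask share one resolution entry; the helper bodies are simplified stand-ins, not the kernel's versions.

#include <stdio.h>
#include <stdint.h>

/* Simplified GENMASK/FIELD_PREP/FIELD_GET for 32-bit masks (illustrative). */
#define GENMASK(h, l)         (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)  (((reg) & (mask)) >> __builtin_ctz(mask))

/* Same layout as the VLAN resolution entry: vlan id in bits 16:5,
 * port mask in bits 4:0 (names shortened for the example).
 */
#define VLAN_RES_VLANID   GENMASK(16, 5)
#define VLAN_RES_PORTMASK GENMASK(4, 0)

int main(void)
{
    uint32_t entry = 0;

    entry |= FIELD_PREP(VLAN_RES_VLANID, 100);    /* VID 100 */
    entry |= FIELD_PREP(VLAN_RES_PORTMASK, 0x06); /* ports 1 and 2 */

    printf("entry     = %#010x\n", entry);
    printf("vlan id   = %u\n", FIELD_GET(VLAN_RES_VLANID, entry));
    printf("port mask = %#x\n", FIELD_GET(VLAN_RES_PORTMASK, entry));
    return 0;
}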
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 5fab589b3ddf..3d9220f9c9fe 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3982,8 +3982,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
}
adapter->mii_bus->name = "et131x_eth_mii";
- snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
- (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
+ snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(adapter->pdev));
adapter->mii_bus->priv = netdev;
adapter->mii_bus->read = et131x_mdio_read;
adapter->mii_bus->write = et131x_mdio_write;
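The et131x and tg3 hunks replace the open-coded (bus->number << 8) | devfn with pci_dev_id(), which produces the same 16-bit bus/device/function identifier. The arithmetic is worth seeing once in isolation; the program below is a stand-alone re-derivation with PCI_DEVID-style macros reimplemented for the example.

#include <stdio.h>
#include <stdint.h>

/* devfn packs slot (5 bits) and function (3 bits); bus adds 8 more bits. */
#define TOY_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define TOY_DEVID(bus, devfn) ((((uint16_t)(bus)) << 8) | (devfn))

int main(void)
{
    unsigned int bus = 0x03, slot = 0x1c, func = 0x2;
    unsigned int devfn = TOY_DEVFN(slot, func);

    /* 03:1c.2 -> 0x03e2, the value pci_dev_id() would hand back */
    printf("bus %02x slot %02x func %x -> devfn %#04x, dev id %#06x\n",
           bus, slot, func, devfn, TOY_DEVID(bus, devfn));
    return 0;
}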
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index ec704222925d..751454d305c6 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -367,7 +367,7 @@ static void *slow_memcpy( void *dst, const void *src, size_t len )
}
-struct net_device * __init atarilance_probe(void)
+static struct net_device * __init atarilance_probe(void)
{
int i;
static int found;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 41d96f4b23d8..4d4140b7c450 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -2041,7 +2041,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
if (of_id) {
- pdata->enet_id = (enum xgene_enet_id)of_id->data;
+ pdata->enet_id = (uintptr_t)of_id->data;
}
#ifdef CONFIG_ACPI
else {
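The xgene change casts the of_device_id data pointer through uintptr_t instead of straight to an enum, which is the portable way to round-trip a small integer through a void * match-table entry without truncation warnings. A tiny illustration with invented types:

#include <stdio.h>
#include <stdint.h>

enum toy_enet_id { TOY_XGENE1 = 1, TOY_XGENE2 = 2 };

struct toy_of_device_id {
    const char *compatible;
    const void *data;        /* integer smuggled in a pointer */
};

static const struct toy_of_device_id toy_match[] = {
    { "vendor,enet-v1", (void *)(uintptr_t)TOY_XGENE1 },
    { "vendor,enet-v2", (void *)(uintptr_t)TOY_XGENE2 },
    { 0 }
};

int main(void)
{
    /* Cast back through uintptr_t: no truncation warning, same value. */
    enum toy_enet_id id = (uintptr_t)toy_match[1].data;

    printf("matched id = %d\n", id);
    return 0;
}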
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5ef073a79ce9..7f956cf36337 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1539,8 +1539,7 @@ static int tg3_mdio_init(struct tg3 *tp)
return -ENOMEM;
tp->mdio_bus->name = "tg3 mdio bus";
- snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
- (tp->pdev->bus->number << 8) | tp->pdev->devfn);
+ snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
tp->mdio_bus->priv = tp;
tp->mdio_bus->parent = &tp->pdev->dev;
tp->mdio_bus->read = &tg3_mdio_read;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 692cb2d04c1c..a8b9d1a3e4d5 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2538,7 +2538,7 @@ MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match);
static struct platform_driver gemini_ethernet_port_driver = {
.driver = {
.name = "gemini-ethernet-port",
- .of_match_table = of_match_ptr(gemini_ethernet_port_of_match),
+ .of_match_table = gemini_ethernet_port_of_match,
},
.probe = gemini_ethernet_port_probe,
.remove = gemini_ethernet_port_remove,
@@ -2604,7 +2604,7 @@ MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match);
static struct platform_driver gemini_ethernet_driver = {
.driver = {
.name = DRV_NAME,
- .of_match_table = of_match_ptr(gemini_ethernet_of_match),
+ .of_match_table = gemini_ethernet_of_match,
},
.probe = gemini_ethernet_probe,
.remove = gemini_ethernet_remove,
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 139fe66f8bcd..183069581bc0 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -149,6 +149,40 @@ static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
}
+static void ftmac100_setup_mc_ht(struct ftmac100 *priv)
+{
+ struct netdev_hw_addr *ha;
+ u64 maht = 0; /* Multicast Address Hash Table */
+
+ netdev_for_each_mc_addr(ha, priv->netdev) {
+ u32 hash = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ maht |= BIT_ULL(hash);
+ }
+
+ iowrite32(lower_32_bits(maht), priv->base + FTMAC100_OFFSET_MAHT0);
+ iowrite32(upper_32_bits(maht), priv->base + FTMAC100_OFFSET_MAHT1);
+}
+
+static void ftmac100_set_rx_bits(struct ftmac100 *priv, unsigned int *maccr)
+{
+ struct net_device *netdev = priv->netdev;
+
+ /* Clear all */
+ *maccr &= ~(FTMAC100_MACCR_RCV_ALL | FTMAC100_MACCR_RX_MULTIPKT |
+ FTMAC100_MACCR_HT_MULTI_EN);
+
+ /* Set the requested bits */
+ if (netdev->flags & IFF_PROMISC)
+ *maccr |= FTMAC100_MACCR_RCV_ALL;
+ if (netdev->flags & IFF_ALLMULTI)
+ *maccr |= FTMAC100_MACCR_RX_MULTIPKT;
+ else if (netdev_mc_count(netdev)) {
+ *maccr |= FTMAC100_MACCR_HT_MULTI_EN;
+ ftmac100_setup_mc_ht(priv);
+ }
+}
+
#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \
FTMAC100_MACCR_RCV_EN | \
FTMAC100_MACCR_XDMA_EN | \
@@ -182,11 +216,7 @@ static int ftmac100_start_hw(struct ftmac100 *priv)
if (netdev->mtu > ETH_DATA_LEN)
maccr |= FTMAC100_MACCR_RX_FTL;
- /* Add other bits as needed */
- if (netdev->flags & IFF_PROMISC)
- maccr |= FTMAC100_MACCR_RCV_ALL;
- if (netdev->flags & IFF_ALLMULTI)
- maccr |= FTMAC100_MACCR_RX_MULTIPKT;
+ ftmac100_set_rx_bits(priv, &maccr);
iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
return 0;
@@ -1067,6 +1097,15 @@ static int ftmac100_change_mtu(struct net_device *netdev, int mtu)
return 0;
}
+static void ftmac100_set_rx_mode(struct net_device *netdev)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ unsigned int maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
+
+ ftmac100_set_rx_bits(priv, &maccr);
+ iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
+}
+
static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_open = ftmac100_open,
.ndo_stop = ftmac100_stop,
@@ -1075,6 +1114,7 @@ static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = ftmac100_do_ioctl,
.ndo_change_mtu = ftmac100_change_mtu,
+ .ndo_set_rx_mode = ftmac100_set_rx_mode,
};
/******************************************************************************
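The new ftmac100_setup_mc_ht() derives a 6-bit index from ether_crc() of each multicast address, sets that bit in a 64-bit hash table, and writes the two 32-bit halves to MAHT0/MAHT1. The stand-alone program below reproduces that flow with a bit-by-bit CRC; the CRC routine approximates the kernel's ether_crc() (crc32_le seeded with ~0, then bit-reversed) and should be read as an illustration rather than the exact library implementation.

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Reflected CRC-32 (poly 0xEDB88320), seeded with ~0, no final inversion:
 * this mirrors the kernel's crc32_le(~0, data, len).
 */
static uint32_t crc32_le(uint32_t crc, const uint8_t *data, int len)
{
    int i;

    while (len--) {
        crc ^= *data++;
        for (i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
    }
    return crc;
}

static uint32_t bitrev32(uint32_t x)
{
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++)
        r |= ((x >> i) & 1u) << (31 - i);
    return r;
}

/* Approximation of ether_crc(): bit-reversed crc32_le over the address. */
static uint32_t ether_crc(int len, const uint8_t *data)
{
    return bitrev32(crc32_le(~0u, data, len));
}

int main(void)
{
    const uint8_t mc_addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    uint64_t maht = 0;
    unsigned int hash = ether_crc(ETH_ALEN, mc_addr) >> 26;

    maht |= 1ull << hash;        /* one bit per hashed multicast address */

    printf("hash index = %u\n", hash);
    printf("MAHT0 = %#010x, MAHT1 = %#010x\n",
           (unsigned int)maht, (unsigned int)(maht >> 32));
    return 0;
}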
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5ca9906d7c6a..a8fbcada6b01 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -548,13 +548,11 @@ enum {
enum fec_txbuf_type {
FEC_TXBUF_T_SKB,
FEC_TXBUF_T_XDP_NDO,
+ FEC_TXBUF_T_XDP_TX,
};
struct fec_tx_buffer {
- union {
- struct sk_buff *skb;
- struct xdp_frame *xdp;
- };
+ void *buf_p;
enum fec_txbuf_type type;
};
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3bd0bf03aedb..f77105f017c1 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -69,6 +69,7 @@
#include <soc/imx/cpuidle.h>
#include <linux/filter.h>
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <asm/cacheflush.h>
@@ -76,6 +77,9 @@
static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+ int cpu, struct xdp_buff *xdp,
+ u32 dma_sync_len);
#define DRIVER_NAME "fec"
@@ -396,7 +400,7 @@ static void fec_dump(struct net_device *ndev)
fec16_to_cpu(bdp->cbd_sc),
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
- txq->tx_buf[index].skb);
+ txq->tx_buf[index].buf_p);
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
index++;
} while (bdp != txq->bd.base);
@@ -653,7 +657,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
index = fec_enet_get_bd_index(last_bdp, &txq->bd);
/* Save skb pointer */
- txq->tx_buf[index].skb = skb;
+ txq->tx_buf[index].buf_p = skb;
/* Make sure the updates to rest of the descriptor are performed before
* transferring ownership.
@@ -859,7 +863,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
}
/* Save skb pointer */
- txq->tx_buf[index].skb = skb;
+ txq->tx_buf[index].buf_p = skb;
skb_tx_timestamp(skb);
txq->bd.cur = bdp;
@@ -956,26 +960,27 @@ static void fec_enet_bd_init(struct net_device *dev)
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
- if (txq->tx_buf[i].skb) {
- dev_kfree_skb_any(txq->tx_buf[i].skb);
- txq->tx_buf[i].skb = NULL;
- }
- } else {
+ if (txq->tx_buf[i].buf_p)
+ dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+ } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
if (bdp->cbd_bufaddr)
dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
- if (txq->tx_buf[i].xdp) {
- xdp_return_frame(txq->tx_buf[i].xdp);
- txq->tx_buf[i].xdp = NULL;
- }
+ if (txq->tx_buf[i].buf_p)
+ xdp_return_frame(txq->tx_buf[i].buf_p);
+ } else {
+ struct page *page = txq->tx_buf[i].buf_p;
- /* restore default tx buffer type: FEC_TXBUF_T_SKB */
- txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+ if (page)
+ page_pool_put_page(page->pp, page, 0, false);
}
+ txq->tx_buf[i].buf_p = NULL;
+ /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+ txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
bdp->cbd_bufaddr = cpu_to_fec32(0);
bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
}
@@ -1382,6 +1387,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
struct netdev_queue *nq;
int index = 0;
int entries_free;
+ struct page *page;
+ int frame_len;
fep = netdev_priv(ndev);
@@ -1403,8 +1410,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
index = fec_enet_get_bd_index(bdp, &txq->bd);
if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
- skb = txq->tx_buf[index].skb;
- txq->tx_buf[index].skb = NULL;
+ skb = txq->tx_buf[index].buf_p;
if (bdp->cbd_bufaddr &&
!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
dma_unmap_single(&fep->pdev->dev,
@@ -1423,17 +1429,24 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
if (unlikely(!budget))
break;
- xdpf = txq->tx_buf[index].xdp;
- if (bdp->cbd_bufaddr)
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- fec16_to_cpu(bdp->cbd_datlen),
- DMA_TO_DEVICE);
+ if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+ xdpf = txq->tx_buf[index].buf_p;
+ if (bdp->cbd_bufaddr)
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ } else {
+ page = txq->tx_buf[index].buf_p;
+ }
+
bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (!xdpf) {
+ if (unlikely(!txq->tx_buf[index].buf_p)) {
txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
goto tx_buf_done;
}
+
+ frame_len = fec16_to_cpu(bdp->cbd_datlen);
}
/* Check for errors. */
@@ -1457,7 +1470,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
ndev->stats.tx_bytes += skb->len;
else
- ndev->stats.tx_bytes += xdpf->len;
+ ndev->stats.tx_bytes += frame_len;
}
/* Deferred means some collisions occurred during transmit,
@@ -1482,14 +1495,17 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
- } else {
- xdp_return_frame(xdpf);
-
- txq->tx_buf[index].xdp = NULL;
- /* restore default tx buffer type: FEC_TXBUF_T_SKB */
- txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+ } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+ xdp_return_frame_rx_napi(xdpf);
+ } else { /* recycle pages of XDP_TX frames */
+ /* dma_sync_size is 0 because XDP_TX already did the DMA sync for_device */
+ page_pool_put_page(page->pp, page, 0, true);
}
+ txq->tx_buf[index].buf_p = NULL;
+ /* restore default tx buffer type: FEC_TXBUF_T_SKB */
+ txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+
tx_buf_done:
/* Make sure the update to bdp and tx_buf are performed
* before dirty_tx
@@ -1542,7 +1558,7 @@ static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
static u32
fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
- struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
+ struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
{
unsigned int sync, len = xdp->data_end - xdp->data;
u32 ret = FEC_ENET_XDP_PASS;
@@ -1552,8 +1568,10 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
act = bpf_prog_run_xdp(prog, xdp);
- /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
- sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+ /* Due to xdp_adjust_tail and xdp_adjust_head, the DMA sync for_device
+ * must cover the maximum length the CPU may have touched
+ */
+ sync = xdp->data_end - xdp->data;
sync = max(sync, len);
switch (act) {
@@ -1574,11 +1592,19 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
}
break;
- default:
- bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
- fallthrough;
-
case XDP_TX:
+ err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
+ if (unlikely(err)) {
+ ret = FEC_ENET_XDP_CONSUMED;
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+ trace_xdp_exception(fep->netdev, prog, act);
+ } else {
+ ret = FEC_ENET_XDP_TX;
+ }
+ break;
+
+ default:
bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
fallthrough;
@@ -1620,6 +1646,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
u32 ret, xdp_result = FEC_ENET_XDP_PASS;
u32 data_start = FEC_ENET_XDP_HEADROOM;
+ int cpu = smp_processor_id();
struct xdp_buff xdp;
struct page *page;
u32 sub_len = 4;
@@ -1698,7 +1725,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* subtract 16bit shift and FCS */
xdp_prepare_buff(&xdp, page_address(page),
data_start, pkt_len - sub_len, false);
- ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+ ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
xdp_result |= ret;
if (ret != FEC_ENET_XDP_PASS)
goto rx_processing_done;
@@ -3208,7 +3235,6 @@ static void fec_enet_free_buffers(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
- struct sk_buff *skb;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
unsigned int q;
@@ -3233,18 +3259,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
kfree(txq->tx_bounce[i]);
txq->tx_bounce[i] = NULL;
+ if (!txq->tx_buf[i].buf_p) {
+ txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+ continue;
+ }
+
if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
- skb = txq->tx_buf[i].skb;
- txq->tx_buf[i].skb = NULL;
- dev_kfree_skb(skb);
+ dev_kfree_skb(txq->tx_buf[i].buf_p);
+ } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+ xdp_return_frame(txq->tx_buf[i].buf_p);
} else {
- if (txq->tx_buf[i].xdp) {
- xdp_return_frame(txq->tx_buf[i].xdp);
- txq->tx_buf[i].xdp = NULL;
- }
+ struct page *page = txq->tx_buf[i].buf_p;
- txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+ page_pool_put_page(page->pp, page, 0, false);
}
+
+ txq->tx_buf[i].buf_p = NULL;
+ txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
}
}
}
@@ -3767,12 +3798,14 @@ fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
struct fec_enet_priv_tx_q *txq,
- struct xdp_frame *frame)
+ void *frame, u32 dma_sync_len,
+ bool ndo_xmit)
{
unsigned int index, status, estatus;
struct bufdesc *bdp;
dma_addr_t dma_addr;
int entries_free;
+ u16 frame_len;
entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -3787,17 +3820,37 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
index = fec_enet_get_bd_index(bdp, &txq->bd);
- dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
- frame->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, dma_addr))
- return -ENOMEM;
+ if (ndo_xmit) {
+ struct xdp_frame *xdpf = frame;
+
+ dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+ return -ENOMEM;
+
+ frame_len = xdpf->len;
+ txq->tx_buf[index].buf_p = xdpf;
+ txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+ } else {
+ struct xdp_buff *xdpb = frame;
+ struct page *page;
+
+ page = virt_to_page(xdpb->data);
+ dma_addr = page_pool_get_dma_addr(page) +
+ (xdpb->data - xdpb->data_hard_start);
+ dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
+ dma_sync_len, DMA_BIDIRECTIONAL);
+ frame_len = xdpb->data_end - xdpb->data;
+ txq->tx_buf[index].buf_p = page;
+ txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
+ }
status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
if (fep->bufdesc_ex)
estatus = BD_ENET_TX_INT;
bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
- bdp->cbd_datlen = cpu_to_fec16(frame->len);
+ bdp->cbd_datlen = cpu_to_fec16(frame_len);
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
@@ -3809,9 +3862,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
- txq->tx_buf[index].xdp = frame;
-
/* Make sure the updates to rest of the descriptor are performed before
* transferring ownership.
*/
@@ -3837,6 +3887,29 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
return 0;
}
+static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
+ int cpu, struct xdp_buff *xdp,
+ u32 dma_sync_len)
+{
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
+ int queue, ret;
+
+ queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(fep->netdev, queue);
+
+ __netif_tx_lock(nq, cpu);
+
+ /* Avoid tx timeout as XDP shares the queue with kernel stack */
+ txq_trans_cond_update(nq);
+ ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
+
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
static int fec_enet_xdp_xmit(struct net_device *dev,
int num_frames,
struct xdp_frame **frames,
@@ -3859,7 +3932,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
/* Avoid tx timeout as XDP shares the queue with kernel stack */
txq_trans_cond_update(nq);
for (i = 0; i < num_frames; i++) {
- if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
+ if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
break;
sent_frames++;
}
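The FEC rework replaces the skb/xdp_frame union with a single void *buf_p plus an explicit FEC_TXBUF_T_* type, because XDP_TX completion now has to recycle a page-pool page rather than unmap and free a frame. Below is a condensed userspace sketch of that tag-the-pointer-and-dispatch-at-completion bookkeeping; the types and the three free routines are invented stand-ins for dev_kfree_skb(), xdp_return_frame() and page_pool_put_page().

#include <stdio.h>
#include <stdlib.h>

enum txbuf_type { TXBUF_SKB, TXBUF_XDP_NDO, TXBUF_XDP_TX };

struct tx_buffer {
    void *buf_p;            /* skb, xdp_frame or page, depending on type */
    enum txbuf_type type;
};

/* Invented stand-ins for the three kernel free paths. */
static void free_skb(void *p)     { printf("free skb %p\n", p); free(p); }
static void return_frame(void *p) { printf("return xdp frame %p\n", p); free(p); }
static void recycle_page(void *p) { printf("recycle page %p\n", p); free(p); }

static void tx_complete(struct tx_buffer *b)
{
    if (!b->buf_p)
        goto done;

    switch (b->type) {
    case TXBUF_SKB:
        free_skb(b->buf_p);
        break;
    case TXBUF_XDP_NDO:
        return_frame(b->buf_p);    /* frame was DMA-mapped separately */
        break;
    case TXBUF_XDP_TX:
        recycle_page(b->buf_p);    /* page stays in the page pool */
        break;
    }

done:
    b->buf_p = NULL;
    b->type = TXBUF_SKB;           /* restore the default, as the driver does */
}

int main(void)
{
    struct tx_buffer b = { .buf_p = malloc(64), .type = TXBUF_XDP_TX };

    tx_complete(&b);
    tx_complete(&b);               /* NULL buf_p: nothing to free */
    return 0;
}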
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 6efea4662858..e214bfaece1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -17,11 +17,11 @@ hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
-hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \
+hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \
hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \
+hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \
hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 514a20bce4f4..a4b43bcd2f0c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -382,6 +382,7 @@ struct hnae3_dev_specs {
u16 umv_size;
u16 mc_mac_size;
u32 mac_stats_num;
+ u8 tnl_num;
};
struct hnae3_client_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 0bd858620f27..4d15eb73b972 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -826,7 +826,9 @@ struct hclge_dev_specs_1_cmd {
u8 rsv0[2];
__le16 umv_size;
__le16 mc_mac_size;
- u8 rsv1[12];
+ u8 rsv1[6];
+ u8 tnl_num;
+ u8 rsv2[5];
};
/* mac speed type defined in firmware command */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 0fb2eaee3e8a..f01a7a9ee02c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -7,6 +7,7 @@
#include "hclge_debugfs.h"
#include "hclge_err.h"
#include "hclge_main.h"
+#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hnae3.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 437b510f2b80..0f50dba6cc47 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -20,6 +20,7 @@
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
+#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
@@ -40,20 +41,6 @@
#define HCLGE_PF_RESET_SYNC_TIME 20
#define HCLGE_PF_RESET_SYNC_CNT 1500
-/* Get DFX BD number offset */
-#define HCLGE_DFX_BIOS_BD_OFFSET 1
-#define HCLGE_DFX_SSU_0_BD_OFFSET 2
-#define HCLGE_DFX_SSU_1_BD_OFFSET 3
-#define HCLGE_DFX_IGU_BD_OFFSET 4
-#define HCLGE_DFX_RPU_0_BD_OFFSET 5
-#define HCLGE_DFX_RPU_1_BD_OFFSET 6
-#define HCLGE_DFX_NCSI_BD_OFFSET 7
-#define HCLGE_DFX_RTC_BD_OFFSET 8
-#define HCLGE_DFX_PPP_BD_OFFSET 9
-#define HCLGE_DFX_RCB_BD_OFFSET 10
-#define HCLGE_DFX_TQP_BD_OFFSET 11
-#define HCLGE_DFX_SSU_2_BD_OFFSET 12
-
#define HCLGE_LINK_STATUS_MS 10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
@@ -94,62 +81,6 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
- HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
- HCLGE_COMM_NIC_CSQ_DEPTH_REG,
- HCLGE_COMM_NIC_CSQ_TAIL_REG,
- HCLGE_COMM_NIC_CSQ_HEAD_REG,
- HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
- HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
- HCLGE_COMM_NIC_CRQ_DEPTH_REG,
- HCLGE_COMM_NIC_CRQ_TAIL_REG,
- HCLGE_COMM_NIC_CRQ_HEAD_REG,
- HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
- HCLGE_COMM_CMDQ_INTR_STS_REG,
- HCLGE_COMM_CMDQ_INTR_EN_REG,
- HCLGE_COMM_CMDQ_INTR_GEN_REG};
-
-static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
- HCLGE_PF_OTHER_INT_REG,
- HCLGE_MISC_RESET_STS_REG,
- HCLGE_MISC_VECTOR_INT_STS,
- HCLGE_GLOBAL_RESET_REG,
- HCLGE_FUN_RST_ING,
- HCLGE_GRO_EN_REG};
-
-static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
- HCLGE_RING_RX_ADDR_H_REG,
- HCLGE_RING_RX_BD_NUM_REG,
- HCLGE_RING_RX_BD_LENGTH_REG,
- HCLGE_RING_RX_MERGE_EN_REG,
- HCLGE_RING_RX_TAIL_REG,
- HCLGE_RING_RX_HEAD_REG,
- HCLGE_RING_RX_FBD_NUM_REG,
- HCLGE_RING_RX_OFFSET_REG,
- HCLGE_RING_RX_FBD_OFFSET_REG,
- HCLGE_RING_RX_STASH_REG,
- HCLGE_RING_RX_BD_ERR_REG,
- HCLGE_RING_TX_ADDR_L_REG,
- HCLGE_RING_TX_ADDR_H_REG,
- HCLGE_RING_TX_BD_NUM_REG,
- HCLGE_RING_TX_PRIORITY_REG,
- HCLGE_RING_TX_TC_REG,
- HCLGE_RING_TX_MERGE_EN_REG,
- HCLGE_RING_TX_TAIL_REG,
- HCLGE_RING_TX_HEAD_REG,
- HCLGE_RING_TX_FBD_NUM_REG,
- HCLGE_RING_TX_OFFSET_REG,
- HCLGE_RING_TX_EBD_NUM_REG,
- HCLGE_RING_TX_EBD_OFFSET_REG,
- HCLGE_RING_TX_BD_ERR_REG,
- HCLGE_RING_EN_REG};
-
-static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
- HCLGE_TQP_INTR_GL0_REG,
- HCLGE_TQP_INTR_GL1_REG,
- HCLGE_TQP_INTR_GL2_REG,
- HCLGE_TQP_INTR_RL_REG};
-
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
"External Loopback test",
"App Loopback test",
@@ -375,36 +306,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static const u32 hclge_dfx_bd_offset_list[] = {
- HCLGE_DFX_BIOS_BD_OFFSET,
- HCLGE_DFX_SSU_0_BD_OFFSET,
- HCLGE_DFX_SSU_1_BD_OFFSET,
- HCLGE_DFX_IGU_BD_OFFSET,
- HCLGE_DFX_RPU_0_BD_OFFSET,
- HCLGE_DFX_RPU_1_BD_OFFSET,
- HCLGE_DFX_NCSI_BD_OFFSET,
- HCLGE_DFX_RTC_BD_OFFSET,
- HCLGE_DFX_PPP_BD_OFFSET,
- HCLGE_DFX_RCB_BD_OFFSET,
- HCLGE_DFX_TQP_BD_OFFSET,
- HCLGE_DFX_SSU_2_BD_OFFSET
-};
-
-static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
- HCLGE_OPC_DFX_BIOS_COMMON_REG,
- HCLGE_OPC_DFX_SSU_REG_0,
- HCLGE_OPC_DFX_SSU_REG_1,
- HCLGE_OPC_DFX_IGU_EGU_REG,
- HCLGE_OPC_DFX_RPU_REG_0,
- HCLGE_OPC_DFX_RPU_REG_1,
- HCLGE_OPC_DFX_NCSI_REG,
- HCLGE_OPC_DFX_RTC_REG,
- HCLGE_OPC_DFX_PPP_REG,
- HCLGE_OPC_DFX_RCB_REG,
- HCLGE_OPC_DFX_TQP_REG,
- HCLGE_OPC_DFX_SSU_REG_2
-};
-
static const struct key_info meta_data_key_info[] = {
{ PACKET_TYPE_ID, 6 },
{ IP_FRAGEMENT, 1 },
@@ -1425,6 +1326,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+ ae_dev->dev_specs.tnl_num = 0;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1448,6 +1350,7 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
+ ae_dev->dev_specs.tnl_num = req1->tnl_num;
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -12383,463 +12286,6 @@ out:
return ret;
}
-static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
- u32 *regs_num_64_bit)
-{
- struct hclge_desc desc;
- u32 total_num;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query register number cmd failed, ret = %d.\n", ret);
- return ret;
- }
-
- *regs_num_32_bit = le32_to_cpu(desc.data[0]);
- *regs_num_64_bit = le32_to_cpu(desc.data[1]);
-
- total_num = *regs_num_32_bit + *regs_num_64_bit;
- if (!total_num)
- return -EINVAL;
-
- return 0;
-}
-
-static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
- void *data)
-{
-#define HCLGE_32_BIT_REG_RTN_DATANUM 8
-#define HCLGE_32_BIT_DESC_NODATA_LEN 2
-
- struct hclge_desc *desc;
- u32 *reg_val = data;
- __le32 *desc_data;
- int nodata_num;
- int cmd_num;
- int i, k, n;
- int ret;
-
- if (regs_num == 0)
- return 0;
-
- nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
- cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
- HCLGE_32_BIT_REG_RTN_DATANUM);
- desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
- ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query 32 bit register cmd failed, ret = %d.\n", ret);
- kfree(desc);
- return ret;
- }
-
- for (i = 0; i < cmd_num; i++) {
- if (i == 0) {
- desc_data = (__le32 *)(&desc[i].data[0]);
- n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
- } else {
- desc_data = (__le32 *)(&desc[i]);
- n = HCLGE_32_BIT_REG_RTN_DATANUM;
- }
- for (k = 0; k < n; k++) {
- *reg_val++ = le32_to_cpu(*desc_data++);
-
- regs_num--;
- if (!regs_num)
- break;
- }
- }
-
- kfree(desc);
- return 0;
-}
-
-static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
- void *data)
-{
-#define HCLGE_64_BIT_REG_RTN_DATANUM 4
-#define HCLGE_64_BIT_DESC_NODATA_LEN 1
-
- struct hclge_desc *desc;
- u64 *reg_val = data;
- __le64 *desc_data;
- int nodata_len;
- int cmd_num;
- int i, k, n;
- int ret;
-
- if (regs_num == 0)
- return 0;
-
- nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
- cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
- HCLGE_64_BIT_REG_RTN_DATANUM);
- desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
- ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query 64 bit register cmd failed, ret = %d.\n", ret);
- kfree(desc);
- return ret;
- }
-
- for (i = 0; i < cmd_num; i++) {
- if (i == 0) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_64_BIT_REG_RTN_DATANUM;
- }
- for (k = 0; k < n; k++) {
- *reg_val++ = le64_to_cpu(*desc_data++);
-
- regs_num--;
- if (!regs_num)
- break;
- }
- }
-
- kfree(desc);
- return 0;
-}
-
-#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFDFCFBFA
-#define REG_NUM_PER_LINE 4
-#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
-#define REG_SEPARATOR_LINE 1
-#define REG_NUM_REMAIN_MASK 3
-
-int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
-{
- int i;
-
- /* initialize command BD except the last one */
- for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
- hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
- true);
- desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
- }
-
- /* initialize the last command BD */
- hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
-
- return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
-}
-
-static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
- int *bd_num_list,
- u32 type_num)
-{
- u32 entries_per_desc, desc_index, index, offset, i;
- struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
- int ret;
-
- ret = hclge_query_bd_num_cmd_send(hdev, desc);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx bd num fail, status is %d.\n", ret);
- return ret;
- }
-
- entries_per_desc = ARRAY_SIZE(desc[0].data);
- for (i = 0; i < type_num; i++) {
- offset = hclge_dfx_bd_offset_list[i];
- index = offset % entries_per_desc;
- desc_index = offset / entries_per_desc;
- bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
- }
-
- return ret;
-}
-
-static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
- struct hclge_desc *desc_src, int bd_num,
- enum hclge_opcode_type cmd)
-{
- struct hclge_desc *desc = desc_src;
- int i, ret;
-
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- for (i = 0; i < bd_num - 1; i++) {
- desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
- desc++;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- }
-
- desc = desc_src;
- ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
- cmd, ret);
-
- return ret;
-}
-
-static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
- void *data)
-{
- int entries_per_desc, reg_num, separator_num, desc_index, index, i;
- struct hclge_desc *desc = desc_src;
- u32 *reg = data;
-
- entries_per_desc = ARRAY_SIZE(desc->data);
- reg_num = entries_per_desc * bd_num;
- separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < reg_num; i++) {
- index = i % entries_per_desc;
- desc_index = i / entries_per_desc;
- *reg++ = le32_to_cpu(desc[desc_index].data[index]);
- }
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- return reg_num + separator_num;
-}
-
-static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
-{
- u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
- int data_len_per_desc, bd_num, i;
- int *bd_num_list;
- u32 data_len;
- int ret;
-
- bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
- if (!bd_num_list)
- return -ENOMEM;
-
- ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg bd num fail, status is %d.\n", ret);
- goto out;
- }
-
- data_len_per_desc = sizeof_field(struct hclge_desc, data);
- *len = 0;
- for (i = 0; i < dfx_reg_type_num; i++) {
- bd_num = bd_num_list[i];
- data_len = data_len_per_desc * bd_num;
- *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
- }
-
-out:
- kfree(bd_num_list);
- return ret;
-}
-
-static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
-{
- u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
- int bd_num, bd_num_max, buf_len, i;
- struct hclge_desc *desc_src;
- int *bd_num_list;
- u32 *reg = data;
- int ret;
-
- bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
- if (!bd_num_list)
- return -ENOMEM;
-
- ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg bd num fail, status is %d.\n", ret);
- goto out;
- }
-
- bd_num_max = bd_num_list[0];
- for (i = 1; i < dfx_reg_type_num; i++)
- bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
-
- buf_len = sizeof(*desc_src) * bd_num_max;
- desc_src = kzalloc(buf_len, GFP_KERNEL);
- if (!desc_src) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < dfx_reg_type_num; i++) {
- bd_num = bd_num_list[i];
- ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
- hclge_dfx_reg_opcode_list[i]);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg fail, status is %d.\n", ret);
- break;
- }
-
- reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
- }
-
- kfree(desc_src);
-out:
- kfree(bd_num_list);
- return ret;
-}
-
-static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
- struct hnae3_knic_private_info *kinfo)
-{
-#define HCLGE_RING_REG_OFFSET 0x200
-#define HCLGE_RING_INT_REG_OFFSET 0x4
-
- int i, j, reg_num, separator_num;
- int data_num_sum;
- u32 *reg = data;
-
- /* fetching per-PF registers valus from PF PCIe register space */
- reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- data_num_sum = reg_num + separator_num;
-
- reg_num = ARRAY_SIZE(common_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- data_num_sum += reg_num + separator_num;
-
- reg_num = ARRAY_SIZE(ring_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (j = 0; j < kinfo->num_tqps; j++) {
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGE_RING_REG_OFFSET * j);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- }
- data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
-
- reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (j = 0; j < hdev->num_msi_used - 1; j++) {
- for (i = 0; i < reg_num; i++)
- *reg++ = hclge_read_dev(&hdev->hw,
- tqp_intr_reg_addr_list[i] +
- HCLGE_RING_INT_REG_OFFSET * j);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- }
- data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
-
- return data_num_sum;
-}
-
-static int hclge_get_regs_len(struct hnae3_handle *handle)
-{
- int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
- int regs_lines_32_bit, regs_lines_64_bit;
- int ret;
-
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return ret;
- }
-
- ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get dfx reg len failed, ret = %d.\n", ret);
- return ret;
- }
-
- cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
- regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
- REG_SEPARATOR_LINE;
-
- return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
- tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
- regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
-}
-
-static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
- void *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
- int i, reg_num, separator_num, ret;
- u32 *reg = data;
-
- *version = hdev->fw_version;
-
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return;
- }
-
- reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
-
- ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get 32 bit register failed, ret = %d.\n", ret);
- return;
- }
- reg_num = regs_num_32_bit;
- reg += reg_num;
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Get 64 bit register failed, ret = %d.\n", ret);
- return;
- }
- reg_num = regs_num_64_bit * 2;
- reg += reg_num;
- separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- ret = hclge_get_dfx_reg(hdev, reg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Get dfx register failed, ret = %d.\n", ret);
-}
-
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
struct hclge_set_led_state_cmd *req;
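The register-dump code removed here (and re-added in hclge_regs.c below) reads N 32-bit values through fixed-size command descriptors, where the first descriptor loses a couple of slots to the request header (the nodata entries). The arithmetic is easy to get wrong, so here is a stand-alone model of the same chunking; the constants match the ones in the code above, while the loop and printout are only illustrative.

#include <stdio.h>

#define RTN_DATANUM        8    /* 32-bit slots per descriptor */
#define DESC_NODATA_LEN    2    /* header slots in the first descriptor */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int regs_num = 29;
    unsigned int cmd_num = DIV_ROUND_UP(regs_num + DESC_NODATA_LEN,
                                        RTN_DATANUM);
    unsigned int i, copied = 0;

    for (i = 0; i < cmd_num; i++) {
        /* the first descriptor carries fewer register values */
        unsigned int n = i ? RTN_DATANUM : RTN_DATANUM - DESC_NODATA_LEN;

        if (n > regs_num - copied)
            n = regs_num - copied;
        copied += n;
        printf("desc %u: %u values (total %u/%u)\n", i, n, copied, regs_num);
    }
    return 0;
}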
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 70b059e6d35f..ec233ec57222 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1142,8 +1142,6 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state,
struct hclge_vlan_info *vlan_info);
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
-int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
- struct hclge_desc *desc);
void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type);
int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
new file mode 100644
index 000000000000..43c1c18fa81f
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c
@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2023 Hisilicon Limited.
+
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_regs.h"
+#include "hnae3.h"
+
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_CMDQ_INTR_STS_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
+ HCLGE_PF_OTHER_INT_REG,
+ HCLGE_MISC_RESET_STS_REG,
+ HCLGE_MISC_VECTOR_INT_STS,
+ HCLGE_GLOBAL_RESET_REG,
+ HCLGE_FUN_RST_ING,
+ HCLGE_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
+ HCLGE_RING_RX_ADDR_H_REG,
+ HCLGE_RING_RX_BD_NUM_REG,
+ HCLGE_RING_RX_BD_LENGTH_REG,
+ HCLGE_RING_RX_MERGE_EN_REG,
+ HCLGE_RING_RX_TAIL_REG,
+ HCLGE_RING_RX_HEAD_REG,
+ HCLGE_RING_RX_FBD_NUM_REG,
+ HCLGE_RING_RX_OFFSET_REG,
+ HCLGE_RING_RX_FBD_OFFSET_REG,
+ HCLGE_RING_RX_STASH_REG,
+ HCLGE_RING_RX_BD_ERR_REG,
+ HCLGE_RING_TX_ADDR_L_REG,
+ HCLGE_RING_TX_ADDR_H_REG,
+ HCLGE_RING_TX_BD_NUM_REG,
+ HCLGE_RING_TX_PRIORITY_REG,
+ HCLGE_RING_TX_TC_REG,
+ HCLGE_RING_TX_MERGE_EN_REG,
+ HCLGE_RING_TX_TAIL_REG,
+ HCLGE_RING_TX_HEAD_REG,
+ HCLGE_RING_TX_FBD_NUM_REG,
+ HCLGE_RING_TX_OFFSET_REG,
+ HCLGE_RING_TX_EBD_NUM_REG,
+ HCLGE_RING_TX_EBD_OFFSET_REG,
+ HCLGE_RING_TX_BD_ERR_REG,
+ HCLGE_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
+ HCLGE_TQP_INTR_GL0_REG,
+ HCLGE_TQP_INTR_GL1_REG,
+ HCLGE_TQP_INTR_GL2_REG,
+ HCLGE_TQP_INTR_RL_REG};
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET 1
+#define HCLGE_DFX_SSU_0_BD_OFFSET 2
+#define HCLGE_DFX_SSU_1_BD_OFFSET 3
+#define HCLGE_DFX_IGU_BD_OFFSET 4
+#define HCLGE_DFX_RPU_0_BD_OFFSET 5
+#define HCLGE_DFX_RPU_1_BD_OFFSET 6
+#define HCLGE_DFX_NCSI_BD_OFFSET 7
+#define HCLGE_DFX_RTC_BD_OFFSET 8
+#define HCLGE_DFX_PPP_BD_OFFSET 9
+#define HCLGE_DFX_RCB_BD_OFFSET 10
+#define HCLGE_DFX_TQP_BD_OFFSET 11
+#define HCLGE_DFX_SSU_2_BD_OFFSET 12
+
+static const u32 hclge_dfx_bd_offset_list[] = {
+ HCLGE_DFX_BIOS_BD_OFFSET,
+ HCLGE_DFX_SSU_0_BD_OFFSET,
+ HCLGE_DFX_SSU_1_BD_OFFSET,
+ HCLGE_DFX_IGU_BD_OFFSET,
+ HCLGE_DFX_RPU_0_BD_OFFSET,
+ HCLGE_DFX_RPU_1_BD_OFFSET,
+ HCLGE_DFX_NCSI_BD_OFFSET,
+ HCLGE_DFX_RTC_BD_OFFSET,
+ HCLGE_DFX_PPP_BD_OFFSET,
+ HCLGE_DFX_RCB_BD_OFFSET,
+ HCLGE_DFX_TQP_BD_OFFSET,
+ HCLGE_DFX_SSU_2_BD_OFFSET
+};
+
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+ HCLGE_OPC_DFX_BIOS_COMMON_REG,
+ HCLGE_OPC_DFX_SSU_REG_0,
+ HCLGE_OPC_DFX_SSU_REG_1,
+ HCLGE_OPC_DFX_IGU_EGU_REG,
+ HCLGE_OPC_DFX_RPU_REG_0,
+ HCLGE_OPC_DFX_RPU_REG_1,
+ HCLGE_OPC_DFX_NCSI_REG,
+ HCLGE_OPC_DFX_RTC_REG,
+ HCLGE_OPC_DFX_PPP_REG,
+ HCLGE_OPC_DFX_RCB_REG,
+ HCLGE_OPC_DFX_TQP_REG,
+ HCLGE_OPC_DFX_SSU_REG_2
+};
+
+enum hclge_reg_tag {
+ HCLGE_REG_TAG_CMDQ = 0,
+ HCLGE_REG_TAG_COMMON,
+ HCLGE_REG_TAG_RING,
+ HCLGE_REG_TAG_TQP_INTR,
+ HCLGE_REG_TAG_QUERY_32_BIT,
+ HCLGE_REG_TAG_QUERY_64_BIT,
+ HCLGE_REG_TAG_DFX_BIOS_COMMON,
+ HCLGE_REG_TAG_DFX_SSU_0,
+ HCLGE_REG_TAG_DFX_SSU_1,
+ HCLGE_REG_TAG_DFX_IGU_EGU,
+ HCLGE_REG_TAG_DFX_RPU_0,
+ HCLGE_REG_TAG_DFX_RPU_1,
+ HCLGE_REG_TAG_DFX_NCSI,
+ HCLGE_REG_TAG_DFX_RTC,
+ HCLGE_REG_TAG_DFX_PPP,
+ HCLGE_REG_TAG_DFX_RCB,
+ HCLGE_REG_TAG_DFX_TQP,
+ HCLGE_REG_TAG_DFX_SSU_2,
+ HCLGE_REG_TAG_RPU_TNL,
+};
+
+#pragma pack(4)
+struct hclge_reg_tlv {
+ u16 tag;
+ u16 len;
+};
+
+struct hclge_reg_header {
+ u64 magic_number;
+ u8 is_vf;
+ u8 rsv[7];
+};
+
+#pragma pack()
+
+#define HCLGE_REG_TLV_SIZE sizeof(struct hclge_reg_tlv)
+#define HCLGE_REG_HEADER_SIZE sizeof(struct hclge_reg_header)
+#define HCLGE_REG_TLV_SPACE (sizeof(struct hclge_reg_tlv) / sizeof(u32))
+#define HCLGE_REG_HEADER_SPACE (sizeof(struct hclge_reg_header) / sizeof(u32))
+#define HCLGE_REG_MAGIC_NUMBER	0x686e733372656773 /* ASCII "hns3regs" */
+
+#define HCLGE_REG_RPU_TNL_ID_0 1
+
+static u32 hclge_reg_get_header(void *data)
+{
+ struct hclge_reg_header *header = data;
+
+ header->magic_number = HCLGE_REG_MAGIC_NUMBER;
+ header->is_vf = 0x0;
+
+ return HCLGE_REG_HEADER_SPACE;
+}
+
+static u32 hclge_reg_get_tlv(u32 tag, u32 regs_num, void *data)
+{
+ struct hclge_reg_tlv *tlv = data;
+
+ tlv->tag = tag;
+ tlv->len = regs_num * sizeof(u32) + HCLGE_REG_TLV_SIZE;
+
+ return HCLGE_REG_TLV_SPACE;
+}
+
+static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+#define HCLGE_32_BIT_DESC_NODATA_LEN 2
+
+ struct hclge_desc *desc;
+ u32 *reg_val = data;
+ __le32 *desc_data;
+ int nodata_num;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
+ HCLGE_32_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 32 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le32 *)(&desc[i].data[0]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
+ } else {
+ desc_data = (__le32 *)(&desc[i]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le32_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+#define HCLGE_64_BIT_DESC_NODATA_LEN 1
+
+ struct hclge_desc *desc;
+ u64 *reg_val = data;
+ __le64 *desc_data;
+ int nodata_len;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
+ HCLGE_64_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 64 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le64_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
+{
+ int i;
+
+ /* initialize command BD except the last one */
+ for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ /* initialize the last command BD */
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
+}
+
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+ int *bd_num_list,
+ u32 type_num)
+{
+ u32 entries_per_desc, desc_index, index, offset, i;
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
+ int ret;
+
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx bd num fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ for (i = 0; i < type_num; i++) {
+ offset = hclge_dfx_bd_offset_list[i];
+ index = offset % entries_per_desc;
+ desc_index = offset / entries_per_desc;
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+ }
+
+ return ret;
+}
+
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src, int bd_num,
+ enum hclge_opcode_type cmd)
+{
+ struct hclge_desc *desc = desc_src;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ for (i = 0; i < bd_num - 1; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ desc = desc_src;
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+ cmd, ret);
+
+ return ret;
+}
+
+/* tnl_id = 0 means get the sum of all tnl registers' values */
+static int hclge_dfx_reg_rpu_tnl_cmd_send(struct hclge_dev *hdev, u32 tnl_id,
+ struct hclge_desc *desc, int bd_num)
+{
+ int i, ret;
+
+ for (i = 0; i < bd_num; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_RPU_REG_0,
+ true);
+ if (i != bd_num - 1)
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ desc[0].data[0] = cpu_to_le32(tnl_id);
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to query dfx rpu tnl reg, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+ void *data)
+{
+ int entries_per_desc, reg_num, desc_index, index, i;
+ struct hclge_desc *desc = desc_src;
+ u32 *reg = data;
+
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ reg_num = entries_per_desc * bd_num;
+ for (i = 0; i < reg_num; i++) {
+ index = i % entries_per_desc;
+ desc_index = i / entries_per_desc;
+ *reg++ = le32_to_cpu(desc[desc_index].data[index]);
+ }
+
+ return reg_num;
+}
+
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int data_len_per_desc;
+ int *bd_num_list;
+ int ret;
+ u32 i;
+
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ goto out;
+ }
+
+ data_len_per_desc = sizeof_field(struct hclge_desc, data);
+ *len = 0;
+ for (i = 0; i < dfx_reg_type_num; i++)
+ *len += bd_num_list[i] * data_len_per_desc + HCLGE_REG_TLV_SIZE;
+
+	/* The BD num of dfx_rpu_0 is reused by each dfx_rpu_tnl.
+	 *
+	 * HCLGE_DFX_BD_OFFSET starts at 1, but the array subscript starts
+	 * at 0, so the offset needs '- 1'.
+	 */
+ *len += (bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1] * data_len_per_desc +
+ HCLGE_REG_TLV_SIZE) * ae_dev->dev_specs.tnl_num;
+
+out:
+ kfree(bd_num_list);
+ return ret;
+}
+
+static int hclge_get_dfx_rpu_tnl_reg(struct hclge_dev *hdev, u32 *reg,
+ struct hclge_desc *desc_src,
+ int bd_num)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret = 0;
+ u8 i;
+
+ for (i = HCLGE_REG_RPU_TNL_ID_0; i <= ae_dev->dev_specs.tnl_num; i++) {
+ ret = hclge_dfx_reg_rpu_tnl_cmd_send(hdev, i, desc_src, bd_num);
+ if (ret)
+ break;
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RPU_TNL,
+ ARRAY_SIZE(desc_src->data) * bd_num,
+ reg);
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+ }
+
+ return ret;
+}
+
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int bd_num, bd_num_max, buf_len;
+ struct hclge_desc *desc_src;
+ int *bd_num_list;
+ u32 *reg = data;
+ int ret;
+ u32 i;
+
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ goto out;
+ }
+
+ bd_num_max = bd_num_list[0];
+ for (i = 1; i < dfx_reg_type_num; i++)
+ bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+ buf_len = sizeof(*desc_src) * bd_num_max;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+ hclge_dfx_reg_opcode_list[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg fail, status is %d.\n", ret);
+ goto free;
+ }
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_DFX_BIOS_COMMON + i,
+ ARRAY_SIZE(desc_src->data) * bd_num,
+ reg);
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+ }
+
+	/*
+	 * HCLGE_DFX_BD_OFFSET starts at 1, but the array subscript starts
+	 * at 0, so the offset needs '- 1'.
+	 */
+ bd_num = bd_num_list[HCLGE_DFX_RPU_0_BD_OFFSET - 1];
+ ret = hclge_get_dfx_rpu_tnl_reg(hdev, reg, desc_src, bd_num);
+
+free:
+ kfree(desc_src);
+out:
+ kfree(bd_num_list);
+ return ret;
+}
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET 0x200
+#define HCLGE_RING_INT_REG_OFFSET 0x4
+
+ int i, j, reg_num;
+ int data_num_sum;
+ u32 *reg = data;
+
+	/* fetch per-PF register values from PF PCIe register space */
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_CMDQ, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+ data_num_sum = reg_num + HCLGE_REG_TLV_SPACE;
+
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_COMMON, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
+ data_num_sum += reg_num + HCLGE_REG_TLV_SPACE;
+
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
+ for (j = 0; j < kinfo->num_tqps; j++) {
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_RING, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ HCLGE_RING_REG_OFFSET * j);
+ }
+ data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) * kinfo->num_tqps;
+
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_TQP_INTR, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ HCLGE_RING_INT_REG_OFFSET * j);
+ }
+ data_num_sum += (reg_num + HCLGE_REG_TLV_SPACE) *
+ (hdev->num_msi_used - 1);
+
+ return data_num_sum;
+}
+
+static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
+ u32 *regs_num_64_bit)
+{
+ struct hclge_desc desc;
+ u32 total_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query register number cmd failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *regs_num_32_bit = le32_to_cpu(desc.data[0]);
+ *regs_num_64_bit = le32_to_cpu(desc.data[1]);
+
+ total_num = *regs_num_32_bit + *regs_num_64_bit;
+ if (!total_num)
+ return -EINVAL;
+
+ return 0;
+}
+
+int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+ int cmdq_len, common_len, ring_len, tqp_intr_len;
+ int regs_len_32_bit, regs_len_64_bit;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg len failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ cmdq_len = HCLGE_REG_TLV_SIZE + sizeof(cmdq_reg_addr_list);
+ common_len = HCLGE_REG_TLV_SIZE + sizeof(common_reg_addr_list);
+ ring_len = HCLGE_REG_TLV_SIZE + sizeof(ring_reg_addr_list);
+ tqp_intr_len = HCLGE_REG_TLV_SIZE + sizeof(tqp_intr_reg_addr_list);
+ regs_len_32_bit = HCLGE_REG_TLV_SIZE + regs_num_32_bit * sizeof(u32);
+ regs_len_64_bit = HCLGE_REG_TLV_SIZE + regs_num_64_bit * sizeof(u64);
+
+ /* return the total length of all register values */
+ return HCLGE_REG_HEADER_SIZE + cmdq_len + common_len + ring_len *
+ kinfo->num_tqps + tqp_intr_len * (hdev->num_msi_used - 1) +
+ regs_len_32_bit + regs_len_64_bit + dfx_regs_len;
+}
+
+void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+#define HCLGE_REG_64_BIT_SPACE_MULTIPLE 2
+
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ u32 *reg = data;
+ int ret;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ reg += hclge_reg_get_header(reg);
+ reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_32_BIT,
+ regs_num_32_bit, reg);
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 32 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg += regs_num_32_bit;
+
+ reg += hclge_reg_get_tlv(HCLGE_REG_TAG_QUERY_64_BIT,
+ regs_num_64_bit *
+ HCLGE_REG_64_BIT_SPACE_MULTIPLE, reg);
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 64 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg += regs_num_64_bit * HCLGE_REG_64_BIT_SPACE_MULTIPLE;
+
+ ret = hclge_get_dfx_reg(hdev, reg);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get dfx register failed, ret = %d.\n", ret);
+}
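
For readers of the new dump format introduced above, here is a minimal consumer-side sketch (not part of the patch) of how such a register dump could be walked: a packed header whose magic value is the ASCII string "hns3regs", followed by tag/len records whose len field already counts the TLV itself. Struct names and the sample buffer are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#pragma pack(4)
struct dump_tlv    { uint16_t tag; uint16_t len; };	/* len = TLV + payload bytes */
struct dump_header { uint64_t magic; uint8_t is_vf; uint8_t rsv[7]; };
#pragma pack()

static void walk_dump(const uint8_t *buf, size_t size)
{
	struct dump_header hdr;
	size_t off = sizeof(hdr);

	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.magic != 0x686e733372656773ULL)	/* ASCII "hns3regs" */
		return;

	while (off + sizeof(struct dump_tlv) <= size) {
		struct dump_tlv tlv;

		memcpy(&tlv, buf + off, sizeof(tlv));
		if (tlv.len < sizeof(tlv) || off + tlv.len > size)
			break;
		printf("%s section tag %d: %zu register words\n",
		       hdr.is_vf ? "VF" : "PF", tlv.tag,
		       (tlv.len - sizeof(tlv)) / sizeof(uint32_t));
		off += tlv.len;		/* len already includes the TLV itself */
	}
}

int main(void)
{
	uint8_t buf[sizeof(struct dump_header) + sizeof(struct dump_tlv) +
		    2 * sizeof(uint32_t)] = { 0 };
	struct dump_header hdr = { .magic = 0x686e733372656773ULL, .is_vf = 0 };
	struct dump_tlv tlv = { .tag = 0, .len = sizeof(struct dump_tlv) +
						 2 * sizeof(uint32_t) };

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), &tlv, sizeof(tlv));
	walk_dump(buf, sizeof(buf));	/* prints: PF section tag 0: 2 register words */
	return 0;
}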
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h
new file mode 100644
index 000000000000..b6bc1ecb8054
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2023 Hisilicon Limited.
+
+#ifndef __HCLGE_REGS_H
+#define __HCLGE_REGS_H
+#include <linux/types.h>
+#include "hclge_comm_cmd.h"
+
+struct hnae3_handle;
+struct hclge_dev;
+
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc);
+int hclge_get_regs_len(struct hnae3_handle *handle);
+void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 34f02ca8d1d2..7a2f9233d695 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -6,6 +6,7 @@
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
+#include "hclgevf_regs.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
@@ -33,58 +34,6 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
- HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
- HCLGE_COMM_NIC_CSQ_DEPTH_REG,
- HCLGE_COMM_NIC_CSQ_TAIL_REG,
- HCLGE_COMM_NIC_CSQ_HEAD_REG,
- HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
- HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
- HCLGE_COMM_NIC_CRQ_DEPTH_REG,
- HCLGE_COMM_NIC_CRQ_TAIL_REG,
- HCLGE_COMM_NIC_CRQ_HEAD_REG,
- HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
- HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
- HCLGE_COMM_CMDQ_INTR_EN_REG,
- HCLGE_COMM_CMDQ_INTR_GEN_REG};
-
-static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
- HCLGEVF_RST_ING,
- HCLGEVF_GRO_EN_REG};
-
-static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
- HCLGEVF_RING_RX_ADDR_H_REG,
- HCLGEVF_RING_RX_BD_NUM_REG,
- HCLGEVF_RING_RX_BD_LENGTH_REG,
- HCLGEVF_RING_RX_MERGE_EN_REG,
- HCLGEVF_RING_RX_TAIL_REG,
- HCLGEVF_RING_RX_HEAD_REG,
- HCLGEVF_RING_RX_FBD_NUM_REG,
- HCLGEVF_RING_RX_OFFSET_REG,
- HCLGEVF_RING_RX_FBD_OFFSET_REG,
- HCLGEVF_RING_RX_STASH_REG,
- HCLGEVF_RING_RX_BD_ERR_REG,
- HCLGEVF_RING_TX_ADDR_L_REG,
- HCLGEVF_RING_TX_ADDR_H_REG,
- HCLGEVF_RING_TX_BD_NUM_REG,
- HCLGEVF_RING_TX_PRIORITY_REG,
- HCLGEVF_RING_TX_TC_REG,
- HCLGEVF_RING_TX_MERGE_EN_REG,
- HCLGEVF_RING_TX_TAIL_REG,
- HCLGEVF_RING_TX_HEAD_REG,
- HCLGEVF_RING_TX_FBD_NUM_REG,
- HCLGEVF_RING_TX_OFFSET_REG,
- HCLGEVF_RING_TX_EBD_NUM_REG,
- HCLGEVF_RING_TX_EBD_OFFSET_REG,
- HCLGEVF_RING_TX_BD_ERR_REG,
- HCLGEVF_RING_EN_REG};
-
-static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
- HCLGEVF_TQP_INTR_GL0_REG,
- HCLGEVF_TQP_INTR_GL1_REG,
- HCLGEVF_TQP_INTR_GL2_REG,
- HCLGEVF_TQP_INTR_RL_REG};
-
/* hclgevf_cmd_send - send command to command queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor for describing the command
@@ -111,7 +60,7 @@ void hclgevf_arq_init(struct hclgevf_dev *hdev)
spin_unlock(&cmdq->crq.lock);
}
-static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
+struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
if (!handle->client)
return container_of(handle, struct hclgevf_dev, nic);
@@ -3258,72 +3207,6 @@ static void hclgevf_get_link_mode(struct hnae3_handle *handle,
*advertising = hdev->hw.mac.advertising;
}
-#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFDFCFBFA
-#define REG_NUM_PER_LINE 4
-#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
-
-static int hclgevf_get_regs_len(struct hnae3_handle *handle)
-{
- int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-
- cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
- common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
- ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
- tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
-
- return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
- tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
-}
-
-static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
- void *data)
-{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, j, reg_um, separator_num;
- u32 *reg = data;
-
- *version = hdev->fw_version;
-
- /* fetching per-VF registers values from VF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
-
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (j = 0; j < hdev->num_tqps; j++) {
- for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw,
- ring_reg_addr_list[i] +
- HCLGEVF_TQP_REG_SIZE * j);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- }
-
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (j = 0; j < hdev->num_msi_used - 1; j++) {
- for (i = 0; i < reg_um; i++)
- *reg++ = hclgevf_read_dev(&hdev->hw,
- tqp_intr_reg_addr_list[i] +
- 4 * j);
- for (i = 0; i < separator_num; i++)
- *reg++ = SEPARATOR_VALUE;
- }
-}
-
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
struct hclge_mbx_port_base_vlan *port_base_vlan)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 59ca6c794d6d..81c16b8c8da2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -294,4 +294,5 @@ void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
struct hclge_mbx_port_base_vlan *port_base_vlan);
+struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
new file mode 100644
index 000000000000..65b9dcd38137
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2023 Hisilicon Limited.
+
+#include "hclgevf_main.h"
+#include "hclgevf_regs.h"
+#include "hnae3.h"
+
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
+ HCLGEVF_RST_ING,
+ HCLGEVF_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
+ HCLGEVF_RING_RX_ADDR_H_REG,
+ HCLGEVF_RING_RX_BD_NUM_REG,
+ HCLGEVF_RING_RX_BD_LENGTH_REG,
+ HCLGEVF_RING_RX_MERGE_EN_REG,
+ HCLGEVF_RING_RX_TAIL_REG,
+ HCLGEVF_RING_RX_HEAD_REG,
+ HCLGEVF_RING_RX_FBD_NUM_REG,
+ HCLGEVF_RING_RX_OFFSET_REG,
+ HCLGEVF_RING_RX_FBD_OFFSET_REG,
+ HCLGEVF_RING_RX_STASH_REG,
+ HCLGEVF_RING_RX_BD_ERR_REG,
+ HCLGEVF_RING_TX_ADDR_L_REG,
+ HCLGEVF_RING_TX_ADDR_H_REG,
+ HCLGEVF_RING_TX_BD_NUM_REG,
+ HCLGEVF_RING_TX_PRIORITY_REG,
+ HCLGEVF_RING_TX_TC_REG,
+ HCLGEVF_RING_TX_MERGE_EN_REG,
+ HCLGEVF_RING_TX_TAIL_REG,
+ HCLGEVF_RING_TX_HEAD_REG,
+ HCLGEVF_RING_TX_FBD_NUM_REG,
+ HCLGEVF_RING_TX_OFFSET_REG,
+ HCLGEVF_RING_TX_EBD_NUM_REG,
+ HCLGEVF_RING_TX_EBD_OFFSET_REG,
+ HCLGEVF_RING_TX_BD_ERR_REG,
+ HCLGEVF_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
+ HCLGEVF_TQP_INTR_GL0_REG,
+ HCLGEVF_TQP_INTR_GL1_REG,
+ HCLGEVF_TQP_INTR_GL2_REG,
+ HCLGEVF_TQP_INTR_RL_REG};
+
+enum hclgevf_reg_tag {
+ HCLGEVF_REG_TAG_CMDQ = 0,
+ HCLGEVF_REG_TAG_COMMON,
+ HCLGEVF_REG_TAG_RING,
+ HCLGEVF_REG_TAG_TQP_INTR,
+};
+
+#pragma pack(4)
+struct hclgevf_reg_tlv {
+ u16 tag;
+ u16 len;
+};
+
+struct hclgevf_reg_header {
+ u64 magic_number;
+ u8 is_vf;
+ u8 rsv[7];
+};
+
+#pragma pack()
+
+#define HCLGEVF_REG_TLV_SIZE sizeof(struct hclgevf_reg_tlv)
+#define HCLGEVF_REG_HEADER_SIZE sizeof(struct hclgevf_reg_header)
+#define HCLGEVF_REG_TLV_SPACE (sizeof(struct hclgevf_reg_tlv) / sizeof(u32))
+#define HCLGEVF_REG_HEADER_SPACE (sizeof(struct hclgevf_reg_header) / sizeof(u32))
+#define HCLGEVF_REG_MAGIC_NUMBER	0x686e733372656773 /* ASCII "hns3regs" */
+
+static u32 hclgevf_reg_get_header(void *data)
+{
+ struct hclgevf_reg_header *header = data;
+
+ header->magic_number = HCLGEVF_REG_MAGIC_NUMBER;
+ header->is_vf = 0x1;
+
+ return HCLGEVF_REG_HEADER_SPACE;
+}
+
+static u32 hclgevf_reg_get_tlv(u32 tag, u32 regs_num, void *data)
+{
+ struct hclgevf_reg_tlv *tlv = data;
+
+ tlv->tag = tag;
+ tlv->len = regs_num * sizeof(u32) + HCLGEVF_REG_TLV_SIZE;
+
+ return HCLGEVF_REG_TLV_SPACE;
+}
+
+int hclgevf_get_regs_len(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int cmdq_len, common_len, ring_len, tqp_intr_len;
+
+ cmdq_len = HCLGEVF_REG_TLV_SIZE + sizeof(cmdq_reg_addr_list);
+ common_len = HCLGEVF_REG_TLV_SIZE + sizeof(common_reg_addr_list);
+ ring_len = HCLGEVF_REG_TLV_SIZE + sizeof(ring_reg_addr_list);
+ tqp_intr_len = HCLGEVF_REG_TLV_SIZE + sizeof(tqp_intr_reg_addr_list);
+
+ /* return the total length of all register values */
+ return HCLGEVF_REG_HEADER_SIZE + cmdq_len + common_len +
+ tqp_intr_len * (hdev->num_msi_used - 1) +
+ ring_len * hdev->num_tqps;
+}
+
+void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+#define HCLGEVF_RING_REG_OFFSET 0x200
+#define HCLGEVF_RING_INT_REG_OFFSET 0x4
+
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, j, reg_um;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+ reg += hclgevf_reg_get_header(reg);
+
+	/* fetch per-VF register values from VF PCIe register space */
+ reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+
+ reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
+
+ reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ for (j = 0; j < hdev->num_tqps; j++) {
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ HCLGEVF_RING_REG_OFFSET * j);
+ }
+
+ reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ HCLGEVF_RING_INT_REG_OFFSET * j);
+ }
+}
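
A quick stand-alone check of the length arithmetic in hclgevf_get_regs_len() above. The register counts (14 cmdq, 3 common, 26 ring, 5 TQP-interrupt) are read off the arrays in this file; the TQP and MSI-X counts are made-up example values.

#include <stdio.h>

int main(void)
{
	const unsigned int tlv = 4;		/* sizeof(struct hclgevf_reg_tlv), packed */
	const unsigned int header = 16;		/* sizeof(struct hclgevf_reg_header) */
	const unsigned int cmdq = tlv + 14 * 4;
	const unsigned int common = tlv + 3 * 4;
	const unsigned int ring = tlv + 26 * 4;		/* repeated per TQP */
	const unsigned int tqp_intr = tlv + 5 * 4;	/* repeated per vector */
	const unsigned int num_tqps = 4;		/* assumed */
	const unsigned int num_msi_used = 5;		/* assumed */

	/* 16 + 60 + 16 + 108 * 4 + 24 * 4 = 620 bytes with these counts */
	printf("VF regs len = %u bytes\n",
	       header + cmdq + common +
	       ring * num_tqps + tqp_intr * (num_msi_used - 1));
	return 0;
}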
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h
new file mode 100644
index 000000000000..77bdcf60a1af
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2023 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_REGS_H
+#define __HCLGEVF_REGS_H
+#include <linux/types.h>
+
+struct hnae3_handle;
+
+int hclgevf_get_regs_len(struct hnae3_handle *handle);
+void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data);
+#endif
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 4817eb13ca6f..75f3fd1d8d6e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -347,6 +347,5 @@ bool e1000_has_link(struct e1000_adapter *adapter);
void e1000_power_up_phy(struct e1000_adapter *);
void e1000_set_ethtool_ops(struct net_device *netdev);
void e1000_check_options(struct e1000_adapter *adapter);
-char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index b57a04954ccf..95cdd17134e5 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -343,7 +343,6 @@ struct e1000_host_mng_dhcp_cookie {
};
#endif
-bool e1000_check_mng_mode(struct e1000_hw *hw);
s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
@@ -352,7 +351,6 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw);
/* Filters (multicast, vlan, receive) */
u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
-void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
@@ -361,7 +359,6 @@ s32 e1000_setup_led(struct e1000_hw *hw);
s32 e1000_cleanup_led(struct e1000_hw *hw);
s32 e1000_led_on(struct e1000_hw *hw);
s32 e1000_led_off(struct e1000_hw *hw);
-s32 e1000_blink_led_start(struct e1000_hw *hw);
/* Adaptive IFS Functions */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 6ab261119801..563176fd436e 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -29,8 +29,6 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
s32 e1000e_setup_led_generic(struct e1000_hw *hw);
s32 e1000e_setup_link_generic(struct e1000_hw *hw);
-s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw);
-s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
void e1000_clear_vfta_generic(struct e1000_hw *hw);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 771a3c909c45..18a5e73b8680 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7021,6 +7021,8 @@ static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
struct e1000_adapter *adapter = netdev_priv(netdev);
int rc;
+ pdev->pme_poll = true;
+
rc = __e1000_resume(pdev);
if (rc)
return rc;
@@ -7682,7 +7684,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
- if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp)
+ if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index 969120587cad..0e72abd178ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -220,7 +220,7 @@ static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev,
netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G");
return false;
}
- if (size < (sizeof(struct i40e_package_header) +
+ if (size < (sizeof(struct i40e_package_header) + sizeof(u32) +
sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
netdev_err(netdev, "Invalid DDP profile - size is too small.");
return false;
@@ -281,7 +281,7 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
return -EINVAL;
- if (size < (sizeof(struct i40e_package_header) +
+ if (size < (sizeof(struct i40e_package_header) + sizeof(u32) +
sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
netdev_err(netdev, "Invalid DDP recipe size.");
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 5f61546f50d8..232131bedc3e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1455,7 +1455,7 @@ struct i40e_ddp_version {
struct i40e_package_header {
struct i40e_ddp_version version;
u32 segment_count;
- u32 segment_offset[1];
+ u32 segment_offset[];
};
/* Generic segment header */
@@ -1486,12 +1486,12 @@ struct i40e_profile_segment {
struct i40e_ddp_version version;
char name[I40E_DDP_NAME_SIZE];
u32 device_table_count;
- struct i40e_device_id_entry device_table[1];
+ struct i40e_device_id_entry device_table[];
};
struct i40e_section_table {
u32 section_count;
- u32 section_offset[1];
+ u32 section_offset[];
};
struct i40e_profile_section_header {
@@ -1523,7 +1523,7 @@ struct i40e_profile_aq_section {
u16 flags;
u8 param[16];
u16 datalen;
- u8 data[1];
+ u8 data[];
};
struct i40e_profile_info {
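
The one-element arrays above become C99 flexible array members, which shrinks each containing struct by one element; that is why the minimum-size checks in i40e_ddp.c earlier in this patch add sizeof(u32) back explicitly. A stand-alone sketch of the sizeof difference, with simplified field types rather than the real i40e structs:

#include <stdint.h>
#include <stdio.h>

struct hdr_old { uint32_t version; uint32_t segment_count; uint32_t segment_offset[1]; };
struct hdr_new { uint32_t version; uint32_t segment_count; uint32_t segment_offset[]; };

int main(void)
{
	/* prints "old: 12 bytes, new: 8 bytes" on common ABIs */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct hdr_old), sizeof(struct hdr_new));
	return 0;
}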
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index a1a80f13b1e8..674913184ebf 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -269,7 +269,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
struct orion_mdio_dev *dev;
int i, ret;
- type = (enum orion_mdio_bus_type)device_get_match_data(&pdev->dev);
+ type = (uintptr_t)device_get_match_data(&pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
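
The uintptr_t round-trip above avoids compiler warnings about converting a pointer straight to a narrower enum. A minimal illustration with a made-up enum; the exact warning name differs between GCC and clang:

#include <stdint.h>
#include <stdio.h>

enum bus_type { BUS_ORION, BUS_XSMI };

int main(void)
{
	/* match data is commonly a small integer stored disguised as a pointer */
	void *match_data = (void *)(uintptr_t)BUS_XSMI;

	/* enum bus_type type = (enum bus_type)match_data;  may warn: cast from
	 * pointer to smaller integer type
	 */
	enum bus_type type = (uintptr_t)match_data;	/* integer-to-integer, no warning */

	printf("type = %d\n", type);
	return 0;
}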
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h b/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h
new file mode 100644
index 000000000000..0c741e752db6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Marvell.
+ */
+#ifndef __OCTEP_CP_VERSION_H__
+#define __OCTEP_CP_VERSION_H__
+
+#define OCTEP_CP_VERSION(a, b, c) ((((a) & 0xff) << 16) + \
+ (((b) & 0xff) << 8) + \
+ ((c) & 0xff))
+
+#endif /* __OCTEP_CP_VERSION_H__ */
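
A quick sanity check of the byte packing done by OCTEP_CP_VERSION(): major, minor, and patch occupy one byte each, so 1.0.0 packs to 0x010000. Stand-alone sketch that duplicates the macro only for illustration:

#include <stdio.h>

#define OCTEP_CP_VERSION(a, b, c) ((((a) & 0xff) << 16) + \
				   (((b) & 0xff) << 8) + \
				   ((c) & 0xff))

int main(void)
{
	unsigned int v = OCTEP_CP_VERSION(1, 0, 0);

	/* prints "packed 0x010000 -> 1.0.0" */
	printf("packed 0x%06x -> %u.%u.%u\n", v,
	       (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
	return 0;
}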
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
index dab61cc1acb5..9d53c1402cb4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
@@ -37,7 +37,9 @@
#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(m) (m)
#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(m) ((m) + 8)
+#define OCTEP_CTRL_MBOX_INFO_HOST_VERSION(m) ((m) + 16)
#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS(m) ((m) + 24)
+#define OCTEP_CTRL_MBOX_INFO_FW_VERSION(m) ((m) + 136)
#define OCTEP_CTRL_MBOX_INFO_FW_STATUS(m) ((m) + 144)
#define OCTEP_CTRL_MBOX_H2FQ_INFO(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
@@ -71,7 +73,7 @@ static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 sz)
int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
{
- u64 magic_num, status;
+ u64 magic_num, status, fw_versions;
if (!mbox)
return -EINVAL;
@@ -93,6 +95,9 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
return -EINVAL;
}
+ fw_versions = readq(OCTEP_CTRL_MBOX_INFO_FW_VERSION(mbox->barmem));
+ mbox->min_fw_version = ((fw_versions & 0xffffffff00000000ull) >> 32);
+ mbox->max_fw_version = (fw_versions & 0xffffffff);
mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(mbox->barmem));
writeq(OCTEP_CTRL_MBOX_STATUS_INIT,
@@ -113,6 +118,7 @@ int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
OCTEP_CTRL_MBOX_TOTAL_INFO_SZ +
mbox->h2fq.sz;
+ writeq(mbox->version, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem));
/* ensure ready state is seen after everything is initialized */
wmb();
writeq(OCTEP_CTRL_MBOX_STATUS_READY,
@@ -258,6 +264,7 @@ int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox)
if (!mbox->barmem)
return -EINVAL;
+ writeq(0, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem));
writeq(OCTEP_CTRL_MBOX_STATUS_INVALID,
OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
/* ensure uninit state is written before uninitialization */
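
The firmware publishes both ends of its supported control-plane version range in a single 64-bit BAR word, which octep_ctrl_mbox_init() above splits into min (upper 32 bits) and max (lower 32 bits). A stand-alone sketch with a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fw_versions = 0x0000010000010200ULL;	/* assumed BAR contents */
	uint32_t min_fw = (fw_versions & 0xffffffff00000000ULL) >> 32;
	uint32_t max_fw = fw_versions & 0xffffffff;

	/* min 0x10000 (1.0.0), max 0x10200 (1.2.0) */
	printf("fw supports control plane versions %#x..%#x\n", min_fw, max_fw);
	return 0;
}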
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
index 9c4ff0fba6a0..7f8135788efc 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -121,6 +121,8 @@ struct octep_ctrl_mbox_q {
};
struct octep_ctrl_mbox {
+ /* control plane version */
+ u64 version;
/* size of bar memory */
u32 barmem_sz;
/* pointer to BAR memory */
@@ -133,6 +135,10 @@ struct octep_ctrl_mbox {
struct mutex h2fq_lock;
/* lock for f2hq */
struct mutex f2hq_lock;
+ /* Min control plane version supported by firmware */
+ u32 min_fw_version;
+ /* Max control plane version supported by firmware */
+ u32 max_fw_version;
};
/* Initialize control mbox.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
index 1cc6af2feb38..4c6d91a8c83e 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -14,6 +14,9 @@
#include "octep_main.h"
#include "octep_ctrl_net.h"
+/* Control plane version */
+#define OCTEP_CP_VERSION_CURRENT OCTEP_CP_VERSION(1, 0, 0)
+
static const u32 req_hdr_sz = sizeof(union octep_ctrl_net_req_hdr);
static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu);
static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac);
@@ -21,6 +24,18 @@ static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state);
static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info);
static atomic_t ctrl_net_msg_id;
+/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */
+static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = {
+ [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_LINK_INFO] =
+ OCTEP_CP_VERSION(1, 0, 0)
+};
+
+/* Control plane version in which OCTEP_CTRL_NET_F2H_CMD was added */
+static const u32 octep_ctrl_net_f2h_cmd_versions[OCTEP_CTRL_NET_F2H_CMD_MAX] = {
+ [OCTEP_CTRL_NET_F2H_CMD_INVALID ... OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS] =
+ OCTEP_CP_VERSION(1, 0, 0)
+};
+
static void init_send_req(struct octep_ctrl_mbox_msg *msg, void *buf,
u16 sz, int vfid)
{
@@ -41,7 +56,13 @@ static int octep_send_mbox_req(struct octep_device *oct,
struct octep_ctrl_net_wait_data *d,
bool wait_for_response)
{
- int err, ret;
+ int err, ret, cmd;
+
+	/* check if firmware is compatible with this request */
+ cmd = d->data.req.hdr.s.cmd;
+ if (octep_ctrl_net_h2f_cmd_versions[cmd] > oct->ctrl_mbox.max_fw_version ||
+ octep_ctrl_net_h2f_cmd_versions[cmd] < oct->ctrl_mbox.min_fw_version)
+ return -EOPNOTSUPP;
err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &d->msg);
if (err < 0)
@@ -84,12 +105,16 @@ int octep_ctrl_net_init(struct octep_device *oct)
/* Initialize control mbox */
ctrl_mbox = &oct->ctrl_mbox;
+ ctrl_mbox->version = OCTEP_CP_VERSION_CURRENT;
ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
ret = octep_ctrl_mbox_init(ctrl_mbox);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize control mbox\n");
return ret;
}
+ dev_info(&pdev->dev, "Control plane versions host: %llx, firmware: %x:%x\n",
+ ctrl_mbox->version, ctrl_mbox->min_fw_version,
+ ctrl_mbox->max_fw_version);
oct->ctrl_mbox_ifstats_offset = ctrl_mbox->barmem_sz;
return 0;
@@ -273,9 +298,17 @@ static int process_mbox_notify(struct octep_device *oct,
{
struct net_device *netdev = oct->netdev;
struct octep_ctrl_net_f2h_req *req;
+ int cmd;
req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg;
- switch (req->hdr.s.cmd) {
+ cmd = req->hdr.s.cmd;
+
+ /* check if we support this command */
+ if (octep_ctrl_net_f2h_cmd_versions[cmd] > OCTEP_CP_VERSION_CURRENT ||
+ octep_ctrl_net_f2h_cmd_versions[cmd] < OCTEP_CP_VERSION_CURRENT)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
if (netif_running(netdev)) {
if (req->link.state) {
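
The two version tables above gate every mailbox command on the firmware's advertised range: a request is refused with -EOPNOTSUPP when the version that introduced the command falls outside [min_fw_version, max_fw_version]. A stand-alone model of that check; userspace errno.h stands in for the kernel's error codes and the version values are examples:

#include <errno.h>
#include <stdio.h>

static int check_cmd_supported(unsigned int cmd_version,
			       unsigned int min_fw, unsigned int max_fw)
{
	if (cmd_version > max_fw || cmd_version < min_fw)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	/* command introduced in 1.0.0, firmware supports [1.0.0, 1.2.0]: ok */
	printf("in range: %d\n", check_cmd_supported(0x010000, 0x010000, 0x010200));
	/* firmware window starts at 2.0.0: command version too old, refused */
	printf("too old:  %d\n", check_cmd_supported(0x010000, 0x020000, 0x020100));
	return 0;
}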
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
index 37880dd79116..1c2ef4ee31d9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -7,6 +7,8 @@
#ifndef __OCTEP_CTRL_NET_H__
#define __OCTEP_CTRL_NET_H__
+#include "octep_cp_version.h"
+
#define OCTEP_CTRL_NET_INVALID_VFID (-1)
/* Supported commands */
@@ -39,12 +41,14 @@ enum octep_ctrl_net_h2f_cmd {
OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+ OCTEP_CTRL_NET_H2F_CMD_MAX
};
/* Supported fw to host commands */
enum octep_ctrl_net_f2h_cmd {
OCTEP_CTRL_NET_F2H_CMD_INVALID = 0,
OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS,
+ OCTEP_CTRL_NET_F2H_CMD_MAX
};
union octep_ctrl_net_req_hdr {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 5c8f9fc15ff8..237f82082ebe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -580,7 +580,9 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
- if (*features & (BIT_ULL(NPC_IPPROTO_AH) | BIT_ULL(NPC_IPPROTO_ESP)))
+ /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
+ (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
*features |= BIT_ULL(NPC_IPSEC_SPI);
/* for vlan ethertypes corresponding layer type should be in the key */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 8336cea16aff..dce3cea00032 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1905,31 +1905,16 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t
}
}
- if ((changed & NETIF_F_HW_TC) && tc) {
- if (!pfvf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable TC, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
if ((changed & NETIF_F_HW_TC) && !tc &&
- pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
+ otx2_tc_flower_rule_cnt(pfvf)) {
netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
return -EBUSY;
}
if ((changed & NETIF_F_NTUPLE) && ntuple &&
- (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
- netdev_err(netdev,
- "Can't enable NTUPLE when TC is active, disable TC and retry\n");
- return -EINVAL;
- }
-
- if ((changed & NETIF_F_HW_TC) && tc &&
- (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+ otx2_tc_flower_rule_cnt(pfvf) && !(changed & NETIF_F_HW_TC)) {
netdev_err(netdev,
- "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+ "Can't enable NTUPLE when TC flower offload is active, disable TC rules and retry\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 25e99fd2e3fd..5fd05d94de7c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -940,6 +940,15 @@ static inline u64 otx2_convert_rate(u64 rate)
return converted_rate;
}
+static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
+{
+ /* return here if MCAM entries not allocated */
+ if (!pfvf->flow_cfg)
+ return 0;
+
+ return pfvf->flow_cfg->nr_flows;
+}
+
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3d82ec890666..af8460bb257b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -212,6 +212,9 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
/* On fw_activate action, also driver is reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
ret = mlx5_load_one_devl_locked(dev, true);
+ if (ret)
+ return ret;
+ ret = mlx5_fw_reset_verify_fw_complete(dev, extack);
break;
default:
/* Unsupported action should not get to this function */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index defba5bd91d9..961f75da6227 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -6,6 +6,14 @@
#include <net/devlink.h>
+enum mlx5_devlink_resource_id {
+ MLX5_DL_RES_MAX_LOCAL_SFS = 1,
+ MLX5_DL_RES_MAX_EXTERNAL_SFS,
+
+ __MLX5_ID_RES_MAX,
+ MLX5_ID_RES_MAX = __MLX5_ID_RES_MAX - 1,
+};
+
enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index 0107e4e73bb0..415840c3ef84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -18,6 +18,7 @@ void mlx5e_reporter_tx_create(struct mlx5e_priv *priv);
void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv);
void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
+void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq);
int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index b0b429a0321e..bb11e644d24f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -2,9 +2,12 @@
// Copyright (c) 2020 Mellanox Technologies
#include "en/ptp.h"
+#include "en/health.h"
#include "en/txrx.h"
#include "en/params.h"
#include "en/fs_tt_redirect.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
struct mlx5e_ptp_fs {
struct mlx5_flow_handle *l2_rule;
@@ -19,6 +22,48 @@ struct mlx5e_ptp_params {
struct mlx5e_rq_param rq_param;
};
+struct mlx5e_ptp_port_ts_cqe_tracker {
+ u8 metadata_id;
+ bool inuse : 1;
+ struct list_head entry;
+};
+
+struct mlx5e_ptp_port_ts_cqe_list {
+ struct mlx5e_ptp_port_ts_cqe_tracker *nodes;
+ struct list_head tracker_list_head;
+ /* Sync list operations in xmit and napi_poll contexts */
+ spinlock_t tracker_list_lock;
+};
+
+static inline void
+mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
+{
+ struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];
+
+ WARN_ON_ONCE(tracker->inuse);
+ tracker->inuse = true;
+ spin_lock(&list->tracker_list_lock);
+ list_add_tail(&tracker->entry, &list->tracker_list_head);
+ spin_unlock(&list->tracker_list_lock);
+}
+
+static void
+mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
+{
+ struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];
+
+ WARN_ON_ONCE(!tracker->inuse);
+ tracker->inuse = false;
+ spin_lock(&list->tracker_list_lock);
+ list_del(&tracker->entry);
+ spin_unlock(&list->tracker_list_lock);
+}
+
+void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
+{
+ mlx5e_ptp_port_ts_cqe_list_add(ptpsq->ts_cqe_pending_list, metadata);
+}
+
struct mlx5e_skb_cb_hwtstamp {
ktime_t cqe_hwtstamp;
ktime_t port_hwtstamp;
@@ -79,75 +124,97 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
-#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
-
-static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_ci, u16 skb_id)
+static struct sk_buff *
+mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
- return (ptpsq->ts_cqe_ctr_mask && (skb_ci != skb_id));
+ return map->data[metadata];
}
-static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
+static struct sk_buff *
+mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
- u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
- u16 skb_pi = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
+ struct sk_buff *skb;
- if (PTP_WQE_CTR2IDX(skb_id - skb_ci) >= PTP_WQE_CTR2IDX(skb_pi - skb_ci))
- return true;
+ skb = map->data[metadata];
+ map->data[metadata] = NULL;
- return false;
+ return skb;
}
-static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_ci,
- u16 skb_id, int budget)
+static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map)
{
- struct skb_shared_hwtstamps hwts = {};
- struct sk_buff *skb;
+	/* Unhealthy once more than 15/16 of the map capacity cannot be reclaimed. */
+ return map->undelivered_counter > (map->capacity >> 4) * 15;
+}
- ptpsq->cq_stats->resync_event++;
+static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
+ ktime_t port_tstamp)
+{
+ struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list;
+ ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT);
+ struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
+ struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
+
+ spin_lock(&cqe_list->tracker_list_lock);
+ list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
+ struct sk_buff *skb =
+ mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
+ ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+
+ if (!dma_tstamp ||
+ ktime_after(ktime_add(dma_tstamp, timeout), port_tstamp))
+ break;
- while (skb_ci != skb_id) {
- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
- hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
- skb_tstamp_tx(skb, &hwts);
- ptpsq->cq_stats->resync_cqe++;
- napi_consume_skb(skb, budget);
- skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ metadata_map->undelivered_counter++;
+ WARN_ON_ONCE(!pos->inuse);
+ pos->inuse = false;
+ list_del(&pos->entry);
}
+ spin_unlock(&cqe_list->tracker_list_lock);
}
+#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
struct mlx5_cqe64 *cqe,
int budget)
{
- u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
- u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
+ u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+ bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
struct sk_buff *skb;
ktime_t hwtstamp;
- if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
- ptpsq->cq_stats->err_cqe++;
- goto out;
+ if (likely(pending_cqe_list->nodes[metadata_id].inuse)) {
+ mlx5e_ptp_port_ts_cqe_list_remove(pending_cqe_list, metadata_id);
+ } else {
+ /* Reclaim space in the unlikely event CQE was delivered after
+ * marking it late.
+ */
+ ptpsq->metadata_map.undelivered_counter--;
+ ptpsq->cq_stats->late_cqe++;
}
- if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_ci, skb_id)) {
- if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
- /* already handled by a previous resync */
- ptpsq->cq_stats->ooo_cqe_drop++;
- return;
- }
- mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_ci, skb_id, budget);
+ skb = mlx5e_ptp_metadata_map_remove(&ptpsq->metadata_map, metadata_id);
+
+ if (unlikely(is_err_cqe)) {
+ ptpsq->cq_stats->err_cqe++;
+ goto out;
}
- skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
hwtstamp, ptpsq->cq_stats);
ptpsq->cq_stats->cqe++;
+ mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
+ mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
+ if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
+ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
}
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
@@ -291,36 +358,86 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
- int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
- struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
+ struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist;
+ struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
+ struct mlx5e_ptp_port_ts_cqe_list *cqe_list;
+ int db_sz;
+ int md;
- ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
- GFP_KERNEL, numa);
- if (!ptpsq->skb_fifo.fifo)
+ cqe_list = kvzalloc_node(sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, numa);
+ if (!cqe_list)
return -ENOMEM;
+ ptpsq->ts_cqe_pending_list = cqe_list;
+
+ db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq),
+ 1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev,
+ ts_cqe_metadata_size2wqe_counter));
+ ptpsq->ts_cqe_ctr_mask = db_sz - 1;
+
+ cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)),
+ GFP_KERNEL, numa);
+ if (!cqe_list->nodes)
+ goto free_cqe_list;
+ INIT_LIST_HEAD(&cqe_list->tracker_list_head);
+ spin_lock_init(&cqe_list->tracker_list_lock);
+
+ metadata_freelist->data =
+ kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)),
+ GFP_KERNEL, numa);
+ if (!metadata_freelist->data)
+ goto free_cqe_list_nodes;
+ metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask;
+
+ for (md = 0; md < db_sz; ++md) {
+ cqe_list->nodes[md].metadata_id = md;
+ metadata_freelist->data[md] = md;
+ }
+ metadata_freelist->pc = db_sz;
+
+ metadata_map->data =
+ kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)),
+ GFP_KERNEL, numa);
+ if (!metadata_map->data)
+ goto free_metadata_freelist;
+ metadata_map->capacity = db_sz;
- ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
- ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
- ptpsq->skb_fifo.mask = wq_sz - 1;
- if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
- ptpsq->ts_cqe_ctr_mask =
- (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
return 0;
+
+free_metadata_freelist:
+ kvfree(metadata_freelist->data);
+free_cqe_list_nodes:
+ kvfree(cqe_list->nodes);
+free_cqe_list:
+ kvfree(cqe_list);
+ return -ENOMEM;
}
-static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
+static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map)
{
- while (*skb_fifo->pc != *skb_fifo->cc) {
- struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);
+ int idx;
+
+ for (idx = 0; idx < map->capacity; ++idx) {
+ struct sk_buff *skb = map->data[idx];
dev_kfree_skb_any(skb);
}
}
-static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
+static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq)
{
- mlx5e_ptp_drain_skb_fifo(skb_fifo);
- kvfree(skb_fifo->fifo);
+ mlx5e_ptp_drain_metadata_map(&ptpsq->metadata_map);
+ kvfree(ptpsq->metadata_map.data);
+ kvfree(ptpsq->metadata_freelist.data);
+ kvfree(ptpsq->ts_cqe_pending_list->nodes);
+ kvfree(ptpsq->ts_cqe_pending_list);
+}
+
+static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
+{
+ struct mlx5e_ptpsq *ptpsq =
+ container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
+
+ mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
}
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
@@ -348,11 +465,12 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
if (err)
goto err_free_txqsq;
- err = mlx5e_ptp_alloc_traffic_db(ptpsq,
- dev_to_node(mlx5_core_dma_dev(c->mdev)));
+ err = mlx5e_ptp_alloc_traffic_db(ptpsq, dev_to_node(mlx5_core_dma_dev(c->mdev)));
if (err)
goto err_free_txqsq;
+ INIT_WORK(&ptpsq->report_unhealthy_work, mlx5e_ptpsq_unhealthy_work);
+
return 0;
err_free_txqsq:
@@ -366,7 +484,9 @@ static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
struct mlx5_core_dev *mdev = sq->mdev;
- mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
+ if (current_work() != &ptpsq->report_unhealthy_work)
+ cancel_work_sync(&ptpsq->report_unhealthy_work);
+ mlx5e_ptp_free_traffic_db(ptpsq);
cancel_work_sync(&sq->recover_work);
mlx5e_ptp_destroy_sq(mdev, sq->sqn);
mlx5e_free_txqsq_descs(sq);
@@ -534,7 +654,10 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
/* SQ */
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
- params->log_sq_size = orig->log_sq_size;
+ params->log_sq_size =
+ min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter),
+ MLX5E_PTP_MAX_LOG_SQ_SIZE);
+ params->log_sq_size = min(params->log_sq_size, orig->log_sq_size);
mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
}
/* RQ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index cc7efde88ac3..7b700d0f956a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -7,18 +7,38 @@
#include "en.h"
#include "en_stats.h"
#include "en/txrx.h"
+#include <linux/ktime.h>
#include <linux/ptp_classify.h>
+#include <linux/time64.h>
+#include <linux/workqueue.h>
#define MLX5E_PTP_CHANNEL_IX 0
+#define MLX5E_PTP_MAX_LOG_SQ_SIZE (8U)
+#define MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT (1 * NSEC_PER_SEC)
+
+struct mlx5e_ptp_metadata_fifo {
+ u8 cc;
+ u8 pc;
+ u8 mask;
+ u8 *data;
+};
+
+struct mlx5e_ptp_metadata_map {
+ u16 undelivered_counter;
+ u16 capacity;
+ struct sk_buff **data;
+};
struct mlx5e_ptpsq {
struct mlx5e_txqsq txqsq;
struct mlx5e_cq ts_cq;
- u16 skb_fifo_cc;
- u16 skb_fifo_pc;
- struct mlx5e_skb_fifo skb_fifo;
struct mlx5e_ptp_cq_stats *cq_stats;
u16 ts_cqe_ctr_mask;
+
+ struct work_struct report_unhealthy_work;
+ struct mlx5e_ptp_port_ts_cqe_list *ts_cqe_pending_list;
+ struct mlx5e_ptp_metadata_fifo metadata_freelist;
+ struct mlx5e_ptp_metadata_map metadata_map;
};
enum {
@@ -69,12 +89,35 @@ static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
fk.ports.dst == htons(PTP_EV_PORT));
}
-static inline bool mlx5e_ptpsq_fifo_has_room(struct mlx5e_txqsq *sq)
+static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *fifo, u8 metadata)
{
- if (!sq->ptpsq)
- return true;
+ fifo->data[fifo->mask & fifo->pc++] = metadata;
+}
+
+static inline u8
+mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
+{
+ return fifo->data[fifo->mask & fifo->cc++];
+}
- return mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo);
+static inline void
+mlx5e_ptp_metadata_map_put(struct mlx5e_ptp_metadata_map *map,
+ struct sk_buff *skb, u8 metadata)
+{
+ WARN_ON_ONCE(map->data[metadata]);
+ map->data[metadata] = skb;
+}
+
+static inline bool mlx5e_ptpsq_metadata_freelist_empty(struct mlx5e_ptpsq *ptpsq)
+{
+ struct mlx5e_ptp_metadata_fifo *freelist;
+
+ if (likely(!ptpsq))
+ return false;
+
+ freelist = &ptpsq->metadata_freelist;
+
+ return freelist->pc == freelist->cc;
}
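/*
 * Standalone sketch (not the driver code; names are illustrative) of the
 * metadata free-list idea introduced above: a power-of-two ring indexed by
 * free-running 8-bit producer/consumer counters, where "empty" is pc == cc.
 */
#include <assert.h>
#include <stdint.h>

struct md_fifo {
	uint8_t cc;	/* consumer counter */
	uint8_t pc;	/* producer counter */
	uint8_t mask;	/* size - 1, size must be a power of two */
	uint8_t *data;
};

static void md_fifo_push(struct md_fifo *f, uint8_t md)
{
	f->data[f->mask & f->pc++] = md;
}

static uint8_t md_fifo_pop(struct md_fifo *f)
{
	return f->data[f->mask & f->cc++];
}

int main(void)
{
	uint8_t buf[8];
	struct md_fifo f = { .mask = 7, .data = buf };

	for (uint8_t i = 0; i < 8; i++)
		md_fifo_push(&f, i);		/* pre-fill with every metadata id */
	assert(md_fifo_pop(&f) == 0);		/* ids come back in FIFO order */
	md_fifo_push(&f, 0);			/* returning an id refills the list */
	assert(f.pc != f.cc);			/* non-empty as long as pc != cc */
	return 0;
}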
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
@@ -89,6 +132,8 @@ void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
const struct mlx5e_profile *profile);
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
+void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata);
+
enum {
MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0),
MLX5E_SKB_CB_PORT_HWTSTAMP = BIT(1),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index b35ff289af49..ff8242f67c54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -164,6 +164,43 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
return err;
}
+static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
+{
+ struct mlx5e_ptpsq *ptpsq = ctx;
+ struct mlx5e_channels *chs;
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int carrier_ok;
+ int err;
+
+ if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &ptpsq->txqsq.state))
+ return 0;
+
+ priv = ptpsq->txqsq.priv;
+
+ mutex_lock(&priv->state_lock);
+ chs = &priv->channels;
+ netdev = priv->netdev;
+
+ carrier_ok = netif_carrier_ok(netdev);
+ netif_carrier_off(netdev);
+
+ mlx5e_deactivate_priv_channels(priv);
+
+ mlx5e_ptp_close(chs->ptp);
+ err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
+
+ mlx5e_activate_priv_channels(priv);
+
+ /* restore the carrier state if it was up before the recovery */
+ if (carrier_ok)
+ netif_carrier_on(netdev);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
/* state lock cannot be grabbed within this function.
 * It can cause a deadlock or a read-after-free.
*/
@@ -516,6 +553,15 @@ static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlin
return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
}
+static int mlx5e_tx_reporter_ptpsq_unhealthy_dump(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5e_ptpsq *ptpsq = ctx;
+
+ return mlx5e_tx_reporter_dump_sq(priv, fmsg, &ptpsq->txqsq);
+}
+
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
@@ -621,6 +667,25 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
return to_ctx.status;
}
+void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
+{
+ struct mlx5e_ptp_metadata_map *map = &ptpsq->metadata_map;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
+ struct mlx5e_cq *ts_cq = &ptpsq->ts_cq;
+ struct mlx5e_priv *priv = txqsq->priv;
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = ptpsq;
+ err_ctx.recover = mlx5e_tx_reporter_ptpsq_unhealthy_recover;
+ err_ctx.dump = mlx5e_tx_reporter_ptpsq_unhealthy_dump;
+ snprintf(err_str, sizeof(err_str),
+ "Unhealthy TX port TS queue: %d, SQ: 0x%x, CQ: 0x%x, Undelivered CQEs: %u Map Capacity: %u",
+ txqsq->ch_ix, txqsq->sqn, ts_cq->mcq.cqn, map->undelivered_counter, map->capacity);
+
+ mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
+}
+
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
.name = "tx",
.recover = mlx5e_tx_reporter_recover,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 04195a673a6b..dff02434ff45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -2061,7 +2061,8 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
struct mlx5e_params new_params;
int err;
- if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+ if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) ||
+ !MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
return -EOPNOTSUPP;
/* Don't allow changing the PTP state if HTB offload is active, because
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 07b84d668fcc..52141729444e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -2142,9 +2142,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
- { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
- { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
- { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
};
static const struct counter_desc ptp_rq_stats_desc[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 1ff8a06027dc..409e9a47e433 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -449,9 +449,7 @@ struct mlx5e_ptp_cq_stats {
u64 err_cqe;
u64 abort;
u64 abort_abs_diff_ns;
- u64 resync_cqe;
- u64 resync_event;
- u64 ooo_cqe_drop;
+ u64 late_cqe;
};
struct mlx5e_rep_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index c7eb6b238c2b..d41435c22ce5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -372,7 +372,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
const struct mlx5e_tx_attr *attr,
const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
- bool xmit_more)
+ struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
bool send_doorbell;
@@ -394,11 +394,16 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_tx_check_stop(sq);
- if (unlikely(sq->ptpsq)) {
+ if (unlikely(sq->ptpsq &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+ u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
+
mlx5e_skb_cb_hwtstamp_init(skb);
- mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
+ mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
+ mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
+ metadata_index);
if (!netif_tx_queue_stopped(sq->txq) &&
- !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) {
+ mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
netif_tx_stop_queue(sq->txq);
sq->stats->stopped++;
}
@@ -483,13 +488,16 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);
+ mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, eseg, xmit_more);
return;
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
+ if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
+ be32_to_cpu(eseg->flow_table_metadata));
mlx5e_tx_flush(sq);
}
@@ -645,9 +653,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
- if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
- ptpsq->ts_cqe_ctr_mask);
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ eseg->flow_table_metadata =
+ cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
}
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
@@ -766,7 +774,7 @@ void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
{
if (netif_tx_queue_stopped(sq->txq) &&
mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
- mlx5e_ptpsq_fifo_has_room(sq) &&
+ !mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq) &&
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
sq->stats->wake++;
@@ -1031,7 +1039,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);
+ mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, eseg, xmit_more);
return;
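/*
 * Standalone sketch (illustrative only, not the mlx5 datapath) of the id
 * round trip used in the transmit changes above: pop a free id, remember
 * the in-flight packet under that id, and push the id back on completion
 * or on the transmit error path.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CAP 8

static uint8_t freelist[CAP] = { 0, 1, 2, 3, 4, 5, 6, 7 };
static uint8_t fl_pc = CAP, fl_cc;	/* free list starts full */
static void *map[CAP];			/* id -> in-flight packet */

static uint8_t id_get(void)   { return freelist[(CAP - 1) & fl_cc++]; }
static void id_put(uint8_t i) { freelist[(CAP - 1) & fl_pc++] = i; }

int main(void)
{
	int pkt = 42;
	uint8_t id = id_get();		/* transmit path: reserve an id */

	assert(!map[id]);		/* slot must be free before reuse */
	map[id] = &pkt;

	map[id] = NULL;			/* completion (or error): release the id */
	id_put(id);
	assert(fl_pc - fl_cc == CAP);	/* free list is full again */
	return 0;
}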
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index e391535e1ab1..46b8c60ac39a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -535,6 +535,28 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}
+static bool
+esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+{
+ bool vf_dest = false, pf_dest = false;
+ int i;
+
+ for (i = 0; i < max_dest; i++) {
+ if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ continue;
+
+ if (dests[i].vport.num == MLX5_VPORT_UPLINK)
+ pf_dest = true;
+ else
+ vf_dest = true;
+
+ if (vf_dest && pf_dest)
+ return true;
+ }
+
+ return false;
+}
+
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
@@ -671,6 +693,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
rule = ERR_PTR(err);
goto err_create_goto_table;
}
+
+ /* Header rewrite with combined wire+loopback in FDB is not allowed */
+ if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
+ esw_dests_to_vf_pf_vports(dest, i)) {
+ esw_warn(esw->dev,
+ "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+ rule = ERR_PTR(-EINVAL);
+ goto err_esw_get;
+ }
}
if (esw_attr->decap_pkt_reformat)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index fb2035a5ec99..58f4c0d0fafa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -143,90 +143,86 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
if (MLX5_CAP_GEN(dev, port_selection_cap)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_PORT_SELECTION, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, hca_cap_2)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_GENERAL_2, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, pg)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ODP, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, atomic)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ATOMIC, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, roce)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ROCE, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, nic_flow_table) ||
MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_FLOW_TABLE, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_ESWITCH_MANAGER(dev)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
- if (err)
- return err;
- }
-
- if (MLX5_CAP_GEN(dev, vector_calc)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ESWITCH, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, qos)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_QOS, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, debug))
- mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);
+ mlx5_core_get_caps_mode(dev, MLX5_CAP_DEBUG, HCA_CAP_OPMOD_GET_CUR);
if (MLX5_CAP_GEN(dev, pcam_reg))
mlx5_get_pcam_reg(dev);
if (MLX5_CAP_GEN(dev, mcam_reg)) {
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
- mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
}
@@ -234,57 +230,52 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
mlx5_get_qcam_reg(dev);
if (MLX5_CAP_GEN(dev, device_memory)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_MEM);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_DEV_MEM, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, event_cap)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_DEV_EVENT, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_TLS, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN_64(dev, general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_VDPA_EMULATION, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, ipsec_offload)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_IPSEC, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, crypto)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_CRYPTO);
- if (err)
- return err;
- }
-
- if (MLX5_CAP_GEN(dev, shampo)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_CRYPTO, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN_64(dev, general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_MACSEC);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_MACSEC, HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, adv_virtualization)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ADV_VIRTUALIZATION);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ADV_VIRTUALIZATION,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
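/*
 * Sketch (placeholder names, not the mlx5 API) of the shift above: callers
 * now state the capability mode explicitly and fetch only the "current"
 * values, instead of relying on a helper that queries every mode.
 */
#include <stdio.h>

enum cap_mode { CAP_MODE_MAX = 0, CAP_MODE_CUR = 1 };

/* pretend firmware query: returns 0 on success */
static int get_caps_mode(int cap_type, enum cap_mode mode)
{
	printf("cap %d: querying %s values\n", cap_type,
	       mode == CAP_MODE_CUR ? "current" : "maximum");
	return 0;
}

int main(void)
{
	/* probe a couple of capability types, current values only */
	for (int cap_type = 1; cap_type <= 2; cap_type++)
		if (get_caps_mode(cap_type, CAP_MODE_CUR))
			return 1;
	return 0;
}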
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 4804990b7f22..e87766f91150 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -127,17 +127,23 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
goto out;
+ if (!reset_state)
+ return 0;
+
switch (reset_state) {
case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION:
case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS:
- NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered");
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset still in progress");
return -EBUSY;
- case MLX5_MFRL_REG_RESET_STATE_TIMEOUT:
- NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout");
+ case MLX5_MFRL_REG_RESET_STATE_NEG_TIMEOUT:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset negotiation timeout");
return -ETIMEDOUT;
case MLX5_MFRL_REG_RESET_STATE_NACK:
NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset");
return -EPERM;
+ case MLX5_MFRL_REG_RESET_STATE_UNLOAD_TIMEOUT:
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset unload timeout");
+ return -ETIMEDOUT;
}
out:
@@ -151,7 +157,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
- int err;
+ int err, rst_res;
set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
@@ -164,13 +170,34 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
return 0;
clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
- if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state))
- return mlx5_fw_reset_get_reset_state_err(dev, extack);
+ if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state)) {
+ rst_res = mlx5_fw_reset_get_reset_state_err(dev, extack);
+ return rst_res ? rst_res : err;
+ }
NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed");
return mlx5_cmd_check(dev, err, in, out);
}
+int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev,
+ struct netlink_ext_ack *extack)
+{
+ u8 rst_state;
+ int err;
+
+ err = mlx5_fw_reset_get_reset_state_err(dev, extack);
+ if (err)
+ return err;
+
+ rst_state = mlx5_get_fw_rst_state(dev);
+ if (!rst_state)
+ return 0;
+
+ mlx5_core_err(dev, "Sync reset did not complete, state=%d\n", rst_state);
+ NL_SET_ERR_MSG_MOD(extack, "Sync reset did not complete successfully");
+ return rst_state;
+}
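/*
 * Minimal sketch (illustrative state names) of translating a firmware
 * reset-state register into an errno-style result, mirroring the extended
 * switch in mlx5_fw_reset_get_reset_state_err() above.
 */
#include <assert.h>
#include <errno.h>

enum rst_state {
	RST_OK,
	RST_IN_NEGOTIATION,
	RST_IN_PROGRESS,
	RST_NEG_TIMEOUT,
	RST_NACK,
	RST_UNLOAD_TIMEOUT,
};

static int rst_state_to_err(enum rst_state s)
{
	switch (s) {
	case RST_OK:
		return 0;
	case RST_IN_NEGOTIATION:
	case RST_IN_PROGRESS:
		return -EBUSY;		/* reset still running */
	case RST_NEG_TIMEOUT:
	case RST_UNLOAD_TIMEOUT:
		return -ETIMEDOUT;	/* negotiation or unload timed out */
	case RST_NACK:
		return -EPERM;		/* another host refused the reset */
	}
	return 0;
}

int main(void)
{
	assert(rst_state_to_err(RST_OK) == 0);
	assert(rst_state_to_err(RST_NACK) == -EPERM);
	return 0;
}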
+
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
{
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index c57465595f7c..ea527d06a85f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -12,6 +12,8 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev,
+ struct netlink_ext_ack *extack);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f4fe06a5042e..15561965d2af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -361,9 +361,8 @@ void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay);
-static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
- enum mlx5_cap_type cap_type,
- enum mlx5_cap_mode cap_mode)
+int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
{
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
@@ -1620,21 +1619,24 @@ static int mlx5_query_hca_caps_light(struct mlx5_core_dev *dev)
return err;
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, nic_flow_table) ||
MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
if (MLX5_CAP_GEN_64(dev, general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_VDPA_EMULATION,
+ HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
}
@@ -1714,7 +1716,6 @@ static const int types[] = {
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
- MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
MLX5_CAP_DEBUG,
MLX5_CAP_DEV_MEM,
@@ -1723,7 +1724,6 @@ static const int types[] = {
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
MLX5_CAP_PORT_SELECTION,
- MLX5_CAP_DEV_SHAMPO,
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 8e2028d20a9e..124352459c23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -174,6 +174,9 @@ static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
#define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode);
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 8e2abbab05f0..05e148db9889 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -129,7 +129,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
err = auxiliary_device_add(&sf_dev->adev);
if (err) {
- put_device(&sf_dev->adev.dev);
+ auxiliary_device_uninit(&sf_dev->adev);
goto add_err;
}
@@ -167,7 +167,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
if (!max_functions)
return 0;
- base_id = MLX5_CAP_GEN(table->dev, sf_base_id);
+ base_id = mlx5_sf_start_function_id(table->dev);
if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
return 0;
@@ -185,7 +185,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
else
mlx5_core_err(table->dev,
- "SF DEV: teardown state for invalid dev index=%d fn_id=0x%x\n",
+ "SF DEV: teardown state for invalid dev index=%d sfnum=0x%x\n",
sf_index, event->sw_function_id);
break;
case MLX5_VHCA_STATE_ACTIVE:
@@ -209,7 +209,7 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
int i;
max_functions = mlx5_sf_max_functions(dev);
- function_id = MLX5_CAP_GEN(dev, sf_base_id);
+ function_id = mlx5_sf_start_function_id(dev);
/* Arm the vhca context as the vhca event notifier */
for (i = 0; i < max_functions; i++) {
err = mlx5_vhca_event_arm(dev, function_id);
@@ -234,7 +234,7 @@ static void mlx5_sf_dev_add_active_work(struct work_struct *work)
int i;
max_functions = mlx5_sf_max_functions(dev);
- function_id = MLX5_CAP_GEN(dev, sf_base_id);
+ function_id = mlx5_sf_start_function_id(dev);
for (i = 0; i < max_functions; i++, function_id++) {
if (table->stop_active_wq)
return;
@@ -299,7 +299,7 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
unsigned int max_sfs;
int err;
- if (!mlx5_sf_dev_supported(dev) || !mlx5_vhca_event_supported(dev))
+ if (!mlx5_sf_dev_supported(dev))
return;
table = kzalloc(sizeof(*table), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index 17aa348989cb..1f613320fe07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -9,6 +9,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "diag/sf_tracepoint.h"
+#include "devlink.h"
struct mlx5_sf_hw {
u32 usr_sfnum;
@@ -243,31 +244,61 @@ static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc)
kfree(hwc->sfs);
}
+static void mlx5_sf_hw_table_res_unregister(struct mlx5_core_dev *dev)
+{
+ devl_resources_unregister(priv_to_devlink(dev));
+}
+
+static int mlx5_sf_hw_table_res_register(struct mlx5_core_dev *dev, u16 max_fn,
+ u16 max_ext_fn)
+{
+ struct devlink_resource_size_params size_params;
+ struct devlink *devlink = priv_to_devlink(dev);
+ int err;
+
+ devlink_resource_size_params_init(&size_params, max_fn, max_fn, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devl_resource_register(devlink, "max_local_SFs", max_fn, MLX5_DL_RES_MAX_LOCAL_SFS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP, &size_params);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, max_ext_fn, max_ext_fn, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ return devl_resource_register(devlink, "max_external_SFs", max_ext_fn,
+ MLX5_DL_RES_MAX_EXTERNAL_SFS, DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_hw_table *table;
u16 max_ext_fn = 0;
u16 ext_base_id = 0;
- u16 max_fn = 0;
u16 base_id;
+ u16 max_fn;
int err;
if (!mlx5_vhca_event_supported(dev))
return 0;
- if (mlx5_sf_supported(dev))
- max_fn = mlx5_sf_max_functions(dev);
+ max_fn = mlx5_sf_max_functions(dev);
err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id);
if (err)
return err;
+ if (mlx5_sf_hw_table_res_register(dev, max_fn, max_ext_fn))
+ mlx5_core_dbg(dev, "failed to register max SFs resources");
+
if (!max_fn && !max_ext_fn)
return 0;
table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
+ if (!table) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
mutex_init(&table->table_lock);
table->dev = dev;
@@ -291,6 +322,8 @@ ext_err:
table_err:
mutex_destroy(&table->table_lock);
kfree(table);
+alloc_err:
+ mlx5_sf_hw_table_res_unregister(dev);
return err;
}
@@ -299,12 +332,14 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
if (!table)
- return;
+ goto res_unregister;
- mutex_destroy(&table->table_lock);
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
+ mutex_destroy(&table->table_lock);
kfree(table);
+res_unregister:
+ mlx5_sf_hw_table_res_unregister(dev);
}
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 9dfe7148199f..faa63ea9b83e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1887,6 +1887,46 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);
+/* Ignore Action
+ * -------------
+ * The ignore action is used to ignore basic switching functions such as
+ * learning on a per-packet basis.
+ */
+
+#define MLXSW_AFA_IGNORE_CODE 0x0F
+#define MLXSW_AFA_IGNORE_SIZE 1
+
+/* afa_ignore_disable_learning
+ * Disable learning on ingress.
+ */
+MLXSW_ITEM32(afa, ignore, disable_learning, 0x00, 29, 1);
+
+/* afa_ignore_disable_security
+ * Disable security lookup on ingress.
+ * Reserved when Spectrum-1.
+ */
+MLXSW_ITEM32(afa, ignore, disable_security, 0x00, 28, 1);
+
+static void mlxsw_afa_ignore_pack(char *payload, bool disable_learning,
+ bool disable_security)
+{
+ mlxsw_afa_ignore_disable_learning_set(payload, disable_learning);
+ mlxsw_afa_ignore_disable_security_set(payload, disable_security);
+}
+
+int mlxsw_afa_block_append_ignore(struct mlxsw_afa_block *block,
+ bool disable_learning, bool disable_security)
+{
+ char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_IGNORE_CODE,
+ MLXSW_AFA_IGNORE_SIZE);
+
+ if (IS_ERR(act))
+ return PTR_ERR(act);
+ mlxsw_afa_ignore_pack(act, disable_learning, disable_security);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_ignore);
+
/* MC Routing Action
* -----------------
* The Multicast router action. Can be used by RMFT_V2 - Router Multicast
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index db58037be46e..0ead3a212de8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -89,6 +89,8 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
struct netlink_ext_ack *extack);
int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_ignore(struct mlxsw_afa_block *block,
+ bool disable_learning, bool disable_security);
int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
u16 expected_irif, u16 min_mtu,
bool rmid_valid, u32 kvdl_index);
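/*
 * Sketch (plain bit operations, not the MLXSW_ITEM32 helpers) of how the
 * ignore action payload above packs its two flags into the first 32-bit
 * word: disable_learning at bit 29, disable_security at bit 28.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t ignore_pack(bool disable_learning, bool disable_security)
{
	uint32_t word = 0;

	word |= (uint32_t)disable_learning << 29;
	word |= (uint32_t)disable_security << 28;
	return word;
}

int main(void)
{
	assert(ignore_pack(true, true) == (UINT32_C(3) << 28));
	assert(ignore_pack(true, false) == (UINT32_C(1) << 29));
	return 0;
}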
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 8da7bb04fc3a..02ca2871b6f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -1050,6 +1050,9 @@ int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u16 fid, struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_ignore(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool disable_learning, bool disable_security);
int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_flow_block *block,
@@ -1268,7 +1271,6 @@ int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f);
/* spectrum_fid.c */
-bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index);
int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 186161a3459d..7c59c8a13584 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -775,6 +775,15 @@ int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
return mlxsw_afa_block_append_fid_set(rulei->act_block, fid, extack);
}
+int mlxsw_sp_acl_rulei_act_ignore(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule_info *rulei,
+ bool disable_learning, bool disable_security)
+{
+ return mlxsw_afa_block_append_ignore(rulei->act_block,
+ disable_learning,
+ disable_security);
+}
+
int mlxsw_sp_acl_rulei_act_sample(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_flow_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index b6ee2d658b0c..9df098474743 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -137,16 +137,6 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
};
-bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
-{
- enum mlxsw_sp_fid_type fid_type = MLXSW_SP_FID_TYPE_DUMMY;
- struct mlxsw_sp_fid_family *fid_family;
-
- fid_family = mlxsw_sp->fid_core->fid_family_arr[fid_type];
-
- return fid_family->start_index == fid_index;
-}
-
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index af3f57d017ec..9fd1ca079258 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -160,6 +160,16 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
*/
rulei->egress_bind_blocker = 1;
+ /* Ignore learning and security lookup as redirection
+ * using ingress filters happens before the bridge.
+ */
+ err = mlxsw_sp_acl_rulei_act_ignore(mlxsw_sp, rulei,
+ true, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot append ignore action");
+ return err;
+ }
+
fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 3662b9da5489..6c749c148148 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -3066,9 +3066,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
- if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
- goto just_remove;
-
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
@@ -3136,9 +3133,6 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove;
}
- if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
- goto just_remove;
-
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
if (!mlxsw_sp_port_vlan) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 48f330d59284..4a16ebff3d1d 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2295,6 +2295,46 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
return 0;
}
+void mana_query_gf_stats(struct mana_port_context *apc)
+{
+ struct mana_query_gf_stat_resp resp = {};
+ struct mana_query_gf_stat_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
+ sizeof(req), sizeof(resp));
+ req.req_stats = STATISTICS_FLAGS_HC_TX_BYTES |
+ STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
+ STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
+ STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
+ STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
+ STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
+ STATISTICS_FLAGS_HC_TX_BCAST_BYTES;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err) {
+ netdev_err(ndev, "Failed to query GF stats: %d\n", err);
+ return;
+ }
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return;
+ }
+
+ apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
+ apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
+ apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
+ apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
+ apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
+ apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
+ apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+}
+
static int mana_init_port(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 0dc78679f620..607150165ab4 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -13,6 +13,19 @@ static const struct {
} mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+ {"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
+ {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_tx_ucast_pkts)},
+ {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_tx_ucast_bytes)},
+ {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_tx_bcast_pkts)},
+ {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_tx_bcast_bytes)},
+ {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+ hc_tx_mcast_pkts)},
+ {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ hc_tx_mcast_bytes)},
{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
tx_cqe_unknown_type)},
@@ -114,6 +127,8 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
if (!apc->port_is_up)
return;
+ /* refresh the counters from the hardware (GDMA) stats before dumping them */
+ mana_query_gf_stats(apc);
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
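/*
 * Standalone sketch of the {name, offset} stats-table pattern above:
 * ethtool-style dumps walk a table of offsets into one stats struct
 * instead of naming every field by hand. Names here are made up.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t stop_queue;
	uint64_t wake_queue;
	uint64_t hc_tx_bytes;
};

static const struct {
	const char *name;
	size_t offset;
} demo_desc[] = {
	{ "stop_queue",  offsetof(struct demo_stats, stop_queue) },
	{ "wake_queue",  offsetof(struct demo_stats, wake_queue) },
	{ "hc_tx_bytes", offsetof(struct demo_stats, hc_tx_bytes) },
};

int main(void)
{
	struct demo_stats stats = { .hc_tx_bytes = 1500 };
	const char *base = (const char *)&stats;

	for (size_t q = 0; q < sizeof(demo_desc) / sizeof(demo_desc[0]); q++)
		printf("%s: %llu\n", demo_desc[q].name,
		       (unsigned long long)*(const uint64_t *)(base + demo_desc[q].offset));
	return 0;
}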
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 7b0e390c0b07..0e265ed1f501 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -60,7 +60,7 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
#define QED_VF_CHANNEL_MSLEEP_DELAY 25
-static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
@@ -72,9 +72,6 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
/* output tlvs list */
qed_dp_tlv_list(p_hwfn, p_req);
- /* need to add the END TLV to the message size */
- resp_size += sizeof(struct channel_list_end_tlv);
-
/* Send TLVs over HW channel */
memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
trigger.vf_pf_msg_valid = 1;
@@ -172,7 +169,7 @@ static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
rc = -EAGAIN;
@@ -301,7 +298,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
/* send acquire request */
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
/* Re-try acquire in case of vf-pf hw channel timeout */
if (retry_cnt && rc == -EBUSY) {
@@ -705,7 +702,7 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
- rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status);
if (rc)
goto exit;
@@ -772,7 +769,7 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->queue_start;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -822,7 +819,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -867,7 +864,7 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->queue_start;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -918,7 +915,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -968,7 +965,7 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -997,7 +994,7 @@ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1075,12 +1072,10 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
struct vfpf_vport_update_tlv *req;
struct pfvf_def_resp_tlv *resp;
u8 update_rx, update_tx;
- u32 resp_size = 0;
u16 size, tlv;
int rc;
resp = &p_iov->pf2vf_reply->default_resp;
- resp_size = sizeof(*resp);
update_rx = p_params->update_vport_active_rx_flg;
update_tx = p_params->update_vport_active_tx_flg;
@@ -1096,7 +1091,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
if (update_rx) {
p_act_tlv->update_rx = update_rx;
@@ -1116,7 +1110,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
tlv, size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
}
@@ -1127,7 +1120,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
memcpy(p_mcast_tlv->bins, p_params->bins,
sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
@@ -1142,7 +1134,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
size = sizeof(struct vfpf_vport_update_accept_param_tlv);
p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
if (update_rx) {
p_accept_tlv->update_rx_mode = update_rx;
@@ -1166,7 +1157,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
p_rss_tlv = qed_add_tlv(p_hwfn,
&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_RSS, size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
if (rss_params->update_rss_config)
p_rss_tlv->update_rss_flags |=
@@ -1203,7 +1193,6 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
- resp_size += sizeof(struct pfvf_def_resp_tlv);
p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
p_any_vlan_tlv->update_accept_any_vlan_flg =
p_params->update_accept_any_vlan_flg;
@@ -1213,7 +1202,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1245,7 +1234,7 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1303,7 +1292,7 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1332,7 +1321,7 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1364,7 +1353,7 @@ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->read_coal_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
@@ -1402,7 +1391,7 @@ qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
p_resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status);
qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -1433,7 +1422,7 @@ qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
- rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status);
if (rc)
goto exit;
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index ace99c62d03a..9adec91f35e9 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -403,7 +403,7 @@ static struct serdev_device_driver qca_uart_driver = {
.remove = qca_uart_remove,
.driver = {
.name = QCAUART_DRV_NAME,
- .of_match_table = of_match_ptr(qca_uart_of_match),
+ .of_match_table = qca_uart_of_match,
},
};
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 3b26f1d86beb..e1c4a11c1f18 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1144,8 +1144,7 @@ static int smsc9420_mii_init(struct net_device *dev)
goto err_out_1;
}
pd->mii_bus->name = DRV_MDIONAME;
- snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x",
- (pd->pdev->bus->number << 8) | pd->pdev->devfn);
+ snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(pd->pdev));
pd->mii_bus->priv = pd;
pd->mii_bus->read = smsc9420_mii_read;
pd->mii_bus->write = smsc9420_mii_write;
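/*
 * Sketch of the value the pci_dev_id() conversions above compute: a 16-bit
 * id with the bus number in the high byte and devfn in the low byte. This
 * is only an illustration of the encoding the helper returns.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t demo_pci_dev_id(uint8_t bus, uint8_t devfn)
{
	return (uint16_t)(bus << 8) | devfn;
}

int main(void)
{
	/* bus 0x03, device 0x1c, function 2 -> devfn 0xe2 -> id 0x03e2 */
	assert(demo_pci_dev_id(0x03, 0xe2) == 0x03e2);
	return 0;
}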
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index c67171975d5c..1f5293c8cc04 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -438,6 +438,8 @@ struct dma_features {
unsigned int tbssel;
/* Number of Auxiliary Snapshot Inputs */
unsigned int aux_snapshot_n;
+ /* Timestamp System Time Source */
+ unsigned int tssrc;
};
/* RX Buffer size must be a multiple of 4/8/16 bytes */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index ce67b178c2d5..7f68bef456b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -123,6 +123,8 @@
#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_SAVLANINS BIT(27)
+#define XGMAC_HWFEAT_TSSTSSEL GENMASK(26, 25)
+#define XGMAC_HWFEAT_ADDMACADRSEL GENMASK(22, 18)
#define XGMAC_HWFEAT_RXCOESEL BIT(16)
#define XGMAC_HWFEAT_TXCOESEL BIT(14)
#define XGMAC_HWFEAT_EEESEL BIT(13)
@@ -133,7 +135,9 @@
#define XGMAC_HWFEAT_MMCSEL BIT(8)
#define XGMAC_HWFEAT_MGKSEL BIT(7)
#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_SMASEL BIT(5)
#define XGMAC_HWFEAT_VLHASH BIT(4)
+#define XGMAC_HWFEAT_HDSEL BIT(3)
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
#define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index b09395f5edcb..3aacf791efeb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -391,9 +391,11 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
{
u32 hw_cap;
- /* MAC HW feature 0 */
+ /* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
+ dma_cap->tssrc = (hw_cap & XGMAC_HWFEAT_TSSTSSEL) >> 25;
+ dma_cap->multi_addr = (hw_cap & XGMAC_HWFEAT_ADDMACADRSEL) >> 18;
dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
@@ -404,7 +406,9 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->sma_mdio = (hw_cap & XGMAC_HWFEAT_SMASEL) >> 5;
dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
+ dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
/* MAC HW feature 1 */
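/*
 * Sketch of the mask-and-shift decoding added above: a 2-bit timestamp
 * source field at bits 26:25 and a 5-bit "additional MAC address" count
 * at bits 22:18 pulled out of one hardware feature word. Values made up.
 */
#include <assert.h>
#include <stdint.h>

#define HWFEAT_TSSTSSEL		(UINT32_C(0x3) << 25)	/* GENMASK(26, 25) */
#define HWFEAT_ADDMACADRSEL	(UINT32_C(0x1f) << 18)	/* GENMASK(22, 18) */

int main(void)
{
	uint32_t hw_cap = (UINT32_C(2) << 25) | (UINT32_C(31) << 18);
	unsigned int tssrc = (hw_cap & HWFEAT_TSSTSSEL) >> 25;
	unsigned int multi_addr = (hw_cap & HWFEAT_ADDMACADRSEL) >> 18;

	assert(tssrc == 2);		/* "External" in the debugfs table below */
	assert(multi_addr == 31);	/* 31 additional MAC address registers */
	return 0;
}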
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 351eca6109e0..733b5e900817 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -6237,6 +6237,12 @@ DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
+ static const char * const dwxgmac_timestamp_source[] = {
+ "None",
+ "Internal",
+ "External",
+ "Both",
+ };
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -6257,8 +6263,13 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.half_duplex) ? "Y" : "N");
seq_printf(seq, "\tHash Filter: %s\n",
(priv->dma_cap.hash_filter) ? "Y" : "N");
- seq_printf(seq, "\tMultiple MAC address registers: %s\n",
- (priv->dma_cap.multi_addr) ? "Y" : "N");
+ if (priv->plat->has_xgmac)
+ seq_printf(seq,
+ "\tNumber of Additional MAC address registers: %d\n",
+ priv->dma_cap.multi_addr);
+ else
+ seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+ (priv->dma_cap.multi_addr) ? "Y" : "N");
seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
(priv->dma_cap.pcs) ? "Y" : "N");
seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
@@ -6273,12 +6284,16 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.time_stamp) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
(priv->dma_cap.atime_stamp) ? "Y" : "N");
+ if (priv->plat->has_xgmac)
+ seq_printf(seq, "\tTimestamp System Time Source: %s\n",
+ dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
(priv->dma_cap.eee) ? "Y" : "N");
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
- if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
+ priv->plat->has_xgmac) {
seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
(priv->dma_cap.rx_coe) ? "Y" : "N");
} else {
@@ -6286,9 +6301,9 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
(priv->dma_cap.rx_coe_type1) ? "Y" : "N");
seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
(priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
+ (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
}
- seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
- (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
priv->dma_cap.number_rx_channel);
seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 734a817d3c94..a9a6670b5ff1 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -124,7 +124,7 @@ static void vsw_set_rx_mode(struct net_device *dev)
return sunvnet_set_rx_mode_common(dev, port->vp);
}
-int ldmvsw_open(struct net_device *dev)
+static int ldmvsw_open(struct net_device *dev)
{
struct vnet_port *port = netdev_priv(dev);
struct vio_driver_state *vio = &port->vio;
@@ -136,7 +136,6 @@ int ldmvsw_open(struct net_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(ldmvsw_open);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vsw_poll_controller(struct net_device *dev)
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index b50be67b398b..14cf6ecf6d0d 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -667,8 +667,7 @@ static int tc_mii_init(struct net_device *dev)
lp->mii_bus->name = "tc35815_mii_bus";
lp->mii_bus->read = tc_mdio_read;
lp->mii_bus->write = tc_mdio_write;
- snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x",
- (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn);
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(lp->pci_dev));
lp->mii_bus->priv = dev;
lp->mii_bus->parent = &lp->pci_dev->dev;
err = mdiobus_register(lp->mii_bus);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index cc2f325a52f7..fe20f02ecb3a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -266,8 +266,7 @@ int ngbe_mdio_init(struct wx *wx)
mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45;
}
- snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x",
- (pdev->bus->number << 8) | pdev->devfn);
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev));
ret = devm_mdiobus_register(&pdev->dev, mii_bus);
if (ret)
return ret;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 144ec756c796..ae60817ec5c2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -518,14 +518,8 @@ static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
static void count_tx(struct net_device *dev, int ret, int len)
{
- if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
-
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->tx_packets);
- u64_stats_add(&stats->tx_bytes, len);
- u64_stats_update_end(&stats->syncp);
- }
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
+ dev_sw_netstats_tx_add(dev, 1, len);
}
static void macsec_encrypt_done(void *data, int err)
@@ -827,12 +821,7 @@ static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
static void count_rx(struct net_device *dev, int len)
{
- struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
-
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->rx_packets);
- u64_stats_add(&stats->rx_bytes, len);
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_rx_add(dev, len);
}
static void macsec_decrypt_done(void *data, int err)
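
The two macsec hunks above drop the open-coded per-CPU tstats updates in favour of the dev_sw_netstats_tx_add()/dev_sw_netstats_rx_add() helpers. As a rough sketch mirroring the removed lines (not the helpers' exact source), the TX variant boils down to:

static void sketch_sw_netstats_tx_add(struct net_device *dev,
				      unsigned int packets, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	u64_stats_add(&tstats->tx_packets, packets);
	u64_stats_add(&tstats->tx_bytes, len);
	u64_stats_update_end(&tstats->syncp);
}

so count_tx() and count_rx() keep the same accounting while losing the boilerplate.
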
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index 9021b96d4f9d..dc3962b2aa6b 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -216,7 +216,7 @@ static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs,
/* The PCS needs to be configured manually only
* when not operating in in-band mode
*/
- if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED)
+ if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
return;
if (duplex == DUPLEX_HALF)
diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c
index da512fab0eb0..8a20d9889f10 100644
--- a/drivers/net/phy/mediatek-ge-soc.c
+++ b/drivers/net/phy/mediatek-ge-soc.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/phy.h>
+#include <linux/regmap.h>
#define MTK_GPHY_ID_MT7981 0x03a29461
#define MTK_GPHY_ID_MT7988 0x03a29481
@@ -206,9 +209,42 @@
#define MTK_PHY_DA_TX_R50_PAIR_C 0x53f
#define MTK_PHY_DA_TX_R50_PAIR_D 0x540
+/* Registers on MDIO_MMD_VEND2 */
+#define MTK_PHY_LED0_ON_CTRL 0x24
+#define MTK_PHY_LED1_ON_CTRL 0x26
+#define MTK_PHY_LED_ON_MASK GENMASK(6, 0)
+#define MTK_PHY_LED_ON_LINK1000 BIT(0)
+#define MTK_PHY_LED_ON_LINK100 BIT(1)
+#define MTK_PHY_LED_ON_LINK10 BIT(2)
+#define MTK_PHY_LED_ON_LINKDOWN BIT(3)
+#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */
+#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */
+#define MTK_PHY_LED_ON_FORCE_ON BIT(6)
+#define MTK_PHY_LED_ON_POLARITY BIT(14)
+#define MTK_PHY_LED_ON_ENABLE BIT(15)
+
+#define MTK_PHY_LED0_BLINK_CTRL 0x25
+#define MTK_PHY_LED1_BLINK_CTRL 0x27
+#define MTK_PHY_LED_BLINK_1000TX BIT(0)
+#define MTK_PHY_LED_BLINK_1000RX BIT(1)
+#define MTK_PHY_LED_BLINK_100TX BIT(2)
+#define MTK_PHY_LED_BLINK_100RX BIT(3)
+#define MTK_PHY_LED_BLINK_10TX BIT(4)
+#define MTK_PHY_LED_BLINK_10RX BIT(5)
+#define MTK_PHY_LED_BLINK_COLLISION BIT(6)
+#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
+#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
+#define MTK_PHY_LED_BLINK_FORCE_BLINK BIT(9)
+
+#define MTK_PHY_LED1_DEFAULT_POLARITIES BIT(1)
+
#define MTK_PHY_RG_BG_RASEL 0x115
#define MTK_PHY_RG_BG_RASEL_MASK GENMASK(2, 0)
+/* 'boottrap' register reflecting the configuration of the 4 PHY LEDs */
+#define RG_GPIO_MISC_TPBANK0 0x6f0
+#define RG_GPIO_MISC_TPBANK0_BOOTMODE GENMASK(11, 8)
+
/* These macros provide efuse parsing for the internal PHY. */
#define EFS_DA_TX_I2MPB_A(x) (((x) >> 0) & GENMASK(5, 0))
#define EFS_DA_TX_I2MPB_B(x) (((x) >> 6) & GENMASK(5, 0))
@@ -236,13 +272,6 @@ enum {
PAIR_D,
};
-enum {
- GPHY_PORT0,
- GPHY_PORT1,
- GPHY_PORT2,
- GPHY_PORT3,
-};
-
enum calibration_mode {
EFUSE_K,
SW_K
@@ -261,6 +290,19 @@ enum CAL_MODE {
SW_M
};
+#define MTK_PHY_LED_STATE_FORCE_ON 0
+#define MTK_PHY_LED_STATE_FORCE_BLINK 1
+#define MTK_PHY_LED_STATE_NETDEV 2
+
+struct mtk_socphy_priv {
+ unsigned long led_state;
+};
+
+struct mtk_socphy_shared {
+ u32 boottrap;
+ struct mtk_socphy_priv priv[4];
+};
+
static int mtk_socphy_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, MTK_EXT_PAGE_ACCESS);
@@ -1071,6 +1113,371 @@ static int mt798x_phy_config_init(struct phy_device *phydev)
return mt798x_phy_calibration(phydev);
}
+static int mt798x_phy_hw_led_on_set(struct phy_device *phydev, u8 index,
+ bool on)
+{
+ unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0);
+ struct mtk_socphy_priv *priv = phydev->priv;
+ bool changed;
+
+ if (on)
+ changed = !test_and_set_bit(bit_on, &priv->led_state);
+ else
+ changed = !!test_and_clear_bit(bit_on, &priv->led_state);
+
+ changed |= !!test_and_clear_bit(MTK_PHY_LED_STATE_NETDEV +
+ (index ? 16 : 0), &priv->led_state);
+ if (changed)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_MASK,
+ on ? MTK_PHY_LED_ON_FORCE_ON : 0);
+ else
+ return 0;
+}
+
+static int mt798x_phy_hw_led_blink_set(struct phy_device *phydev, u8 index,
+ bool blinking)
+{
+ unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + (index ? 16 : 0);
+ struct mtk_socphy_priv *priv = phydev->priv;
+ bool changed;
+
+ if (blinking)
+ changed = !test_and_set_bit(bit_blink, &priv->led_state);
+ else
+ changed = !!test_and_clear_bit(bit_blink, &priv->led_state);
+
+ changed |= !!test_bit(MTK_PHY_LED_STATE_NETDEV +
+ (index ? 16 : 0), &priv->led_state);
+ if (changed)
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_BLINK_CTRL : MTK_PHY_LED0_BLINK_CTRL,
+ blinking ? MTK_PHY_LED_BLINK_FORCE_BLINK : 0);
+ else
+ return 0;
+}
+
+static int mt798x_phy_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ bool blinking = false;
+ int err = 0;
+
+ if (index > 1)
+ return -EINVAL;
+
+ if (delay_on && delay_off && (*delay_on > 0) && (*delay_off > 0)) {
+ blinking = true;
+ *delay_on = 50;
+ *delay_off = 50;
+ }
+
+ err = mt798x_phy_hw_led_blink_set(phydev, index, blinking);
+ if (err)
+ return err;
+
+ return mt798x_phy_hw_led_on_set(phydev, index, false);
+}
+
+static int mt798x_phy_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ int err;
+
+ err = mt798x_phy_hw_led_blink_set(phydev, index, false);
+ if (err)
+ return err;
+
+ return mt798x_phy_hw_led_on_set(phydev, index, (value != LED_OFF));
+}
+
+static const unsigned long supported_triggers = (BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_HALF_DUPLEX) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX));
+
+static int mt798x_phy_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index > 1)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~supported_triggers)
+ return -EOPNOTSUPP;
+
+ return 0;
+};
+
+static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + (index ? 16 : 0);
+ unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0);
+ unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0);
+ struct mtk_socphy_priv *priv = phydev->priv;
+ int on, blink;
+
+ if (index > 1)
+ return -EINVAL;
+
+ on = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ index ? MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL);
+
+ if (on < 0)
+ return -EIO;
+
+ blink = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ index ? MTK_PHY_LED1_BLINK_CTRL :
+ MTK_PHY_LED0_BLINK_CTRL);
+ if (blink < 0)
+ return -EIO;
+
+ if ((on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 |
+ MTK_PHY_LED_ON_LINK10)) ||
+ (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX |
+ MTK_PHY_LED_BLINK_10RX | MTK_PHY_LED_BLINK_1000TX |
+ MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX)))
+ set_bit(bit_netdev, &priv->led_state);
+ else
+ clear_bit(bit_netdev, &priv->led_state);
+
+ if (on & MTK_PHY_LED_ON_FORCE_ON)
+ set_bit(bit_on, &priv->led_state);
+ else
+ clear_bit(bit_on, &priv->led_state);
+
+ if (blink & MTK_PHY_LED_BLINK_FORCE_BLINK)
+ set_bit(bit_blink, &priv->led_state);
+ else
+ clear_bit(bit_blink, &priv->led_state);
+
+ if (!rules)
+ return 0;
+
+ if (on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK10))
+ *rules |= BIT(TRIGGER_NETDEV_LINK);
+
+ if (on & MTK_PHY_LED_ON_LINK10)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_10);
+
+ if (on & MTK_PHY_LED_ON_LINK100)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_100);
+
+ if (on & MTK_PHY_LED_ON_LINK1000)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_1000);
+
+ if (on & MTK_PHY_LED_ON_FDX)
+ *rules |= BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+
+ if (on & MTK_PHY_LED_ON_HDX)
+ *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
+
+ if (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | MTK_PHY_LED_BLINK_10RX))
+ *rules |= BIT(TRIGGER_NETDEV_RX);
+
+ if (blink & (MTK_PHY_LED_BLINK_1000TX | MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX))
+ *rules |= BIT(TRIGGER_NETDEV_TX);
+
+ return 0;
+};
+
+static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0);
+ struct mtk_socphy_priv *priv = phydev->priv;
+ u16 on = 0, blink = 0;
+ int ret;
+
+ if (index > 1)
+ return -EINVAL;
+
+ if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ on |= MTK_PHY_LED_ON_FDX;
+
+ if (rules & BIT(TRIGGER_NETDEV_HALF_DUPLEX))
+ on |= MTK_PHY_LED_ON_HDX;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= MTK_PHY_LED_ON_LINK10;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= MTK_PHY_LED_ON_LINK100;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= MTK_PHY_LED_ON_LINK1000;
+
+ if (rules & BIT(TRIGGER_NETDEV_RX)) {
+ blink |= MTK_PHY_LED_BLINK_10RX |
+ MTK_PHY_LED_BLINK_100RX |
+ MTK_PHY_LED_BLINK_1000RX;
+ }
+
+ if (rules & BIT(TRIGGER_NETDEV_TX)) {
+ blink |= MTK_PHY_LED_BLINK_10TX |
+ MTK_PHY_LED_BLINK_100TX |
+ MTK_PHY_LED_BLINK_1000TX;
+ }
+
+ if (blink || on)
+ set_bit(bit_netdev, &priv->led_state);
+ else
+ clear_bit(bit_netdev, &priv->led_state);
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_ON_CTRL :
+ MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_FDX |
+ MTK_PHY_LED_ON_HDX |
+ MTK_PHY_LED_ON_LINK10 |
+ MTK_PHY_LED_ON_LINK100 |
+ MTK_PHY_LED_ON_LINK1000,
+ on);
+
+ if (ret)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_BLINK_CTRL :
+ MTK_PHY_LED0_BLINK_CTRL, blink);
+};
+
+static bool mt7988_phy_led_get_polarity(struct phy_device *phydev, int led_num)
+{
+ struct mtk_socphy_shared *priv = phydev->shared->priv;
+ u32 polarities;
+
+ if (led_num == 0)
+ polarities = ~(priv->boottrap);
+ else
+ polarities = MTK_PHY_LED1_DEFAULT_POLARITIES;
+
+ if (polarities & BIT(phydev->mdio.addr))
+ return true;
+
+ return false;
+}
+
+static int mt7988_phy_fix_leds_polarities(struct phy_device *phydev)
+{
+ struct pinctrl *pinctrl;
+ int index;
+
+ /* Setup LED polarity according to bootstrap use of LED pins */
+ for (index = 0; index < 2; ++index)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ?
+ MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL,
+ MTK_PHY_LED_ON_POLARITY,
+ mt7988_phy_led_get_polarity(phydev, index) ?
+ MTK_PHY_LED_ON_POLARITY : 0);
+
+ /* Only now setup pinctrl to avoid bogus blinking */
+ pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "gbe-led");
+ if (IS_ERR(pinctrl))
+ dev_err(&phydev->mdio.bus->dev, "Failed to setup PHY LED pinctrl\n");
+
+ return 0;
+}
+
+static int mt7988_phy_probe_shared(struct phy_device *phydev)
+{
+ struct device_node *np = dev_of_node(&phydev->mdio.bus->dev);
+ struct mtk_socphy_shared *shared = phydev->shared->priv;
+ struct regmap *regmap;
+ u32 reg;
+ int ret;
+
+ /* The LED0 pins of the 4 PHYs in MT7988 are wired to SoC pins LED_A, LED_B,
+ * LED_C and LED_D respectively. At the same time those pins are used to
+ * bootstrap the configuration of the reference clock source (LED_A),
+ * DRAM DDRx16b x2/x1 (LED_B) and boot device (LED_C, LED_D).
+ * In practice this is done using an LED and a resistor pulling the pin
+ * either to GND or to VIO.
+ * The value detected at boot time is accessible at run-time via the
+ * TPBANK0 register located in the GPIO base of the pinctrl. To read it
+ * here, the pinctrl needs to be referenced by a phandle called
+ * 'mediatek,pio' in the MDIO bus node hosting the PHY.
+ * The 4 bits in TPBANK0 are kept as package shared data and are used to
+ * set the LED0 polarity of each of the PHYs.
+ */
+ regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,pio");
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = regmap_read(regmap, RG_GPIO_MISC_TPBANK0, &reg);
+ if (ret)
+ return ret;
+
+ shared->boottrap = FIELD_GET(RG_GPIO_MISC_TPBANK0_BOOTMODE, reg);
+
+ return 0;
+}
+
+static void mt798x_phy_leds_state_init(struct phy_device *phydev)
+{
+ int i;
+
+ for (i = 0; i < 2; ++i)
+ mt798x_phy_led_hw_control_get(phydev, i, NULL);
+}
+
+static int mt7988_phy_probe(struct phy_device *phydev)
+{
+ struct mtk_socphy_shared *shared;
+ struct mtk_socphy_priv *priv;
+ int err;
+
+ if (phydev->mdio.addr > 3)
+ return -EINVAL;
+
+ err = devm_phy_package_join(&phydev->mdio.dev, phydev, 0,
+ sizeof(struct mtk_socphy_shared));
+ if (err)
+ return err;
+
+ if (phy_package_probe_once(phydev)) {
+ err = mt7988_phy_probe_shared(phydev);
+ if (err)
+ return err;
+ }
+
+ shared = phydev->shared->priv;
+ priv = &shared->priv[phydev->mdio.addr];
+
+ phydev->priv = priv;
+
+ mt798x_phy_leds_state_init(phydev);
+
+ err = mt7988_phy_fix_leds_polarities(phydev);
+ if (err)
+ return err;
+
+ return mt798x_phy_calibration(phydev);
+}
+
+static int mt7981_phy_probe(struct phy_device *phydev)
+{
+ struct mtk_socphy_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(struct mtk_socphy_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ mt798x_phy_leds_state_init(phydev);
+
+ return mt798x_phy_calibration(phydev);
+}
+
static struct phy_driver mtk_socphy_driver[] = {
{
PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7981),
@@ -1078,11 +1485,16 @@ static struct phy_driver mtk_socphy_driver[] = {
.config_init = mt798x_phy_config_init,
.config_intr = genphy_no_config_intr,
.handle_interrupt = genphy_handle_interrupt_no_ack,
- .probe = mt798x_phy_calibration,
+ .probe = mt7981_phy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = mtk_socphy_read_page,
.write_page = mtk_socphy_write_page,
+ .led_blink_set = mt798x_phy_led_blink_set,
+ .led_brightness_set = mt798x_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_phy_led_hw_is_supported,
+ .led_hw_control_set = mt798x_phy_led_hw_control_set,
+ .led_hw_control_get = mt798x_phy_led_hw_control_get,
},
{
PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7988),
@@ -1090,11 +1502,16 @@ static struct phy_driver mtk_socphy_driver[] = {
.config_init = mt798x_phy_config_init,
.config_intr = genphy_no_config_intr,
.handle_interrupt = genphy_handle_interrupt_no_ack,
- .probe = mt798x_phy_calibration,
+ .probe = mt7988_phy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = mtk_socphy_read_page,
.write_page = mtk_socphy_write_page,
+ .led_blink_set = mt798x_phy_led_blink_set,
+ .led_brightness_set = mt798x_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_phy_led_hw_is_supported,
+ .led_hw_control_set = mt798x_phy_led_hw_control_set,
+ .led_hw_control_get = mt798x_phy_led_hw_control_get,
},
};
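
For the new LED support above, each PHY instance tracks its software LED state in a single unsigned long, with LED1 reusing the same three state bits at an offset of 16 -- that is what the recurring "(index ? 16 : 0)" expressions compute. A small illustrative helper (the name is made up; the driver open-codes this):

/* LED0 uses bits 0..2 of priv->led_state, LED1 uses bits 16..18. */
static unsigned int sketch_led_state_bit(u8 index, unsigned int state)
{
	/* state is MTK_PHY_LED_STATE_FORCE_ON, _FORCE_BLINK or _NETDEV */
	return state + (index ? 16 : 0);
}

For example, test_bit(sketch_led_state_bit(1, MTK_PHY_LED_STATE_NETDEV), &priv->led_state) asks whether LED1 is currently under netdev trigger control.
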
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index a64186dc53f8..966c93cbe616 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -142,6 +142,8 @@ int phy_interface_num_ports(phy_interface_t interface)
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
return 4;
+ case PHY_INTERFACE_MODE_PSGMII:
+ return 5;
case PHY_INTERFACE_MODE_MAX:
WARN_ONCE(1, "PHY_INTERFACE_MODE_MAX isn't a valid interface mode");
return 0;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 4f1c8bb199e9..160bce608c34 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -210,6 +210,7 @@ static int phylink_interface_max_speed(phy_interface_t interface)
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_PSGMII:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_SGMII:
@@ -475,6 +476,7 @@ unsigned long phylink_get_capabilities(phy_interface_t interface,
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_PSGMII:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_SGMII:
@@ -868,6 +870,7 @@ static int phylink_parse_mode(struct phylink *pl,
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_PSGMII:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_RGMII:
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index db9897e825b4..bb234cf0cea0 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -9760,8 +9760,7 @@ static int rtl8152_probe(struct usb_interface *intf,
usb_set_intfdata(intf, tp);
- netif_napi_add_weight(netdev, &tp->napi, r8152_poll,
- tp->support_2500full ? 256 : 64);
+ netif_napi_add(netdev, &tp->napi, r8152_poll);
ret = register_netdev(netdev);
if (ret != 0) {
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index a666a88ac1ff..f82870c10205 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -32,4 +32,4 @@
obj-$(CONFIG_VMXNET3) += vmxnet3.o
-vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o
+vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o vmxnet3_xdp.o
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 7fa74b8b2100..0578864792b6 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -28,6 +28,7 @@
#include <net/ip6_checksum.h>
#include "vmxnet3_int.h"
+#include "vmxnet3_xdp.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
@@ -338,14 +339,16 @@ static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
struct pci_dev *pdev)
{
- if (tbi->map_type == VMXNET3_MAP_SINGLE)
+ u32 map_type = tbi->map_type;
+
+ if (map_type & VMXNET3_MAP_SINGLE)
dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
DMA_TO_DEVICE);
- else if (tbi->map_type == VMXNET3_MAP_PAGE)
+ else if (map_type & VMXNET3_MAP_PAGE)
dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
DMA_TO_DEVICE);
else
- BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
+ BUG_ON(map_type & ~VMXNET3_MAP_XDP);
tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
@@ -353,19 +356,20 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
- struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
+ struct pci_dev *pdev, struct vmxnet3_adapter *adapter,
+ struct xdp_frame_bulk *bq)
{
- struct sk_buff *skb;
+ struct vmxnet3_tx_buf_info *tbi;
int entries = 0;
+ u32 map_type;
/* no out of order completion */
BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
- skb = tq->buf_info[eop_idx].skb;
- BUG_ON(skb == NULL);
- tq->buf_info[eop_idx].skb = NULL;
-
+ tbi = &tq->buf_info[eop_idx];
+ BUG_ON(!tbi->skb);
+ map_type = tbi->map_type;
VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
while (tq->tx_ring.next2comp != eop_idx) {
@@ -381,7 +385,14 @@ vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
entries++;
}
- dev_kfree_skb_any(skb);
+ if (map_type & VMXNET3_MAP_XDP)
+ xdp_return_frame_bulk(tbi->xdpf, bq);
+ else
+ dev_kfree_skb_any(tbi->skb);
+
+ /* xdpf and skb are in an anonymous union. */
+ tbi->skb = NULL;
+
return entries;
}
@@ -390,8 +401,12 @@ static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
- int completed = 0;
union Vmxnet3_GenericDesc *gdesc;
+ struct xdp_frame_bulk bq;
+ int completed = 0;
+
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock();
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
@@ -402,11 +417,13 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
&gdesc->tcd), tq, adapter->pdev,
- adapter);
+ adapter, &bq);
vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
}
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
if (completed) {
spin_lock(&tq->tx_lock);
@@ -426,26 +443,36 @@ static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
+ struct xdp_frame_bulk bq;
+ u32 map_type;
int i;
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock();
+
while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
struct vmxnet3_tx_buf_info *tbi;
tbi = tq->buf_info + tq->tx_ring.next2comp;
+ map_type = tbi->map_type;
vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
if (tbi->skb) {
- dev_kfree_skb_any(tbi->skb);
+ if (map_type & VMXNET3_MAP_XDP)
+ xdp_return_frame_bulk(tbi->xdpf, &bq);
+ else
+ dev_kfree_skb_any(tbi->skb);
tbi->skb = NULL;
}
vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
}
- /* sanity check, verify all buffers are indeed unmapped and freed */
- for (i = 0; i < tq->tx_ring.size; i++) {
- BUG_ON(tq->buf_info[i].skb != NULL ||
- tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
- }
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
+
+ /* sanity check, verify all buffers are indeed unmapped */
+ for (i = 0; i < tq->tx_ring.size; i++)
+ BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
tq->tx_ring.gen = VMXNET3_INIT_GEN;
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
@@ -599,7 +626,17 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
gd = ring->base + ring->next2fill;
rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
- if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
+ if (rbi->buf_type == VMXNET3_RX_BUF_XDP) {
+ void *data = vmxnet3_pp_get_buff(rq->page_pool,
+ &rbi->dma_addr,
+ GFP_KERNEL);
+ if (!data) {
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+ rbi->page = virt_to_page(data);
+ val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
+ } else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
if (rbi->skb == NULL) {
rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
rbi->len,
@@ -1263,6 +1300,63 @@ drop_pkt:
return NETDEV_TX_OK;
}
+static int
+vmxnet3_create_pp(struct vmxnet3_adapter *adapter,
+ struct vmxnet3_rx_queue *rq, int size)
+{
+ bool xdp_prog = vmxnet3_xdp_enabled(adapter);
+ const struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = &adapter->pdev->dev,
+ .offset = VMXNET3_XDP_RX_OFFSET,
+ .max_len = VMXNET3_XDP_MAX_FRSIZE,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ };
+ struct page_pool *pp;
+ int err;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
+ rq->napi.napi_id);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
+ if (err)
+ goto err_unregister_rxq;
+
+ rq->page_pool = pp;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(pp);
+
+ return err;
+}
+
+void *
+vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+ if (unlikely(!page))
+ return NULL;
+
+ *dma_addr = page_pool_get_dma_addr(page) + pp->p.offset;
+
+ return page_address(page);
+}
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1423,6 +1517,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct Vmxnet3_RxDesc rxCmdDesc;
struct Vmxnet3_RxCompDesc rxComp;
#endif
+ bool need_flush = false;
+
vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
&rxComp);
while (rcd->gen == rq->comp_ring.gen) {
@@ -1463,6 +1559,31 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
goto rcd_done;
}
+ if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) {
+ struct sk_buff *skb_xdp_pass;
+ int act;
+
+ if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) {
+ ctx->skb = NULL;
+ goto skip_xdp; /* Handle it later. */
+ }
+
+ if (rbi->buf_type != VMXNET3_RX_BUF_XDP)
+ goto rcd_done;
+
+ act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
+ &skb_xdp_pass);
+ if (act == XDP_PASS) {
+ ctx->skb = skb_xdp_pass;
+ goto sop_done;
+ }
+ ctx->skb = NULL;
+ need_flush |= act == XDP_REDIRECT;
+
+ goto rcd_done;
+ }
+skip_xdp:
+
if (rcd->sop) { /* first buf of the pkt */
bool rxDataRingUsed;
u16 len;
@@ -1471,7 +1592,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
(rcd->rqID != rq->qid &&
rcd->rqID != rq->dataRingQid));
- BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
+ BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB &&
+ rbi->buf_type != VMXNET3_RX_BUF_XDP);
BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
if (unlikely(rcd->len == 0)) {
@@ -1489,6 +1611,25 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
rxDataRingUsed =
VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
len = rxDataRingUsed ? rcd->len : rbi->len;
+
+ if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) {
+ struct sk_buff *skb_xdp_pass;
+ size_t sz;
+ int act;
+
+ sz = rcd->rxdIdx * rq->data_ring.desc_size;
+ act = vmxnet3_process_xdp_small(adapter, rq,
+ &rq->data_ring.base[sz],
+ rcd->len,
+ &skb_xdp_pass);
+ if (act == XDP_PASS) {
+ ctx->skb = skb_xdp_pass;
+ goto sop_done;
+ }
+ need_flush |= act == XDP_REDIRECT;
+
+ goto rcd_done;
+ }
new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
len);
if (new_skb == NULL) {
@@ -1621,6 +1762,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
}
+sop_done:
skb = ctx->skb;
if (rcd->eop) {
u32 mtu = adapter->netdev->mtu;
@@ -1757,6 +1899,8 @@ refill_buf:
vmxnet3_getRxComp(rcd,
&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
}
+ if (need_flush)
+ xdp_do_flush();
return num_pkts;
}
@@ -1775,24 +1919,32 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
+ struct vmxnet3_rx_buf_info *rbi;
#ifdef __BIG_ENDIAN_BITFIELD
struct Vmxnet3_RxDesc rxDesc;
#endif
+
+ rbi = &rq->buf_info[ring_idx][i];
vmxnet3_getRxDesc(rxd,
&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
- rq->buf_info[ring_idx][i].skb) {
+ rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) {
+ page_pool_recycle_direct(rq->page_pool,
+ rbi->page);
+ rbi->page = NULL;
+ } else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
+ rbi->skb) {
dma_unmap_single(&adapter->pdev->dev, rxd->addr,
rxd->len, DMA_FROM_DEVICE);
- dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
- rq->buf_info[ring_idx][i].skb = NULL;
+ dev_kfree_skb(rbi->skb);
+ rbi->skb = NULL;
} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
- rq->buf_info[ring_idx][i].page) {
+ rbi->page) {
dma_unmap_page(&adapter->pdev->dev, rxd->addr,
rxd->len, DMA_FROM_DEVICE);
- put_page(rq->buf_info[ring_idx][i].page);
- rq->buf_info[ring_idx][i].page = NULL;
+ put_page(rbi->page);
+ rbi->page = NULL;
}
}
@@ -1813,6 +1965,7 @@ vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++)
vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
+ rcu_assign_pointer(adapter->xdp_bpf_prog, NULL);
}
@@ -1842,6 +1995,11 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
}
}
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ page_pool_destroy(rq->page_pool);
+ rq->page_pool = NULL;
+
if (rq->data_ring.base) {
dma_free_coherent(&adapter->pdev->dev,
rq->rx_ring[0].size * rq->data_ring.desc_size,
@@ -1885,14 +2043,16 @@ static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
- int i;
+ int i, err;
/* initialize buf_info */
for (i = 0; i < rq->rx_ring[0].size; i++) {
- /* 1st buf for a pkt is skbuff */
+ /* 1st buf for a pkt is skbuff or xdp page */
if (i % adapter->rx_buf_per_pkt == 0) {
- rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
+ rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
+ VMXNET3_RX_BUF_XDP :
+ VMXNET3_RX_BUF_SKB;
rq->buf_info[0][i].len = adapter->skb_buf_size;
} else { /* subsequent bufs for a pkt is frag */
rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
@@ -1913,8 +2073,18 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
rq->rx_ring[i].isOutOfOrder = 0;
}
+
+ err = vmxnet3_create_pp(adapter, rq,
+ rq->rx_ring[0].size + rq->rx_ring[1].size);
+ if (err)
+ return err;
+
if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
adapter) == 0) {
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ page_pool_destroy(rq->page_pool);
+ rq->page_pool = NULL;
+
/* at least has 1 rx buffer for the 1st ring */
return -ENOMEM;
}
@@ -2016,7 +2186,7 @@ err:
}
-static int
+int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
int i, err = 0;
@@ -3053,7 +3223,7 @@ vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
}
-static void
+void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
size_t sz, i, ring0_size, ring1_size, comp_size;
@@ -3612,6 +3782,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vmxnet3_netpoll,
#endif
+ .ndo_bpf = vmxnet3_xdp,
+ .ndo_xdp_xmit = vmxnet3_xdp_xmit,
};
int err;
u32 ver;
@@ -3864,6 +4036,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
vmxnet3_declare_features(adapter);
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 18cf7c723201..98c22d7d87a2 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -28,6 +28,7 @@
#include "vmxnet3_int.h"
#include <net/vxlan.h>
#include <net/geneve.h>
+#include "vmxnet3_xdp.h"
#define VXLAN_UDP_PORT 8472
@@ -76,6 +77,10 @@ vmxnet3_tq_driver_stats[] = {
copy_skb_header) },
{ " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
oversized_hdr) },
+ { " xdp xmit", offsetof(struct vmxnet3_tq_driver_stats,
+ xdp_xmit) },
+ { " xdp xmit err", offsetof(struct vmxnet3_tq_driver_stats,
+ xdp_xmit_err) },
};
/* per rq stats maintained by the device */
@@ -106,6 +111,16 @@ vmxnet3_rq_driver_stats[] = {
drop_fcs) },
{ " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
rx_buf_alloc_failure) },
+ { " xdp packets", offsetof(struct vmxnet3_rq_driver_stats,
+ xdp_packets) },
+ { " xdp tx", offsetof(struct vmxnet3_rq_driver_stats,
+ xdp_tx) },
+ { " xdp redirects", offsetof(struct vmxnet3_rq_driver_stats,
+ xdp_redirects) },
+ { " xdp drops", offsetof(struct vmxnet3_rq_driver_stats,
+ xdp_drops) },
+ { " xdp aborted", offsetof(struct vmxnet3_rq_driver_stats,
+ xdp_aborted) },
};
/* global stats maintained by the driver */
@@ -249,10 +264,18 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
netdev_features_t features)
{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
/* If Rx checksum is disabled, then LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
+ /* If XDP is enabled, then LRO should not be enabled */
+ if (vmxnet3_xdp_enabled(adapter) && (features & NETIF_F_LRO)) {
+ netdev_err(netdev, "LRO is not supported with XDP");
+ features &= ~NETIF_F_LRO;
+ }
+
return features;
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3367db23aa13..915aaf18c409 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -56,6 +56,9 @@
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/log2.h>
+#include <linux/bpf.h>
+#include <net/page_pool/helpers.h>
+#include <net/xdp.h>
#include "vmxnet3_defs.h"
@@ -188,19 +191,20 @@ struct vmxnet3_tx_data_ring {
dma_addr_t basePA;
};
-enum vmxnet3_buf_map_type {
- VMXNET3_MAP_INVALID = 0,
- VMXNET3_MAP_NONE,
- VMXNET3_MAP_SINGLE,
- VMXNET3_MAP_PAGE,
-};
+#define VMXNET3_MAP_NONE 0
+#define VMXNET3_MAP_SINGLE BIT(0)
+#define VMXNET3_MAP_PAGE BIT(1)
+#define VMXNET3_MAP_XDP BIT(2)
struct vmxnet3_tx_buf_info {
u32 map_type;
u16 len;
u16 sop_idx;
dma_addr_t dma_addr;
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
};
struct vmxnet3_tq_driver_stats {
@@ -217,6 +221,9 @@ struct vmxnet3_tq_driver_stats {
u64 linearized; /* # of pkts linearized */
u64 copy_skb_header; /* # of times we have to copy skb header */
u64 oversized_hdr;
+
+ u64 xdp_xmit;
+ u64 xdp_xmit_err;
};
struct vmxnet3_tx_ctx {
@@ -253,12 +260,13 @@ struct vmxnet3_tx_queue {
* stopped */
int qid;
u16 txdata_desc_size;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+} ____cacheline_aligned;
enum vmxnet3_rx_buf_type {
VMXNET3_RX_BUF_NONE = 0,
VMXNET3_RX_BUF_SKB = 1,
- VMXNET3_RX_BUF_PAGE = 2
+ VMXNET3_RX_BUF_PAGE = 2,
+ VMXNET3_RX_BUF_XDP = 3,
};
#define VMXNET3_RXD_COMP_PENDING 0
@@ -285,6 +293,12 @@ struct vmxnet3_rq_driver_stats {
u64 drop_err;
u64 drop_fcs;
u64 rx_buf_alloc_failure;
+
+ u64 xdp_packets; /* Total packets processed by XDP. */
+ u64 xdp_tx;
+ u64 xdp_redirects;
+ u64 xdp_drops;
+ u64 xdp_aborted;
};
struct vmxnet3_rx_data_ring {
@@ -307,7 +321,9 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rx_buf_info *buf_info[2];
struct Vmxnet3_RxQueueCtrl *shared;
struct vmxnet3_rq_driver_stats stats;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+} ____cacheline_aligned;
#define VMXNET3_DEVICE_MAX_TX_QUEUES 32
#define VMXNET3_DEVICE_MAX_RX_QUEUES 32 /* Keep this value as a power of 2 */
@@ -415,6 +431,7 @@ struct vmxnet3_adapter {
u16 tx_prod_offset;
u16 rx_prod_offset;
u16 rx_prod2_offset;
+ struct bpf_prog __rcu *xdp_bpf_prog;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -490,6 +507,12 @@ vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
+int
+vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter);
+
+void
+vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter);
+
netdev_features_t
vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features);
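
With map_type turned into OR-able flags above, a TX buffer can now describe both its origin and how it was mapped. Roughly, the combinations the reworked unmap/cleanup paths expect are (an informal summary, not taken verbatim from the driver):

/*
 *   skb linear data (dma_map_single)  -> VMXNET3_MAP_SINGLE
 *   skb fragment (dma_map_page)       -> VMXNET3_MAP_PAGE
 *   XDP_TX buffer from the page pool  -> VMXNET3_MAP_XDP
 *   ndo_xdp_xmit frame                -> VMXNET3_MAP_XDP | VMXNET3_MAP_SINGLE
 */
static bool sketch_needs_dma_unmap(u32 map_type)
{
	/* Pure page-pool XDP buffers keep their page-pool DMA mapping and
	 * are never unmapped here. */
	return map_type & (VMXNET3_MAP_SINGLE | VMXNET3_MAP_PAGE);
}
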
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
new file mode 100644
index 000000000000..80ddaff759d4
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ * Copyright (C) 2008-2023, VMware, Inc. All Rights Reserved.
+ * Maintained by: [email protected]
+ *
+ */
+
+#include "vmxnet3_int.h"
+#include "vmxnet3_xdp.h"
+
+static void
+vmxnet3_xdp_exchange_program(struct vmxnet3_adapter *adapter,
+ struct bpf_prog *prog)
+{
+ rcu_assign_pointer(adapter->xdp_bpf_prog, prog);
+}
+
+static inline struct vmxnet3_tx_queue *
+vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
+{
+ struct vmxnet3_tx_queue *tq;
+ int tq_number;
+ int cpu;
+
+ tq_number = adapter->num_tx_queues;
+ cpu = smp_processor_id();
+ if (likely(cpu < tq_number))
+ tq = &adapter->tx_queue[cpu];
+ else
+ tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
+
+ return tq;
+}
+
+static int
+vmxnet3_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf,
+ struct netlink_ext_ack *extack)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct bpf_prog *new_bpf_prog = bpf->prog;
+ struct bpf_prog *old_bpf_prog;
+ bool need_update;
+ bool running;
+ int err;
+
+ if (new_bpf_prog && netdev->mtu > VMXNET3_XDP_MAX_MTU) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "MTU %u too large for XDP",
+ netdev->mtu);
+ return -EOPNOTSUPP;
+ }
+
+ if (adapter->netdev->features & NETIF_F_LRO) {
+ NL_SET_ERR_MSG_MOD(extack, "LRO is not supported with XDP");
+ adapter->netdev->features &= ~NETIF_F_LRO;
+ }
+
+ old_bpf_prog = rcu_dereference(adapter->xdp_bpf_prog);
+ if (!new_bpf_prog && !old_bpf_prog)
+ return 0;
+
+ running = netif_running(netdev);
+ need_update = !!old_bpf_prog != !!new_bpf_prog;
+
+ if (running && need_update)
+ vmxnet3_quiesce_dev(adapter);
+
+ vmxnet3_xdp_exchange_program(adapter, new_bpf_prog);
+ if (old_bpf_prog)
+ bpf_prog_put(old_bpf_prog);
+
+ if (!running || !need_update)
+ return 0;
+
+ if (new_bpf_prog)
+ xdp_features_set_redirect_target(netdev, false);
+ else
+ xdp_features_clear_redirect_target(netdev);
+
+ vmxnet3_reset_dev(adapter);
+ vmxnet3_rq_destroy_all(adapter);
+ vmxnet3_adjust_rx_ring_size(adapter);
+ err = vmxnet3_rq_create_all(adapter);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "failed to re-create rx queues for XDP.");
+ return -EOPNOTSUPP;
+ }
+ err = vmxnet3_activate_dev(adapter);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "failed to activate device for XDP.");
+ return -EOPNOTSUPP;
+ }
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+
+ return 0;
+}
+
+/* This is the main XDP entry point used by the kernel to set/unset the eBPF program. */
+int
+vmxnet3_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return vmxnet3_xdp_set(netdev, bpf, bpf->extack);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ struct xdp_frame *xdpf,
+ struct vmxnet3_tx_queue *tq, bool dma_map)
+{
+ struct vmxnet3_tx_buf_info *tbi = NULL;
+ union Vmxnet3_GenericDesc *gdesc;
+ struct vmxnet3_tx_ctx ctx;
+ int tx_num_deferred;
+ struct page *page;
+ u32 buf_size;
+ u32 dw2;
+
+ dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+ dw2 |= xdpf->len;
+ ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
+ gdesc = ctx.sop_txd;
+
+ buf_size = xdpf->len;
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+
+ if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
+ tq->stats.tx_ring_full++;
+ return -ENOSPC;
+ }
+
+ tbi->map_type = VMXNET3_MAP_XDP;
+ if (dma_map) { /* ndo_xdp_xmit */
+ tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
+ xdpf->data, buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ return -EFAULT;
+ tbi->map_type |= VMXNET3_MAP_SINGLE;
+ } else { /* XDP buffer from page pool */
+ page = virt_to_page(xdpf->data);
+ tbi->dma_addr = page_pool_get_dma_addr(page) +
+ VMXNET3_XDP_HEADROOM;
+ dma_sync_single_for_device(&adapter->pdev->dev,
+ tbi->dma_addr, buf_size,
+ DMA_TO_DEVICE);
+ }
+ tbi->xdpf = xdpf;
+ tbi->len = buf_size;
+
+ gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+ WARN_ON_ONCE(gdesc->txd.gen == tq->tx_ring.gen);
+
+ gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+ gdesc->dword[2] = cpu_to_le32(dw2);
+
+ /* Setup the EOP desc */
+ gdesc->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
+
+ gdesc->txd.om = 0;
+ gdesc->txd.msscof = 0;
+ gdesc->txd.hlen = 0;
+ gdesc->txd.ti = 0;
+
+ tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
+ le32_add_cpu(&tq->shared->txNumDeferred, 1);
+ tx_num_deferred++;
+
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+
+ /* set the last buf_info for the pkt */
+ tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base;
+
+ dma_wmb();
+ gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+ VMXNET3_TXD_GEN);
+
+ /* No need to handle the case when tx_num_deferred doesn't reach the
+ * threshold: the backend driver on the hypervisor side will poll and reset
+ * tq->shared->txNumDeferred to 0.
+ */
+ if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
+ tq->shared->txNumDeferred = 0;
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ VMXNET3_REG_TXPROD + tq->qid * 8,
+ tq->tx_ring.next2fill);
+ }
+
+ return 0;
+}
+
+static int
+vmxnet3_xdp_xmit_back(struct vmxnet3_adapter *adapter,
+ struct xdp_frame *xdpf)
+{
+ struct vmxnet3_tx_queue *tq;
+ struct netdev_queue *nq;
+ int err;
+
+ tq = vmxnet3_xdp_get_tq(adapter);
+ if (tq->stopped)
+ return -ENETDOWN;
+
+ nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
+
+ __netif_tx_lock(nq, smp_processor_id());
+ err = vmxnet3_xdp_xmit_frame(adapter, xdpf, tq, false);
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
+/* ndo_xdp_xmit */
+int
+vmxnet3_xdp_xmit(struct net_device *dev,
+ int n, struct xdp_frame **frames, u32 flags)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(dev);
+ struct vmxnet3_tx_queue *tq;
+ int i;
+
+ if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
+ return -ENETDOWN;
+ if (unlikely(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)))
+ return -EINVAL;
+
+ tq = vmxnet3_xdp_get_tq(adapter);
+ if (tq->stopped)
+ return -ENETDOWN;
+
+ for (i = 0; i < n; i++) {
+ if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
+ tq->stats.xdp_xmit_err++;
+ break;
+ }
+ }
+ tq->stats.xdp_xmit += i;
+
+ return i;
+}
+
+static int
+vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,
+ struct bpf_prog *prog)
+{
+ struct xdp_frame *xdpf;
+ struct page *page;
+ int err;
+ u32 act;
+
+ rq->stats.xdp_packets++;
+ act = bpf_prog_run_xdp(prog, xdp);
+ page = virt_to_page(xdp->data_hard_start);
+
+ switch (act) {
+ case XDP_PASS:
+ return act;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
+ if (!err) {
+ rq->stats.xdp_redirects++;
+ } else {
+ rq->stats.xdp_drops++;
+ page_pool_recycle_direct(rq->page_pool, page);
+ }
+ return act;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf ||
+ vmxnet3_xdp_xmit_back(rq->adapter, xdpf))) {
+ rq->stats.xdp_drops++;
+ page_pool_recycle_direct(rq->page_pool, page);
+ } else {
+ rq->stats.xdp_tx++;
+ }
+ return act;
+ default:
+ bpf_warn_invalid_xdp_action(rq->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(rq->adapter->netdev, prog, act);
+ rq->stats.xdp_aborted++;
+ break;
+ case XDP_DROP:
+ rq->stats.xdp_drops++;
+ break;
+ }
+
+ page_pool_recycle_direct(rq->page_pool, page);
+
+ return act;
+}
+
+static struct sk_buff *
+vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page,
+ const struct xdp_buff *xdp)
+{
+ struct sk_buff *skb;
+
+ skb = build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_recycle_direct(rq->page_pool, page);
+ rq->stats.rx_buf_alloc_failure++;
+ return NULL;
+ }
+
+ /* bpf prog might change len and data position. */
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ skb_put(skb, xdp->data_end - xdp->data);
+ skb_mark_for_recycle(skb);
+
+ return skb;
+}
+
+/* Handle packets from DataRing. */
+int
+vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter,
+ struct vmxnet3_rx_queue *rq,
+ void *data, int len,
+ struct sk_buff **skb_xdp_pass)
+{
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ struct page *page;
+ int act;
+
+ page = page_pool_alloc_pages(rq->page_pool, GFP_ATOMIC);
+ if (unlikely(!page)) {
+ rq->stats.rx_buf_alloc_failure++;
+ return XDP_DROP;
+ }
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
+ len, false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ /* Must copy the data because it's in the data ring. */
+ memcpy(xdp.data, data, len);
+
+ xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
+ if (!xdp_prog) {
+ act = XDP_PASS;
+ goto out_skb;
+ }
+ act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);
+ if (act != XDP_PASS)
+ return act;
+
+out_skb:
+ *skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
+ if (!*skb_xdp_pass)
+ return XDP_DROP;
+
+ /* No need to refill. */
+ return likely(*skb_xdp_pass) ? act : XDP_DROP;
+}
+
+int
+vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
+ struct vmxnet3_rx_queue *rq,
+ struct Vmxnet3_RxCompDesc *rcd,
+ struct vmxnet3_rx_buf_info *rbi,
+ struct Vmxnet3_RxDesc *rxd,
+ struct sk_buff **skb_xdp_pass)
+{
+ struct bpf_prog *xdp_prog;
+ dma_addr_t new_dma_addr;
+ struct xdp_buff xdp;
+ struct page *page;
+ void *new_data;
+ int act;
+
+ page = rbi->page;
+ dma_sync_single_for_cpu(&adapter->pdev->dev,
+ page_pool_get_dma_addr(page) +
+ rq->page_pool->p.offset, rcd->len,
+ page_pool_get_dma_dir(rq->page_pool));
+
+ xdp_init_buff(&xdp, rbi->len, &rq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
+ rcd->len, false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
+ if (!xdp_prog) {
+ act = XDP_PASS;
+ goto out_skb;
+ }
+ act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);
+
+ if (act == XDP_PASS) {
+out_skb:
+ *skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
+ if (!*skb_xdp_pass)
+ act = XDP_DROP;
+ }
+
+ new_data = vmxnet3_pp_get_buff(rq->page_pool, &new_dma_addr,
+ GFP_ATOMIC);
+ if (!new_data) {
+ rq->stats.rx_buf_alloc_failure++;
+ return XDP_DROP;
+ }
+ rbi->page = virt_to_page(new_data);
+ rbi->dma_addr = new_dma_addr;
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
+
+ return act;
+}
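
Any standard XDP object can now be attached to a vmxnet3 interface (for example with libbpf's bpf_xdp_attach()); the attach request reaches vmxnet3_xdp_set() above, which quiesces the device, swaps adapter->xdp_bpf_prog and re-creates the RX rings with page-pool backed buffers. A minimal pass-through program to exercise the XDP_PASS branch of vmxnet3_run_xdp(), assuming libbpf's bpf_helpers.h is available:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Every packet takes the XDP_PASS branch and is rebuilt into an skb by
 * vmxnet3_build_skb(). */
SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
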
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.h b/drivers/net/vmxnet3/vmxnet3_xdp.h
new file mode 100644
index 000000000000..f9d843e060a3
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ * Copyright (C) 2008-2023, VMware, Inc. All Rights Reserved.
+ * Maintained by: [email protected]
+ *
+ */
+
+#ifndef _VMXNET3_XDP_H
+#define _VMXNET3_XDP_H
+
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
+#include <linux/netlink.h>
+
+#include "vmxnet3_int.h"
+
+#define VMXNET3_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
+#define VMXNET3_XDP_RX_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define VMXNET3_XDP_RX_OFFSET VMXNET3_XDP_HEADROOM
+#define VMXNET3_XDP_MAX_FRSIZE (PAGE_SIZE - VMXNET3_XDP_HEADROOM - \
+ VMXNET3_XDP_RX_TAILROOM)
+#define VMXNET3_XDP_MAX_MTU (VMXNET3_XDP_MAX_FRSIZE - ETH_HLEN - \
+ 2 * VLAN_HLEN - ETH_FCS_LEN)
+
+int vmxnet3_xdp(struct net_device *netdev, struct netdev_bpf *bpf);
+int vmxnet3_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+int vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
+ struct vmxnet3_rx_queue *rq,
+ struct Vmxnet3_RxCompDesc *rcd,
+ struct vmxnet3_rx_buf_info *rbi,
+ struct Vmxnet3_RxDesc *rxd,
+ struct sk_buff **skb_xdp_pass);
+int vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter,
+ struct vmxnet3_rx_queue *rq,
+ void *data, int len,
+ struct sk_buff **skb_xdp_pass);
+void *vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask);
+
+static inline bool vmxnet3_xdp_enabled(struct vmxnet3_adapter *adapter)
+{
+ return !!rcu_access_pointer(adapter->xdp_bpf_prog);
+}
+
+#endif
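
The MTU ceiling above falls out of the single-page buffer layout: XDP headroom plus payload plus the skb_shared_info tailroom must fit in one page, and the MTU additionally subtracts the Ethernet header, two VLAN tags and the FCS. A worked example with assumed values (4 KiB pages, 256-byte XDP_PACKET_HEADROOM, 2-byte NET_IP_ALIGN, 320-byte aligned skb_shared_info -- the real numbers depend on architecture and config):

#include <stdio.h>

#define EX_PAGE_SIZE	4096
#define EX_HEADROOM	(256 + 2)		/* XDP_PACKET_HEADROOM + NET_IP_ALIGN (assumed) */
#define EX_TAILROOM	320			/* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) (assumed) */
#define EX_L2_OVERHEAD	(14 + 2 * 4 + 4)	/* ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN */

int main(void)
{
	int max_frsize = EX_PAGE_SIZE - EX_HEADROOM - EX_TAILROOM;	/* 3518 */
	int max_mtu = max_frsize - EX_L2_OVERHEAD;			/* 3492 */

	printf("max frame size %d, max XDP MTU %d\n", max_frsize, max_mtu);
	return 0;
}
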
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 2bddcdf482a7..e463f59e95c2 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2328,14 +2328,11 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan, __be32 vni,
bool snoop)
{
- struct pcpu_sw_netstats *tx_stats, *rx_stats;
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
struct net_device *dev;
int len = skb->len;
- tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
- rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
skb->dev = dst_vxlan->dev;
@@ -2361,17 +2358,11 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
if ((dst_vxlan->cfg.flags & VXLAN_F_LEARN) && snoop)
vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
- u64_stats_update_begin(&tx_stats->syncp);
- u64_stats_inc(&tx_stats->tx_packets);
- u64_stats_add(&tx_stats->tx_bytes, len);
- u64_stats_update_end(&tx_stats->syncp);
+ dev_sw_netstats_tx_add(src_vxlan->dev, 1, len);
vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
if (__netif_rx(skb) == NET_RX_SUCCESS) {
- u64_stats_update_begin(&rx_stats->syncp);
- u64_stats_inc(&rx_stats->rx_packets);
- u64_stats_add(&rx_stats->rx_bytes, len);
- u64_stats_update_end(&rx_stats->syncp);
+ dev_sw_netstats_rx_add(dst_vxlan->dev, len);
vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
len);
} else {
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index 6d1bd9f52d02..dc09b75a3248 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -200,7 +200,7 @@ static int wg_get_device_start(struct netlink_callback *cb)
{
struct wg_device *wg;
- wg = lookup_interface(genl_dumpit_info(cb)->attrs, cb->skb);
+ wg = lookup_interface(genl_info_dump(cb)->attrs, cb->skb);
if (IS_ERR(wg))
return PTR_ERR(wg);
DUMP_CTX(cb)->wg = wg;
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 3f88e6a0a510..7d9a139db59e 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -554,7 +554,7 @@ static void wl1271_remove(struct spi_device *spi)
static struct spi_driver wl1271_spi_driver = {
.driver = {
.name = "wl1271_spi",
- .of_match_table = of_match_ptr(wlcore_spi_of_match_table),
+ .of_match_table = wlcore_spi_of_match_table,
},
.probe = wl1271_probe,
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index bb76c7c7cc82..b027be0b0b6f 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -200,18 +200,7 @@ static struct miscdevice miscdev = {
.mode = 0600,
};
-static int __init virtual_ncidev_init(void)
-{
- return misc_register(&miscdev);
-}
-
-static void __exit virtual_ncidev_exit(void)
-{
- misc_deregister(&miscdev);
-}
-
-module_init(virtual_ncidev_init);
-module_exit(virtual_ncidev_exit);
+module_misc_device(miscdev);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Virtual NCI device simulation driver");
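
The removed init/exit pair above is exactly the boilerplate that module_misc_device() generates; roughly, the macro (paraphrasing include/linux/miscdevice.h) is:

#define module_misc_device(misc_device) \
	module_driver(misc_device, misc_register, misc_deregister)

which registers the miscdevice at module load and deregisters it at unload, just as the hand-written functions did.
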
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5818af8eca5a..dbf26bc89dd4 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -284,6 +284,11 @@ struct mem_cgroup {
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
+ /*
+ * Hint of reclaim pressure for socket memory management. Note
+ * that this indicator should NOT be used in legacy cgroup mode
+ * where socket memory is accounted/charged separately.
+ */
unsigned long socket_pressure;
/* Legacy tcp memory accounting */
@@ -1727,8 +1732,8 @@ void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
- return true;
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ return !!memcg->tcpmem_pressure;
do {
if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
return true;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 80cc12a9a531..93399802ba77 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1208,9 +1208,7 @@ enum mlx5_cap_type {
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
- MLX5_CAP_RESERVED,
- MLX5_CAP_VECTOR_CALC,
- MLX5_CAP_QOS,
+ MLX5_CAP_QOS = 0xc,
MLX5_CAP_DEBUG,
MLX5_CAP_RESERVED_14,
MLX5_CAP_DEV_MEM,
@@ -1220,7 +1218,6 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
MLX5_CAP_CRYPTO = 0x1a,
- MLX5_CAP_DEV_SHAMPO = 0x1d,
MLX5_CAP_MACSEC = 0x1f,
MLX5_CAP_GENERAL_2 = 0x20,
MLX5_CAP_PORT_SELECTION = 0x25,
@@ -1239,7 +1236,6 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
- MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
MLX5_MCAM_REGS_NUM = 0x3,
};
@@ -1279,10 +1275,6 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
-#define MLX5_CAP_ETH_MAX(mdev, cap) \
- MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap)
-
#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
@@ -1305,77 +1297,40 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
-#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap)
-
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
-#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
-
#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
-#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
-
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
-#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
-
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-
#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
-#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
-
#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
-#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
-
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
-#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap)
-
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
-#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
-
#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
-#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
-
#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
-#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
-
#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
-#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, ft_field_support_2_esw_fdb.cap)
-
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
@@ -1384,10 +1339,6 @@ enum mlx5_qcam_feature_groups {
MLX5_GET64(flow_table_eswitch_cap, \
(mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
-#define MLX5_CAP_ESW_MAX(mdev, cap) \
- MLX5_GET(e_switch_cap, \
- mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
-
#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
MLX5_GET(port_selection_cap, \
mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
@@ -1400,26 +1351,15 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(adv_virtualization_cap, \
mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
-#define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \
- MLX5_GET(adv_virtualization_cap, \
- mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->max, cap)
-
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
-#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
- MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
-
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
#define MLX5_CAP_ODP_MAX(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
-#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
- MLX5_GET(vector_calc_cap, \
- mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap)
-
#define MLX5_CAP_QOS(mdev, cap)\
MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
@@ -1436,10 +1376,6 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
mng_access_reg_cap_mask.access_regs.reg)
-#define MLX5_CAP_MCAM_REG1(mdev, reg) \
- MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
- mng_access_reg_cap_mask.access_regs1.reg)
-
#define MLX5_CAP_MCAM_REG2(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
mng_access_reg_cap_mask.access_regs2.reg)
@@ -1485,9 +1421,6 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_CRYPTO(mdev, cap)\
MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
-#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
- MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap)
-
#define MLX5_CAP_MACSEC(mdev, cap)\
MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e1c7e502a4fc..c9d82e74daaa 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1022,7 +1022,6 @@ bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 87fd6f9ed82c..08dcb1f43be7 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1314,33 +1314,6 @@ struct mlx5_ifc_odp_cap_bits {
u8 reserved_at_120[0x6E0];
};
-struct mlx5_ifc_calc_op {
- u8 reserved_at_0[0x10];
- u8 reserved_at_10[0x9];
- u8 op_swap_endianness[0x1];
- u8 op_min[0x1];
- u8 op_xor[0x1];
- u8 op_or[0x1];
- u8 op_and[0x1];
- u8 op_max[0x1];
- u8 op_add[0x1];
-};
-
-struct mlx5_ifc_vector_calc_cap_bits {
- u8 calc_matrix[0x1];
- u8 reserved_at_1[0x1f];
- u8 reserved_at_20[0x8];
- u8 max_vec_count[0x8];
- u8 reserved_at_30[0xd];
- u8 max_chunk_size[0x3];
- struct mlx5_ifc_calc_op calc0;
- struct mlx5_ifc_calc_op calc1;
- struct mlx5_ifc_calc_op calc2;
- struct mlx5_ifc_calc_op calc3;
-
- u8 reserved_at_c0[0x720];
-};
-
struct mlx5_ifc_tls_cap_bits {
u8 tls_1_2_aes_gcm_128[0x1];
u8 tls_1_3_aes_gcm_128[0x1];
@@ -3435,20 +3408,6 @@ struct mlx5_ifc_roce_addr_layout_bits {
u8 reserved_at_e0[0x20];
};
-struct mlx5_ifc_shampo_cap_bits {
- u8 reserved_at_0[0x3];
- u8 shampo_log_max_reservation_size[0x5];
- u8 reserved_at_8[0x3];
- u8 shampo_log_min_reservation_size[0x5];
- u8 shampo_min_mss_size[0x10];
-
- u8 reserved_at_20[0x3];
- u8 shampo_max_log_headers_entry_size[0x5];
- u8 reserved_at_28[0x18];
-
- u8 reserved_at_40[0x7c0];
-};
-
struct mlx5_ifc_crypto_cap_bits {
u8 reserved_at_0[0x3];
u8 synchronize_dek[0x1];
@@ -3484,14 +3443,12 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
- struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
- struct mlx5_ifc_shampo_cap_bits shampo_cap;
struct mlx5_ifc_macsec_cap_bits macsec_cap;
struct mlx5_ifc_crypto_cap_bits crypto_cap;
u8 reserved_at_0[0x8000];
@@ -10858,8 +10815,9 @@ enum {
MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
- MLX5_MFRL_REG_RESET_STATE_TIMEOUT = 3,
+ MLX5_MFRL_REG_RESET_STATE_NEG_TIMEOUT = 3,
MLX5_MFRL_REG_RESET_STATE_NACK = 4,
+ MLX5_MFRL_REG_RESET_STATE_UNLOAD_TIMEOUT = 5,
};
enum {
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3c1ceedd1b77..1351b802ffcf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -110,6 +110,7 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
* @PHY_INTERFACE_MODE_XLGMII: 40 gigabit media-independent interface
* @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
+ * @PHY_INTERFACE_MODE_PSGMII: Penta SGMII
* @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
* @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
* @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
@@ -147,6 +148,7 @@ typedef enum {
PHY_INTERFACE_MODE_XGMII,
PHY_INTERFACE_MODE_XLGMII,
PHY_INTERFACE_MODE_MOCA,
+ PHY_INTERFACE_MODE_PSGMII,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
PHY_INTERFACE_MODE_100BASEX,
@@ -254,6 +256,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "xlgmii";
case PHY_INTERFACE_MODE_MOCA:
return "moca";
+ case PHY_INTERFACE_MODE_PSGMII:
+ return "psgmii";
case PHY_INTERFACE_MODE_QSGMII:
return "qsgmii";
case PHY_INTERFACE_MODE_TRGMII:
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index af729859385e..aa90adc3b2a4 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -386,6 +386,7 @@ struct bt_sock {
enum {
BT_SK_DEFER_SETUP,
BT_SK_SUSPEND,
+ BT_SK_PKT_STATUS
};
struct bt_sock_list {
@@ -400,6 +401,8 @@ int bt_sock_register(int proto, const struct net_proto_family *ops);
void bt_sock_unregister(int proto);
void bt_sock_link(struct bt_sock_list *l, struct sock *s);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
+struct sock *bt_sock_alloc(struct net *net, struct socket *sock,
+ struct proto *prot, int proto, gfp_t prio, int kern);
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
@@ -430,10 +433,6 @@ struct l2cap_ctrl {
struct l2cap_chan *chan;
};
-struct sco_ctrl {
- u8 pkt_status;
-};
-
struct hci_dev;
typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
@@ -464,16 +463,18 @@ struct bt_skb_cb {
u8 force_active;
u16 expect;
u8 incoming:1;
+ u8 pkt_status:2;
union {
struct l2cap_ctrl l2cap;
- struct sco_ctrl sco;
struct hci_ctrl hci;
struct mgmt_ctrl mgmt;
+ struct scm_creds creds;
};
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
+#define hci_skb_pkt_status(skb) bt_cb((skb))->pkt_status
#define hci_skb_expect(skb) bt_cb((skb))->expect
#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
#define hci_skb_event(skb) bt_cb((skb))->hci.req_event
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 872dcb91a540..5723405b833e 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -309,6 +309,16 @@ enum {
* to support it.
*/
HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
+
+ /* When this quirk is set, MSFT extension monitor tracking by
+ * address filter is supported. Since the tracking quantity of each
+ * pattern is limited, this feature supports tracking multiple
+ * devices concurrently if the controller supports multiple
+ * address filters.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER,
};
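The quirk above follows the usual pattern of a controller driver flagging controller-specific behaviour on hdev->quirks before registration. A minimal sketch of how a vendor driver might opt in, assuming a probe/setup path with an already-allocated hci_dev; the function name is illustrative, not part of this patch:

/* Hypothetical driver setup path: only the quirk/register ordering comes
 * from the comment above, the rest is illustrative.
 */
static int example_setup_hdev(struct hci_dev *hdev)
{
        /* Advertise MSFT address-filter based monitor tracking; must be
         * set before hci_register_dev() so the core sees it at init time.
         */
        set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks);

        return hci_register_dev(hdev);
}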
/* HCI device flags */
@@ -577,6 +587,7 @@ enum {
#define HCI_LE_CIS_CENTRAL 0x10
#define HCI_LE_CIS_PERIPHERAL 0x20
#define HCI_LE_ISO_BROADCASTER 0x40
+#define HCI_LE_ISO_SYNC_RECEIVER 0x80
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index e01d52cb668c..c53d74236e3a 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -321,8 +321,8 @@ struct adv_monitor {
#define HCI_MAX_SHORT_NAME_LENGTH 10
-#define HCI_CONN_HANDLE_UNSET 0xffff
#define HCI_CONN_HANDLE_MAX 0x0eff
+#define HCI_CONN_HANDLE_UNSET(_handle) (_handle > HCI_CONN_HANDLE_MAX)
/* Min encryption key size to match with SMP */
#define HCI_MIN_ENC_KEY_SIZE 7
@@ -739,6 +739,7 @@ struct hci_conn {
unsigned long flags;
enum conn_reasons conn_reason;
+ __u8 abort_reason;
__u32 clock;
__u16 clock_accuracy;
@@ -758,7 +759,6 @@ struct hci_conn {
struct delayed_work auto_accept_work;
struct delayed_work idle_work;
struct delayed_work le_conn_timeout;
- struct work_struct le_scan_cleanup;
struct device dev;
struct dentry *debugfs;
@@ -974,6 +974,10 @@ enum {
HCI_CONN_SCANNING,
HCI_CONN_AUTH_FAILURE,
HCI_CONN_PER_ADV,
+ HCI_CONN_BIG_CREATED,
+ HCI_CONN_CREATE_CIS,
+ HCI_CONN_BIG_SYNC,
+ HCI_CONN_BIG_SYNC_FAILED,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -1093,8 +1097,7 @@ static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
}
static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
- bdaddr_t *ba,
- __u8 big, __u8 bis)
+ bdaddr_t *ba, __u8 bis)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1105,7 +1108,33 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
if (bacmp(&c->dst, ba) || c->type != ISO_LINK)
continue;
- if (c->iso_qos.bcast.big == big && c->iso_qos.bcast.bis == bis) {
+ if (c->iso_qos.bcast.bis == bis) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
+hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
+ bdaddr_t *ba,
+ __u8 big, __u8 bis)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, ba) || c->type != ISO_LINK ||
+ !test_bit(HCI_CONN_PER_ADV, &c->flags))
+ continue;
+
+ if (c->iso_qos.bcast.big == big &&
+ c->iso_qos.bcast.bis == bis) {
rcu_read_unlock();
return c;
}
@@ -1190,7 +1219,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK)
+ if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
continue;
/* Match CIG ID if set */
@@ -1222,7 +1251,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK)
+ if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
continue;
if (handle == c->iso_qos.ucast.cig) {
@@ -1259,6 +1288,29 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
return NULL;
}
+static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *hdev,
+ __u8 handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK)
+ continue;
+
+ if (handle == c->iso_qos.bcast.big) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
__u8 type, __u16 state)
{
@@ -1320,11 +1372,33 @@ static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
return NULL;
}
+/* Returns true if an le connection is in the scanning state */
+static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return false;
+}
+
int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
bool hci_iso_setup_path(struct hci_conn *conn);
-int hci_le_create_cis(struct hci_conn *conn);
+int hci_le_create_cis_pending(struct hci_dev *hdev);
+int hci_conn_check_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
u8 role);
@@ -1351,6 +1425,9 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u16 setting, struct bt_codec *codec);
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, struct bt_iso_qos *qos);
+struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ struct bt_iso_qos *qos,
+ __u8 base_len, __u8 *base);
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, struct bt_iso_qos *qos);
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
@@ -1369,6 +1446,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
void hci_conn_failed(struct hci_conn *conn, u8 status);
+u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle);
/*
* hci_conn_get() and hci_conn_put() are used to control the life-time of an
@@ -1745,6 +1823,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+/* Maximum advertising length */
+#define max_adv_len(dev) \
+ (ext_adv_capable(dev) ? HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH)
+
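max_adv_len() resolves to the extended or legacy advertising data limit depending on controller support. A short hedged example of the bounds check a caller might perform; the surrounding validation helper is invented for illustration:

/* Illustrative length check against the controller's advertising limit. */
static int example_check_adv_len(struct hci_dev *hdev, u8 adv_data_len)
{
        if (adv_data_len > max_adv_len(hdev))
                return -EINVAL;

        return 0;
}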
/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
*
* C24: Mandatory if the LE Controller supports Connection State and either
@@ -1765,6 +1847,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define cis_peripheral_capable(dev) \
((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
#define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
+#define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER)
#define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
(!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks)))
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 2495be4d8b82..57eeb07aeb25 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -5,6 +5,9 @@
* Copyright (C) 2021 Intel Corporation
*/
+#define UINT_PTR(_handle) ((void *)((uintptr_t)_handle))
+#define PTR_UINT(_ptr) ((uintptr_t)((void *)_ptr))
+
typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
int err);
@@ -124,7 +127,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
-int hci_le_create_cis_sync(struct hci_dev *hdev, struct hci_conn *conn);
+int hci_le_create_cis_sync(struct hci_dev *hdev);
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 5e68b3dd4422..d382679efd2b 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -111,6 +111,8 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_WIDEBAND_SPEECH BIT(17)
#define MGMT_SETTING_CIS_CENTRAL BIT(18)
#define MGMT_SETTING_CIS_PERIPHERAL BIT(19)
+#define MGMT_SETTING_ISO_BROADCASTER BIT(20)
+#define MGMT_SETTING_ISO_SYNC_RECEIVER BIT(21)
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 1aa2e14b6c94..f40ddb4264fc 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -46,6 +46,4 @@ struct sco_conninfo {
__u8 dev_class[3];
};
-#define SCO_CMSG_PKT_STATUS 0x01
-
#endif /* __SCO_H */
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
index 685fb37df8e8..56cb7be92244 100644
--- a/include/net/dropreason.h
+++ b/include/net/dropreason.h
@@ -23,6 +23,12 @@ enum skb_drop_reason_subsys {
*/
SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR,
+ /**
+ * @SKB_DROP_REASON_SUBSYS_OPENVSWITCH: openvswitch drop reasons,
+ * see net/openvswitch/drop.h
+ */
+ SKB_DROP_REASON_SUBSYS_OPENVSWITCH,
+
/** @SKB_DROP_REASON_SUBSYS_NUM: number of subsystems defined */
SKB_DROP_REASON_SUBSYS_NUM
};
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index ed4622dd4828..e18a4c0d69ee 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -93,9 +93,9 @@ struct genl_family {
* struct genl_info - receiving information
* @snd_seq: sending sequence number
* @snd_portid: netlink portid of sender
+ * @family: generic netlink family
* @nlhdr: netlink message header
* @genlhdr: generic netlink message header
- * @userhdr: user specific header
* @attrs: netlink attributes
* @_net: network namespace
* @user_ptr: user pointers
@@ -104,16 +104,16 @@ struct genl_family {
struct genl_info {
u32 snd_seq;
u32 snd_portid;
- struct nlmsghdr * nlhdr;
+ const struct genl_family *family;
+ const struct nlmsghdr * nlhdr;
struct genlmsghdr * genlhdr;
- void * userhdr;
struct nlattr ** attrs;
possible_net_t _net;
void * user_ptr[2];
struct netlink_ext_ack *extack;
};
-static inline struct net *genl_info_net(struct genl_info *info)
+static inline struct net *genl_info_net(const struct genl_info *info)
{
return read_pnet(&info->_net);
}
@@ -123,6 +123,11 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
write_pnet(&info->_net, net);
}
+static inline void *genl_info_userhdr(const struct genl_info *info)
+{
+ return (u8 *)info->genlhdr + GENL_HDRLEN;
+}
+
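With the userhdr pointer gone from struct genl_info, families that carry a fixed header derive it from the genl header instead. A hedged sketch of a doit handler using the new accessor; the family header type, its field, and the handler itself are made up for illustration:

/* Illustrative only: "struct example_family_hdr" is not part of this patch. */
struct example_family_hdr {
        __u16 port;
};

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
        /* Replaces the removed info->userhdr pointer. */
        struct example_family_hdr *hdr = genl_info_userhdr(info);

        return hdr->port ? 0 : -EINVAL;
}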
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
#define GENL_SET_ERR_MSG_FMT(info, msg, args...) \
@@ -244,14 +249,13 @@ struct genl_split_ops {
/**
* struct genl_dumpit_info - info that is available during dumpit op call
- * @family: generic netlink family - for internal genl code usage
* @op: generic netlink ops - for internal genl code usage
* @attrs: netlink attributes
+ * @info: struct genl_info describing the request
*/
struct genl_dumpit_info {
- const struct genl_family *family;
struct genl_split_ops op;
- struct nlattr **attrs;
+ struct genl_info info;
};
static inline const struct genl_dumpit_info *
@@ -260,6 +264,38 @@ genl_dumpit_info(struct netlink_callback *cb)
return cb->data;
}
+static inline const struct genl_info *
+genl_info_dump(struct netlink_callback *cb)
+{
+ return &genl_dumpit_info(cb)->info;
+}
+
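Because struct genl_dumpit_info now embeds a full struct genl_info, dump handlers can reach request attributes through the same structure as doit handlers. A rough sketch, with the attribute index EXAMPLE_ATTR_FOO standing in for a real family attribute:

/* Sketch of a dumpit handler using the embedded genl_info. */
static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        const struct genl_info *info = genl_info_dump(cb);

        /* Attributes are available here when the op has a policy. */
        if (!info->attrs || !info->attrs[EXAMPLE_ATTR_FOO])
                return -EINVAL;

        return 0;
}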
+/**
+ * genl_info_init_ntf() - initialize genl_info for notifications
+ * @info: genl_info struct to set up
+ * @family: pointer to the genetlink family
+ * @cmd: command to be used in the notification
+ *
+ * Initialize a locally declared struct genl_info to pass to various APIs.
+ * Intended to be used when creating notifications.
+ */
+static inline void
+genl_info_init_ntf(struct genl_info *info, const struct genl_family *family,
+ u8 cmd)
+{
+ struct genlmsghdr *hdr = (void *) &info->user_ptr[0];
+
+ memset(info, 0, sizeof(*info));
+ info->family = family;
+ info->genlhdr = hdr;
+ hdr->cmd = cmd;
+}
+
+static inline bool genl_info_is_ntf(const struct genl_info *info)
+{
+ return !info->nlhdr;
+}
+
int genl_register_family(struct genl_family *family);
int genl_unregister_family(const struct genl_family *family);
void genl_notify(const struct genl_family *family, struct sk_buff *skb,
@@ -268,6 +304,32 @@ void genl_notify(const struct genl_family *family, struct sk_buff *skb,
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
const struct genl_family *family, int flags, u8 cmd);
+static inline void *
+__genlmsg_iput(struct sk_buff *skb, const struct genl_info *info, int flags)
+{
+ return genlmsg_put(skb, info->snd_portid, info->snd_seq, info->family,
+ flags, info->genlhdr->cmd);
+}
+
+/**
+ * genlmsg_iput - start genetlink message based on genl_info
+ * @skb: skb in which message header will be placed
+ * @info: genl_info as provided to do/dump handlers
+ *
+ * Convenience wrapper which starts a genetlink message based on
+ * information in user request. @info should be either the struct passed
+ * by genetlink core to do/dump handlers (when constructing replies to
+ * such requests) or a struct initialized by genl_info_init_ntf()
+ * when constructing notifications.
+ *
+ * Returns pointer to new genetlink header.
+ */
+static inline void *
+genlmsg_iput(struct sk_buff *skb, const struct genl_info *info)
+{
+ return __genlmsg_iput(skb, info, 0);
+}
+
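Taken together, genl_info_init_ntf() and genlmsg_iput() let reply and notification paths share one message-construction helper. A rough sketch of a notification sender under the assumption of a placeholder command and multicast group index; error handling is kept minimal:

/* Sketch only: EXAMPLE_CMD_NTF and the group index 0 are placeholders. */
static int example_notify(const struct genl_family *family)
{
        struct genl_info info;
        struct sk_buff *skb;
        void *hdr;

        skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /* Describe the notification; genl_info_is_ntf(&info) is now true. */
        genl_info_init_ntf(&info, family, EXAMPLE_CMD_NTF);

        /* The same helper builds replies inside doit handlers. */
        hdr = genlmsg_iput(skb, &info);
        if (!hdr) {
                nlmsg_free(skb);
                return -EMSGSIZE;
        }

        genlmsg_end(skb, hdr);
        return genlmsg_multicast(family, skb, 0, 0, GFP_KERNEL);
}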
/**
* genlmsg_nlhdr - Obtain netlink header from user specified header
* @user_hdr: user header as returned from genlmsg_put()
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index b86b8e21de7f..f50a644d87a9 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -40,8 +40,10 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
int inet_shutdown(struct socket *sock, int how);
int inet_listen(struct socket *sock, int backlog);
+int __inet_listen_sk(struct sock *sk, int backlog);
void inet_sock_destruct(struct sock *sk);
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len);
/* Don't allocate port at this moment, defer to connect. */
#define BIND_FORCE_ADDRESS_NO_PORT (1 << 0)
/* Grab and release socket lock. */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index c2b15f7e5516..5d2fcc137b88 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -164,7 +164,8 @@ enum inet_csk_ack_state_t {
ICSK_ACK_TIMER = 2,
ICSK_ACK_PUSHED = 4,
ICSK_ACK_PUSHED2 = 8,
- ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */
+ ICSK_ACK_NOW = 16, /* Send the next ACK immediately (once) */
+ ICSK_ACK_NOMEM = 32,
};
void inet_csk_init_xmit_timers(struct sock *sk,
@@ -341,9 +342,9 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
}
-static inline bool inet_csk_has_ulp(struct sock *sk)
+static inline bool inet_csk_has_ulp(const struct sock *sk)
{
- return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
+ return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
}
#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 0bb32bfc6183..acbb93d7607a 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -194,13 +194,13 @@ struct rtable;
* @inet_rcv_saddr - Bound local IPv4 addr
* @inet_dport - Destination port
* @inet_num - Local port
+ * @inet_flags - various atomic flags
* @inet_saddr - Sending source
* @uc_ttl - Unicast TTL
* @inet_sport - Source port
* @inet_id - ID counter for DF pkts
* @tos - TOS
* @mc_ttl - Multicasting TTL
- * @is_icsk - is this an inet_connection_sock?
* @uc_index - Unicast outgoing device index
* @mc_index - Multicast device index
* @mc_list - Group array
@@ -218,57 +218,88 @@ struct inet_sock {
#define inet_dport sk.__sk_common.skc_dport
#define inet_num sk.__sk_common.skc_num
+ unsigned long inet_flags;
__be32 inet_saddr;
__s16 uc_ttl;
- __u16 cmsg_flags;
- struct ip_options_rcu __rcu *inet_opt;
__be16 inet_sport;
+ struct ip_options_rcu __rcu *inet_opt;
__u16 inet_id;
__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
__u8 pmtudisc;
- __u8 recverr:1,
- is_icsk:1,
- freebind:1,
- hdrincl:1,
- mc_loop:1,
- transparent:1,
- mc_all:1,
- nodefrag:1;
- __u8 bind_address_no_port:1,
- recverr_rfc4884:1,
- defer_connect:1; /* Indicates that fastopen_connect is set
- * and cookie exists so we defer connect
- * until first data frame is written
- */
__u8 rcv_tos;
__u8 convert_csum;
int uc_index;
int mc_index;
__be32 mc_addr;
- struct ip_mc_socklist __rcu *mc_list;
- struct inet_cork_full cork;
struct {
__u16 lo;
__u16 hi;
} local_port_range;
+
+ struct ip_mc_socklist __rcu *mc_list;
+ struct inet_cork_full cork;
};
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
+enum {
+ INET_FLAGS_PKTINFO = 0,
+ INET_FLAGS_TTL = 1,
+ INET_FLAGS_TOS = 2,
+ INET_FLAGS_RECVOPTS = 3,
+ INET_FLAGS_RETOPTS = 4,
+ INET_FLAGS_PASSSEC = 5,
+ INET_FLAGS_ORIGDSTADDR = 6,
+ INET_FLAGS_CHECKSUM = 7,
+ INET_FLAGS_RECVFRAGSIZE = 8,
+
+ INET_FLAGS_RECVERR = 9,
+ INET_FLAGS_RECVERR_RFC4884 = 10,
+ INET_FLAGS_FREEBIND = 11,
+ INET_FLAGS_HDRINCL = 12,
+ INET_FLAGS_MC_LOOP = 13,
+ INET_FLAGS_MC_ALL = 14,
+ INET_FLAGS_TRANSPARENT = 15,
+ INET_FLAGS_IS_ICSK = 16,
+ INET_FLAGS_NODEFRAG = 17,
+ INET_FLAGS_BIND_ADDRESS_NO_PORT = 18,
+ INET_FLAGS_DEFER_CONNECT = 19,
+};
+
/* cmsg flags for inet */
-#define IP_CMSG_PKTINFO BIT(0)
-#define IP_CMSG_TTL BIT(1)
-#define IP_CMSG_TOS BIT(2)
-#define IP_CMSG_RECVOPTS BIT(3)
-#define IP_CMSG_RETOPTS BIT(4)
-#define IP_CMSG_PASSSEC BIT(5)
-#define IP_CMSG_ORIGDSTADDR BIT(6)
-#define IP_CMSG_CHECKSUM BIT(7)
-#define IP_CMSG_RECVFRAGSIZE BIT(8)
+#define IP_CMSG_PKTINFO BIT(INET_FLAGS_PKTINFO)
+#define IP_CMSG_TTL BIT(INET_FLAGS_TTL)
+#define IP_CMSG_TOS BIT(INET_FLAGS_TOS)
+#define IP_CMSG_RECVOPTS BIT(INET_FLAGS_RECVOPTS)
+#define IP_CMSG_RETOPTS BIT(INET_FLAGS_RETOPTS)
+#define IP_CMSG_PASSSEC BIT(INET_FLAGS_PASSSEC)
+#define IP_CMSG_ORIGDSTADDR BIT(INET_FLAGS_ORIGDSTADDR)
+#define IP_CMSG_CHECKSUM BIT(INET_FLAGS_CHECKSUM)
+#define IP_CMSG_RECVFRAGSIZE BIT(INET_FLAGS_RECVFRAGSIZE)
+
+#define IP_CMSG_ALL (IP_CMSG_PKTINFO | IP_CMSG_TTL | \
+ IP_CMSG_TOS | IP_CMSG_RECVOPTS | \
+ IP_CMSG_RETOPTS | IP_CMSG_PASSSEC | \
+ IP_CMSG_ORIGDSTADDR | IP_CMSG_CHECKSUM | \
+ IP_CMSG_RECVFRAGSIZE)
+
+static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
+{
+ return READ_ONCE(inet->inet_flags) & IP_CMSG_ALL;
+}
+
+#define inet_test_bit(nr, sk) \
+ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet_set_bit(nr, sk) \
+ set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet_clear_bit(nr, sk) \
+ clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
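The per-socket byte flags are replaced by atomic bits in inet_flags, reached through the inet_*_bit() macros; later hunks in this patch convert call sites such as inet_sk_flowi_flags() accordingly. A minimal hedged sketch of setting and testing one of the new bits; the helpers themselves are invented for illustration:

/* Illustrative helpers using the new lockless flag accessors. */
static void example_set_freebind(struct sock *sk, bool on)
{
        inet_assign_bit(FREEBIND, sk, on);
}

static bool example_may_bind_nonlocal(struct net *net, struct sock *sk)
{
        return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) ||
               inet_test_bit(FREEBIND, sk) ||
               inet_test_bit(TRANSPARENT, sk);
}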
static inline bool sk_is_inet(struct sock *sk)
{
@@ -363,7 +394,7 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
__u8 flags = 0;
- if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
+ if (inet_test_bit(TRANSPARENT, sk) || inet_test_bit(HDRINCL, sk))
flags |= FLOWI_FLAG_ANYSRC;
return flags;
}
@@ -389,7 +420,8 @@ static inline bool inet_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) ||
- inet->freebind || inet->transparent;
+ test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) ||
+ test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags);
}
static inline bool inet_addr_valid_or_nonlocal(struct net *net,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 05e6f756feaf..c9ff23cf313e 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -179,6 +179,9 @@ struct fib6_info {
refcount_t fib6_ref;
unsigned long expires;
+
+ struct hlist_node gc_link;
+
struct dst_metrics *fib6_metrics;
#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
@@ -247,19 +250,6 @@ static inline bool fib6_requires_src(const struct fib6_info *rt)
return rt->fib6_src.plen > 0;
}
-static inline void fib6_clean_expires(struct fib6_info *f6i)
-{
- f6i->fib6_flags &= ~RTF_EXPIRES;
- f6i->expires = 0;
-}
-
-static inline void fib6_set_expires(struct fib6_info *f6i,
- unsigned long expires)
-{
- f6i->expires = expires;
- f6i->fib6_flags |= RTF_EXPIRES;
-}
-
static inline bool fib6_check_expired(const struct fib6_info *f6i)
{
if (f6i->fib6_flags & RTF_EXPIRES)
@@ -267,6 +257,11 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i)
return false;
}
+static inline bool fib6_has_expires(const struct fib6_info *f6i)
+{
+ return f6i->fib6_flags & RTF_EXPIRES;
+}
+
/* Function to safely get fn->fn_sernum for passed in rt
* and store result in passed in cookie.
* Return true if we can get cookie safely
@@ -388,6 +383,7 @@ struct fib6_table {
struct inet_peer_base tb6_peers;
unsigned int flags;
unsigned int fib_seq;
+ struct hlist_head tb6_gc_hlist; /* GC candidates */
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -504,6 +500,48 @@ void fib6_gc_cleanup(void);
int fib6_init(void);
+/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be
+ * NULL.
+ */
+static inline void fib6_set_expires_locked(struct fib6_info *f6i,
+ unsigned long expires)
+{
+ struct fib6_table *tb6;
+
+ tb6 = f6i->fib6_table;
+ f6i->expires = expires;
+ if (tb6 && !fib6_has_expires(f6i))
+ hlist_add_head(&f6i->gc_link, &tb6->tb6_gc_hlist);
+ f6i->fib6_flags |= RTF_EXPIRES;
+}
+
+/* fib6_info must be locked by the caller, and fib6_info->fib6_table can be
+ * NULL. If fib6_table is NULL, the fib6_info will not be inserted into the
+ * list of GC candidates until it is inserted into a table.
+ */
+static inline void fib6_set_expires(struct fib6_info *f6i,
+ unsigned long expires)
+{
+ spin_lock_bh(&f6i->fib6_table->tb6_lock);
+ fib6_set_expires_locked(f6i, expires);
+ spin_unlock_bh(&f6i->fib6_table->tb6_lock);
+}
+
+static inline void fib6_clean_expires_locked(struct fib6_info *f6i)
+{
+ if (fib6_has_expires(f6i))
+ hlist_del_init(&f6i->gc_link);
+ f6i->fib6_flags &= ~RTF_EXPIRES;
+ f6i->expires = 0;
+}
+
+static inline void fib6_clean_expires(struct fib6_info *f6i)
+{
+ spin_lock_bh(&f6i->fib6_table->tb6_lock);
+ fib6_clean_expires_locked(f6i);
+ spin_unlock_bh(&f6i->fib6_table->tb6_lock);
+}
+
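The expires helpers now also maintain the per-table list of GC candidates, which is why they come in _locked and self-locking variants. A brief sketch of the intended split, assuming the caller knows whether tb6_lock is already held (the wrapper function is illustrative):

/* Sketch: pick the variant based on whether tb6_lock is already held. */
static void example_refresh_route(struct fib6_info *f6i, unsigned long lifetime,
                                  bool holds_tb6_lock)
{
        unsigned long expires = jiffies + lifetime;

        if (holds_tb6_lock)
                fib6_set_expires_locked(f6i, expires);  /* e.g. insert path */
        else
                fib6_set_expires(f6i, expires);         /* takes tb6_lock itself */
}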
struct ipv6_route_iter {
struct seq_net_private p;
struct fib6_walker w;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 2acc4c808d45..d40d8238d4c2 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -937,7 +937,8 @@ static inline bool ipv6_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return net->ipv6.sysctl.ip_nonlocal_bind ||
- inet->freebind || inet->transparent;
+ test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) ||
+ test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags);
}
/* Sysctl settings for net ipv6.auto_flowlabels */
@@ -1216,6 +1217,7 @@ void inet6_cleanup_sock(struct sock *sk);
void inet6_sock_destruct(struct sock *sk);
int inet6_release(struct socket *sock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet6_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 879990101c9f..9f70b4332238 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -352,6 +352,13 @@ struct mana_tx_qp {
struct mana_ethtool_stats {
u64 stop_queue;
u64 wake_queue;
+ u64 hc_tx_bytes;
+ u64 hc_tx_ucast_pkts;
+ u64 hc_tx_ucast_bytes;
+ u64 hc_tx_bcast_pkts;
+ u64 hc_tx_bcast_bytes;
+ u64 hc_tx_mcast_pkts;
+ u64 hc_tx_mcast_bytes;
u64 tx_cqe_err;
u64 tx_cqe_unknown_type;
u64 rx_coalesced_err;
@@ -442,6 +449,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+void mana_query_gf_stats(struct mana_port_context *apc);
extern const struct ethtool_ops mana_ethtool_ops;
@@ -583,6 +591,49 @@ struct mana_fence_rq_resp {
struct gdma_resp_hdr hdr;
}; /* HW DATA */
+/* Query stats RQ */
+struct mana_query_gf_stat_req {
+ struct gdma_req_hdr hdr;
+ u64 req_stats;
+}; /* HW DATA */
+
+struct mana_query_gf_stat_resp {
+ struct gdma_resp_hdr hdr;
+ u64 reported_stats;
+ /* rx errors/discards */
+ u64 discard_rx_nowqe;
+ u64 err_rx_vport_disabled;
+ /* rx bytes/packets */
+ u64 hc_rx_bytes;
+ u64 hc_rx_ucast_pkts;
+ u64 hc_rx_ucast_bytes;
+ u64 hc_rx_bcast_pkts;
+ u64 hc_rx_bcast_bytes;
+ u64 hc_rx_mcast_pkts;
+ u64 hc_rx_mcast_bytes;
+ /* tx errors */
+ u64 err_tx_gf_disabled;
+ u64 err_tx_vport_disabled;
+ u64 err_tx_inval_vport_offset_pkt;
+ u64 err_tx_vlan_enforcement;
+ u64 err_tx_ethtype_enforcement;
+ u64 err_tx_SA_enforecement;
+ u64 err_tx_SQPDID_enforcement;
+ u64 err_tx_CQPDID_enforcement;
+ u64 err_tx_mtu_violation;
+ u64 err_tx_inval_oob;
+ /* tx bytes/packets */
+ u64 hc_tx_bytes;
+ u64 hc_tx_ucast_pkts;
+ u64 hc_tx_ucast_bytes;
+ u64 hc_tx_bcast_pkts;
+ u64 hc_tx_bcast_bytes;
+ u64 hc_tx_mcast_pkts;
+ u64 hc_tx_mcast_bytes;
+ /* tx error */
+ u64 err_tx_gdma;
+}; /* HW DATA */
+
/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
struct gdma_req_hdr hdr;
@@ -662,6 +713,42 @@ struct mana_deregister_filter_resp {
struct gdma_resp_hdr hdr;
}; /* HW DATA */
+/* Requested GF stats Flags */
+/* Rx discards/Errors */
+#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001
+#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002
+/* Rx bytes/pkts */
+#define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004
+#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008
+#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010
+#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020
+#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040
+#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080
+#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100
+/* Tx errors */
+#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200
+#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400
+#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
+ 0x0000000000000800
+#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000
+#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
+ 0x0000000000002000
+#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000
+#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000
+#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000
+#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000
+#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000
+/* Tx bytes/pkts */
+#define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000
+#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000
+#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000
+#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000
+#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000
+#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000
+#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000
+/* Tx error */
+#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000
+
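req_stats in struct mana_query_gf_stat_req is a bitmask assembled from the flags above, and the hardware echoes the counters it actually reported in reported_stats. A hedged sketch of building a request mask for just the hc_tx_* counters added to mana_ethtool_stats earlier in this patch; the header setup and firmware round-trip are omitted:

/* Sketch only: builds the request mask for the hc_tx_* counters. */
static u64 example_tx_stats_mask(void)
{
        return STATISTICS_FLAGS_HC_TX_BYTES |
               STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
               STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
               STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
               STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
               STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
               STATISTICS_FLAGS_HC_TX_BCAST_BYTES;
}

static void example_fill_gf_stat_req(struct mana_query_gf_stat_req *req)
{
        req->req_stats = example_tx_stats_mask();
}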
#define MANA_MAX_NUM_QUEUES 64
#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 8c77832d0240..cc8060c017d5 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -2,8 +2,6 @@
#ifndef _NETNS_NFTABLES_H_
#define _NETNS_NFTABLES_H_
-#include <linux/list.h>
-
struct netns_nftables {
u8 gencursor;
};
diff --git a/include/net/route.h b/include/net/route.h
index d9ca98d2366f..51a45b1887b5 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -298,7 +298,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
{
__u8 flow_flags = 0;
- if (inet_sk(sk)->transparent)
+ if (inet_test_bit(TRANSPARENT, sk))
flow_flags |= FLOWI_FLAG_ANYSRC;
flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6d77c08d83b7..07b21d9a9620 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2031,7 +2031,7 @@ static inline bool inet_sk_transparent(const struct sock *sk)
case TCP_NEW_SYN_RECV:
return inet_rsk(inet_reqsk(sk))->no_srccheck;
}
- return inet_sk(sk)->transparent;
+ return inet_test_bit(TRANSPARENT, sk);
}
/* Determines whether this is a thin stream (which may suffer from
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index e94870e77ee9..efc82c318fa2 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -965,6 +965,7 @@ struct check_pkt_len_arg {
* start of the packet or at the start of the l3 header depending on the value
* of l3 tunnel flag in the tun_flags field of OVS_ACTION_ATTR_ADD_MPLS
* argument.
+ * @OVS_ACTION_ATTR_DROP: Explicit drop action.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -1002,6 +1003,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_CHECK_PKT_LEN, /* Nested OVS_CHECK_PKT_LEN_ATTR_*. */
OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */
OVS_ACTION_ATTR_DEC_TTL, /* Nested OVS_DEC_TTL_ATTR_*. */
+ OVS_ACTION_ATTR_DROP, /* u32 error code. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index b52644771cc4..22c6689d9302 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -244,6 +244,14 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
if (mem_cgroup_disabled())
return;
+ /*
+ * The in-kernel users only care about the reclaim efficiency
+ * for this @memcg rather than the whole subtree, and there
+ * isn't and won't be any in-kernel user in a legacy cgroup.
+ */
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !tree)
+ return;
+
vmpr = memcg_to_vmpressure(memcg);
/*
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 1c3c7ff5c3c6..336a76165454 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -140,6 +140,35 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto,
return err;
}
+struct sock *bt_sock_alloc(struct net *net, struct socket *sock,
+ struct proto *prot, int proto, gfp_t prio, int kern)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(net, PF_BLUETOOTH, prio, prot, kern);
+ if (!sk)
+ return NULL;
+
+ sock_init_data(sock, sk);
+ INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
+
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
+ sk->sk_protocol = proto;
+ sk->sk_state = BT_OPEN;
+
+ /* Init peer information so it can be properly monitored */
+ if (!kern) {
+ spin_lock(&sk->sk_peer_lock);
+ sk->sk_peer_pid = get_pid(task_tgid(current));
+ sk->sk_peer_cred = get_current_cred();
+ spin_unlock(&sk->sk_peer_lock);
+ }
+
+ return sk;
+}
+EXPORT_SYMBOL(bt_sock_alloc);
+
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
write_lock(&l->lock);
@@ -158,6 +187,9 @@ EXPORT_SYMBOL(bt_sock_unlink);
void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
{
+ const struct cred *old_cred;
+ struct pid *old_pid;
+
BT_DBG("parent %p, sk %p", parent, sk);
sock_hold(sk);
@@ -170,6 +202,19 @@ void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
bt_sk(sk)->parent = parent;
+ /* Copy credentials from parent since for incoming connections the
+ * socket is allocated by the kernel.
+ */
+ spin_lock(&sk->sk_peer_lock);
+ old_pid = sk->sk_peer_pid;
+ old_cred = sk->sk_peer_cred;
+ sk->sk_peer_pid = get_pid(parent->sk_peer_pid);
+ sk->sk_peer_cred = get_cred(parent->sk_peer_cred);
+ spin_unlock(&sk->sk_peer_lock);
+
+ put_pid(old_pid);
+ put_cred(old_cred);
+
if (bh)
bh_unlock_sock(sk);
else
@@ -288,8 +333,12 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
&msg->msg_namelen);
- if (bt_sk(sk)->skb_put_cmsg)
- bt_sk(sk)->skb_put_cmsg(skb, msg, sk);
+ if (test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags)) {
+ u8 pkt_status = hci_skb_pkt_status(skb);
+
+ put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS,
+ sizeof(pkt_status), &pkt_status);
+ }
}
skb_free_datagram(sk, skb);
diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
index 832764dfbfb3..97c87abd129f 100644
--- a/net/bluetooth/amp.h
+++ b/net/bluetooth/amp.h
@@ -28,7 +28,6 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type);
-void amp_read_loc_info(struct hci_dev *hdev, struct amp_mgr *mgr);
void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle);
void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr);
void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 57d509d77cb4..00d47bcf4d7d 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -205,21 +205,13 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern);
+ sk = bt_sock_alloc(net, sock, &bnep_proto, protocol, GFP_ATOMIC, kern);
if (!sk)
return -ENOMEM;
- sock_init_data(sock, sk);
-
sock->ops = &bnep_sock_ops;
-
sock->state = SS_UNCONNECTED;
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- sk->sk_protocol = protocol;
- sk->sk_state = BT_OPEN;
-
bt_sock_link(&bnep_sk_list, sk);
return 0;
}
diff --git a/net/bluetooth/coredump.c b/net/bluetooth/coredump.c
index d2d2624ec708..ec97a4bab1c9 100644
--- a/net/bluetooth/coredump.c
+++ b/net/bluetooth/coredump.c
@@ -100,8 +100,7 @@ void hci_devcd_reset(struct hci_dev *hdev)
/* Call with hci_dev_lock only. */
static void hci_devcd_free(struct hci_dev *hdev)
{
- if (hdev->dump.head)
- vfree(hdev->dump.head);
+ vfree(hdev->dump.head);
hci_devcd_reset(hdev);
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 76222565e2df..234746721047 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -178,57 +178,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
hci_conn_put(conn);
}
-static void le_scan_cleanup(struct work_struct *work)
-{
- struct hci_conn *conn = container_of(work, struct hci_conn,
- le_scan_cleanup);
- struct hci_dev *hdev = conn->hdev;
- struct hci_conn *c = NULL;
-
- BT_DBG("%s hcon %p", hdev->name, conn);
-
- hci_dev_lock(hdev);
-
- /* Check that the hci_conn is still around */
- rcu_read_lock();
- list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
- if (c == conn)
- break;
- }
- rcu_read_unlock();
-
- if (c == conn) {
- hci_connect_le_scan_cleanup(conn, 0x00);
- hci_conn_cleanup(conn);
- }
-
- hci_dev_unlock(hdev);
- hci_dev_put(hdev);
- hci_conn_put(conn);
-}
-
-static void hci_connect_le_scan_remove(struct hci_conn *conn)
-{
- BT_DBG("%s hcon %p", conn->hdev->name, conn);
-
- /* We can't call hci_conn_del/hci_conn_cleanup here since that
- * could deadlock with another hci_conn_del() call that's holding
- * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
- * Instead, grab temporary extra references to the hci_dev and
- * hci_conn and perform the necessary cleanup in a separate work
- * callback.
- */
-
- hci_dev_hold(conn->hdev);
- hci_conn_get(conn);
-
- /* Even though we hold a reference to the hdev, many other
- * things might get cleaned up meanwhile, including the hdev's
- * own workqueue, so we can't use that for scheduling.
- */
- schedule_work(&conn->le_scan_cleanup);
-}
-
static void hci_acl_create_connection(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@@ -679,13 +628,6 @@ static void hci_conn_timeout(struct work_struct *work)
if (refcnt > 0)
return;
- /* LE connections in scanning state need special handling */
- if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
- test_bit(HCI_CONN_SCANNING, &conn->flags)) {
- hci_connect_le_scan_remove(conn);
- return;
- }
-
hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
@@ -791,7 +733,8 @@ struct iso_list_data {
u16 sync_handle;
};
int count;
- struct iso_cig_params pdu;
+ bool big_term;
+ bool big_sync_term;
};
static void bis_list(struct hci_conn *conn, void *data)
@@ -809,17 +752,6 @@ static void bis_list(struct hci_conn *conn, void *data)
d->count++;
}
-static void find_bis(struct hci_conn *conn, void *data)
-{
- struct iso_list_data *d = data;
-
- /* Ignore unicast */
- if (bacmp(&conn->dst, BDADDR_ANY))
- return;
-
- d->count++;
-}
-
static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
struct iso_list_data *d = data;
@@ -828,11 +760,8 @@ static int terminate_big_sync(struct hci_dev *hdev, void *data)
hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
- /* Check if ISO connection is a BIS and terminate BIG if there are
- * no other connections using it.
- */
- hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
- if (d->count)
+ /* Only terminate BIG if it has been created */
+ if (!d->big_term)
return 0;
return hci_le_terminate_big_sync(hdev, d->big,
@@ -844,19 +773,21 @@ static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
kfree(data);
}
-static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
+static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
{
struct iso_list_data *d;
int ret;
- bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
+ bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
+ conn->iso_qos.bcast.bis);
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
- d->big = big;
- d->bis = bis;
+ d->big = conn->iso_qos.bcast.big;
+ d->bis = conn->iso_qos.bcast.bis;
+ d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
terminate_big_destroy);
@@ -873,31 +804,26 @@ static int big_terminate_sync(struct hci_dev *hdev, void *data)
bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
d->sync_handle);
- /* Check if ISO connection is a BIS and terminate BIG if there are
- * no other connections using it.
- */
- hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
- if (d->count)
- return 0;
-
- hci_le_big_terminate_sync(hdev, d->big);
+ if (d->big_sync_term)
+ hci_le_big_terminate_sync(hdev, d->big);
return hci_le_pa_terminate_sync(hdev, d->sync_handle);
}
-static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
+static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
{
struct iso_list_data *d;
int ret;
- bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
+ bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
d->big = big;
- d->sync_handle = sync_handle;
+ d->sync_handle = conn->sync_handle;
+ d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);
ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
terminate_big_destroy);
@@ -916,6 +842,7 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
static void bis_cleanup(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
+ struct hci_conn *bis;
bt_dev_dbg(hdev, "conn %p", conn);
@@ -923,17 +850,29 @@ static void bis_cleanup(struct hci_conn *conn)
if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
return;
- hci_le_terminate_big(hdev, conn->iso_qos.bcast.big,
- conn->iso_qos.bcast.bis);
+ /* Check if ISO connection is a BIS and terminate advertising
+ * set and BIG if there are no other connections using it.
+ */
+ bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
+ if (bis)
+ return;
+
+ hci_le_terminate_big(hdev, conn);
} else {
+ bis = hci_conn_hash_lookup_big_any_dst(hdev,
+ conn->iso_qos.bcast.big);
+
+ if (bis)
+ return;
+
hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
- conn->sync_handle);
+ conn);
}
}
static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
- u8 handle = PTR_ERR(data);
+ u8 handle = PTR_UINT(data);
return hci_le_remove_cig_sync(hdev, handle);
}
@@ -942,7 +881,8 @@ static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
- return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
+ return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
+ NULL);
}
static void find_cis(struct hci_conn *conn, void *data)
@@ -983,6 +923,25 @@ static void cis_cleanup(struct hci_conn *conn)
hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}
+static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+ u16 handle = HCI_CONN_HANDLE_MAX + 1;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ /* Find the first unused handle */
+ if (handle == 0xffff || c->handle != handle)
+ break;
+ handle++;
+ }
+ rcu_read_unlock();
+
+ return handle;
+}
+
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
u8 role)
{
@@ -996,7 +955,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
bacpy(&conn->dst, dst);
bacpy(&conn->src, &hdev->bdaddr);
- conn->handle = HCI_CONN_HANDLE_UNSET;
+ conn->handle = hci_conn_hash_alloc_unset(hdev);
conn->hdev = hdev;
conn->type = type;
conn->role = role;
@@ -1059,7 +1018,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
- INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
atomic_set(&conn->refcnt, 0);
@@ -1109,7 +1067,7 @@ static void hci_conn_unlink(struct hci_conn *conn)
*/
if ((child->type == SCO_LINK ||
child->type == ESCO_LINK) &&
- child->handle == HCI_CONN_HANDLE_UNSET)
+ HCI_CONN_HANDLE_UNSET(child->handle))
hci_conn_del(child);
}
@@ -1273,9 +1231,41 @@ void hci_conn_failed(struct hci_conn *conn, u8 status)
hci_conn_del(conn);
}
+/* This function requires the caller holds hdev->lock */
+u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
+
+ if (conn->handle == handle)
+ return 0;
+
+ if (handle > HCI_CONN_HANDLE_MAX) {
+ bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+ handle, HCI_CONN_HANDLE_MAX);
+ return HCI_ERROR_INVALID_PARAMETERS;
+ }
+
+ /* If abort_reason has been set it means the connection is being
+ * aborted and the handle shall not be changed.
+ */
+ if (conn->abort_reason)
+ return conn->abort_reason;
+
+ conn->handle = handle;
+
+ return 0;
+}
+
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
- struct hci_conn *conn = data;
+ struct hci_conn *conn;
+ u16 handle = PTR_UINT(data);
+
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!conn)
+ return;
bt_dev_dbg(hdev, "err %d", err);
@@ -1300,10 +1290,17 @@ done:
static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
- struct hci_conn *conn = data;
+ struct hci_conn *conn;
+ u16 handle = PTR_UINT(data);
+
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!conn)
+ return 0;
bt_dev_dbg(hdev, "conn %p", conn);
+ conn->state = BT_CONNECT;
+
return hci_le_create_conn_sync(hdev, conn);
}
@@ -1373,10 +1370,10 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout;
- conn->state = BT_CONNECT;
clear_bit(HCI_CONN_SCANNING, &conn->flags);
- err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
+ err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
+ UINT_PTR(conn->handle),
create_le_conn_complete);
if (err) {
hci_conn_del(conn);
@@ -1440,25 +1437,23 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev,
static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
- struct iso_list_data data;
+ struct hci_conn *conn;
+ u8 big;
/* Allocate a BIG if not set */
if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
- for (data.big = 0x00; data.big < 0xef; data.big++) {
- data.count = 0;
- data.bis = 0xff;
+ for (big = 0x00; big < 0xef; big++) {
- hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
- BT_BOUND, &data);
- if (!data.count)
+ conn = hci_conn_hash_lookup_big(hdev, big);
+ if (!conn)
break;
}
- if (data.big == 0xef)
+ if (big == 0xef)
return -EADDRNOTAVAIL;
/* Update BIG */
- qos->bcast.big = data.big;
+ qos->bcast.big = big;
}
return 0;
@@ -1466,28 +1461,27 @@ static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
- struct iso_list_data data;
+ struct hci_conn *conn;
+ u8 bis;
/* Allocate BIS if not set */
if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
/* Find an unused adv set to advertise BIS, skip instance 0x00
* since it is reserved as general purpose set.
*/
- for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets;
- data.bis++) {
- data.count = 0;
+ for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
+ bis++) {
- hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
- BT_BOUND, &data);
- if (!data.count)
+ conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
+ if (!conn)
break;
}
- if (data.bis == hdev->le_num_of_adv_sets)
+ if (bis == hdev->le_num_of_adv_sets)
return -EADDRNOTAVAIL;
/* Update BIS */
- qos->bcast.bis = data.bis;
+ qos->bcast.bis = bis;
}
return 0;
@@ -1495,10 +1489,10 @@ static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
/* This function requires the caller holds hdev->lock */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
- struct bt_iso_qos *qos)
+ struct bt_iso_qos *qos, __u8 base_len,
+ __u8 *base)
{
struct hci_conn *conn;
- struct iso_list_data data;
int err;
/* Let's make sure that le is enabled.*/
@@ -1516,24 +1510,26 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
if (err)
return ERR_PTR(err);
- data.big = qos->bcast.big;
- data.bis = qos->bcast.bis;
- data.count = 0;
-
- /* Check if there is already a matching BIG/BIS */
- hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data);
- if (data.count)
+ /* Check if the LE Create BIG command has already been sent */
+ conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
+ qos->bcast.big);
+ if (conn)
return ERR_PTR(-EADDRINUSE);
- conn = hci_conn_hash_lookup_bis(hdev, dst, qos->bcast.big, qos->bcast.bis);
- if (conn)
+ /* Check BIS settings against other bound BISes, since all
+ * BISes in a BIG must have the same value for all parameters
+ */
+ conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
+
+ if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
+ base_len != conn->le_per_adv_data_len ||
+ memcmp(conn->le_per_adv_data, base, base_len)))
return ERR_PTR(-EADDRINUSE);
conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
if (!conn)
return ERR_PTR(-ENOMEM);
- set_bit(HCI_CONN_PER_ADV, &conn->flags);
conn->state = BT_CONNECT;
hci_conn_hold(conn);
@@ -1707,52 +1703,25 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
return sco;
}
-static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
-{
- struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];
-
- cis->cis_id = qos->ucast.cis;
- cis->c_sdu = cpu_to_le16(qos->ucast.out.sdu);
- cis->p_sdu = cpu_to_le16(qos->ucast.in.sdu);
- cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy : qos->ucast.in.phy;
- cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy : qos->ucast.out.phy;
- cis->c_rtn = qos->ucast.out.rtn;
- cis->p_rtn = qos->ucast.in.rtn;
-
- d->pdu.cp.num_cis++;
-}
-
-static void cis_list(struct hci_conn *conn, void *data)
-{
- struct iso_list_data *d = data;
-
- /* Skip if broadcast/ANY address */
- if (!bacmp(&conn->dst, BDADDR_ANY))
- return;
-
- if (d->cig != conn->iso_qos.ucast.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
- d->cis != conn->iso_qos.ucast.cis)
- return;
-
- d->count++;
-
- if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
- d->count >= ARRAY_SIZE(d->pdu.cis))
- return;
-
- cis_add(d, &conn->iso_qos);
-}
-
static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_create_big cp;
+ struct iso_list_data data;
memset(&cp, 0, sizeof(cp));
+ data.big = qos->bcast.big;
+ data.bis = qos->bcast.bis;
+ data.count = 0;
+
+ /* Create a BIS for each bound connection */
+ hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
+ BT_BOUND, &data);
+
cp.handle = qos->bcast.big;
cp.adv_handle = qos->bcast.bis;
- cp.num_bis = 0x01;
+ cp.num_bis = data.count;
hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
cp.bis.latency = cpu_to_le16(qos->bcast.out.latency);
@@ -1766,25 +1735,62 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}
-static void set_cig_params_complete(struct hci_dev *hdev, void *data, int err)
+static int set_cig_params_sync(struct hci_dev *hdev, void *data)
{
- struct iso_cig_params *pdu = data;
+ u8 cig_id = PTR_UINT(data);
+ struct hci_conn *conn;
+ struct bt_iso_qos *qos;
+ struct iso_cig_params pdu;
+ u8 cis_id;
- bt_dev_dbg(hdev, "");
+ conn = hci_conn_hash_lookup_cig(hdev, cig_id);
+ if (!conn)
+ return 0;
- if (err)
- bt_dev_err(hdev, "Unable to set CIG parameters: %d", err);
+ memset(&pdu, 0, sizeof(pdu));
- kfree(pdu);
-}
+ qos = &conn->iso_qos;
+ pdu.cp.cig_id = cig_id;
+ hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
+ hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
+ pdu.cp.sca = qos->ucast.sca;
+ pdu.cp.packing = qos->ucast.packing;
+ pdu.cp.framing = qos->ucast.framing;
+ pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
+ pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
+
+ /* Reprogram all CIS(s) with the same CIG; valid ranges are:
+ * num_cis: 0x00 to 0x1F
+ * cis_id: 0x00 to 0xEF
+ */
+ for (cis_id = 0x00; cis_id < 0xf0 &&
+ pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
+ struct hci_cis_params *cis;
-static int set_cig_params_sync(struct hci_dev *hdev, void *data)
-{
- struct iso_cig_params *pdu = data;
- u32 plen;
+ conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
+ if (!conn)
+ continue;
+
+ qos = &conn->iso_qos;
+
+ cis = &pdu.cis[pdu.cp.num_cis++];
+ cis->cis_id = cis_id;
+ cis->c_sdu = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
+ cis->p_sdu = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
+ cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy :
+ qos->ucast.in.phy;
+ cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy :
+ qos->ucast.out.phy;
+ cis->c_rtn = qos->ucast.out.rtn;
+ cis->p_rtn = qos->ucast.in.rtn;
+ }
+
+ if (!pdu.cp.num_cis)
+ return 0;
- plen = sizeof(pdu->cp) + pdu->cp.num_cis * sizeof(pdu->cis[0]);
- return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS, plen, pdu,
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
+ sizeof(pdu.cp) +
+ pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
HCI_CMD_TIMEOUT);
}
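
The payload queued above is variable-length: a fixed CIG header followed by one entry per programmed CIS, which is why the length handed to __hci_cmd_sync_status() is sizeof(pdu.cp) + pdu.cp.num_cis * sizeof(pdu.cis[0]). A minimal standalone sketch of that sizing rule; the struct layouts are illustrative, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layouts only; the kernel uses its own packed structs. */
struct cig_params_hdr {
	uint8_t  cig_id;
	uint8_t  c_interval[3], p_interval[3];
	uint8_t  sca, packing, framing;
	uint16_t c_latency, p_latency;
	uint8_t  num_cis;
} __attribute__((packed));

struct cis_entry {
	uint8_t  cis_id;
	uint16_t c_sdu, p_sdu;
	uint8_t  c_phy, p_phy, c_rtn, p_rtn;
} __attribute__((packed));

int main(void)
{
	struct {
		struct cig_params_hdr cp;
		struct cis_entry cis[0x1f];
	} __attribute__((packed)) pdu = { .cp = { .num_cis = 2 } };

	/* Same rule as the hunk above: header plus one entry per CIS */
	size_t plen = sizeof(pdu.cp) + pdu.cp.num_cis * sizeof(pdu.cis[0]);

	printf("LE Set CIG Parameters payload: %zu bytes\n", plen);
	return 0;
}
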
@@ -1792,7 +1798,6 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
struct hci_dev *hdev = conn->hdev;
struct iso_list_data data;
- struct iso_cig_params *pdu;
memset(&data, 0, sizeof(data));
@@ -1819,58 +1824,31 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
qos->ucast.cig = data.cig;
}
- data.pdu.cp.cig_id = qos->ucast.cig;
- hci_cpu_to_le24(qos->ucast.out.interval, data.pdu.cp.c_interval);
- hci_cpu_to_le24(qos->ucast.in.interval, data.pdu.cp.p_interval);
- data.pdu.cp.sca = qos->ucast.sca;
- data.pdu.cp.packing = qos->ucast.packing;
- data.pdu.cp.framing = qos->ucast.framing;
- data.pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
- data.pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
-
if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
- data.count = 0;
- data.cig = qos->ucast.cig;
- data.cis = qos->ucast.cis;
-
- hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
- &data);
- if (data.count)
+ if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
+ qos->ucast.cis))
return false;
-
- cis_add(&data, qos);
+ goto done;
}
- /* Reprogram all CIS(s) with the same CIG */
- for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0x11;
+ /* Allocate first available CIS if not set */
+ for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
data.cis++) {
- data.count = 0;
-
- hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
- &data);
- if (data.count)
- continue;
-
- /* Allocate a CIS if not set */
- if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) {
+ if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
+ data.cis)) {
/* Update CIS */
qos->ucast.cis = data.cis;
- cis_add(&data, qos);
+ break;
}
}
- if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
+ if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
return false;
- pdu = kmemdup(&data.pdu, sizeof(*pdu), GFP_KERNEL);
- if (!pdu)
- return false;
-
- if (hci_cmd_sync_queue(hdev, set_cig_params_sync, pdu,
- set_cig_params_complete) < 0) {
- kfree(pdu);
+done:
+ if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
+ UINT_PTR(qos->ucast.cig), NULL) < 0)
return false;
- }
return true;
}
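
When the caller left the CIS unset, the loop above simply walks IDs 0x00..0xEF and claims the first one with no bound connection in the same CIG. A standalone sketch of that first-free-ID allocation, where in_use() stands in for hci_conn_hash_lookup_cis():

#include <stdbool.h>
#include <stdint.h>

#define CIS_ID_MAX   0xEF
#define CIS_ID_UNSET 0xFF	/* stand-in for BT_ISO_QOS_CIS_UNSET */

/* Return the first unused CIS ID, or CIS_ID_UNSET if the range is full. */
static uint8_t cis_id_alloc(bool (*in_use)(uint8_t cig, uint8_t cis), uint8_t cig)
{
	for (unsigned int id = 0x00; id <= CIS_ID_MAX; id++)
		if (!in_use(cig, (uint8_t)id))
			return (uint8_t)id;

	return CIS_ID_UNSET;
}

static bool demo_in_use(uint8_t cig, uint8_t cis)
{
	(void)cig;
	return cis < 3;	/* pretend CIS 0, 1 and 2 are already bound */
}

int main(void)
{
	return cis_id_alloc(demo_in_use, 0) == 3 ? 0 : 1;
}
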
@@ -1888,6 +1866,8 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-ENOMEM);
cis->cleanup = cis_cleanup;
cis->dst_type = dst_type;
+ cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
+ cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
}
if (cis->state == BT_CONNECTED)
@@ -1931,6 +1911,8 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-EINVAL);
}
+ hci_conn_hold(cis);
+
cis->iso_qos = *qos;
cis->state = BT_BOUND;
@@ -1969,59 +1951,47 @@ bool hci_iso_setup_path(struct hci_conn *conn)
return true;
}
-static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
+int hci_conn_check_create_cis(struct hci_conn *conn)
{
- return hci_le_create_cis_sync(hdev, data);
-}
+ if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
+ return -EINVAL;
-int hci_le_create_cis(struct hci_conn *conn)
-{
- struct hci_conn *cis;
- struct hci_link *link, *t;
- struct hci_dev *hdev = conn->hdev;
- int err;
+ if (!conn->parent || conn->parent->state != BT_CONNECTED ||
+ conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
+ return 1;
- bt_dev_dbg(hdev, "hcon %p", conn);
+ return 0;
+}
- switch (conn->type) {
- case LE_LINK:
- if (conn->state != BT_CONNECTED || list_empty(&conn->link_list))
- return -EINVAL;
+static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_le_create_cis_sync(hdev);
+}
- cis = NULL;
+int hci_le_create_cis_pending(struct hci_dev *hdev)
+{
+ struct hci_conn *conn;
+ bool pending = false;
- /* hci_conn_link uses list_add_tail_rcu so the list is in
- * the same order as the connections are requested.
- */
- list_for_each_entry_safe(link, t, &conn->link_list, list) {
- if (link->conn->state == BT_BOUND) {
- err = hci_le_create_cis(link->conn);
- if (err)
- return err;
+ rcu_read_lock();
- cis = link->conn;
- }
+ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+ if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
+ rcu_read_unlock();
+ return -EBUSY;
}
- return cis ? 0 : -EINVAL;
- case ISO_LINK:
- cis = conn;
- break;
- default:
- return -EINVAL;
+ if (!hci_conn_check_create_cis(conn))
+ pending = true;
}
- if (cis->state == BT_CONNECT)
+ rcu_read_unlock();
+
+ if (!pending)
return 0;
/* Queue Create CIS */
- err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
- if (err)
- return err;
-
- cis->state = BT_CONNECT;
-
- return 0;
+ return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
}
static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
@@ -2051,16 +2021,6 @@ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
qos->latency = conn->le_conn_latency;
}
-static void hci_bind_bis(struct hci_conn *conn,
- struct bt_iso_qos *qos)
-{
- /* Update LINK PHYs according to QoS preference */
- conn->le_tx_phy = qos->bcast.out.phy;
- conn->le_tx_phy = qos->bcast.out.phy;
- conn->iso_qos = *qos;
- conn->state = BT_BOUND;
-}
-
static int create_big_sync(struct hci_dev *hdev, void *data)
{
struct hci_conn *conn = data;
@@ -2183,27 +2143,80 @@ static void create_big_complete(struct hci_dev *hdev, void *data, int err)
}
}
-struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
- __u8 dst_type, struct bt_iso_qos *qos,
- __u8 base_len, __u8 *base)
+struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ struct bt_iso_qos *qos,
+ __u8 base_len, __u8 *base)
{
struct hci_conn *conn;
- int err;
+ __u8 eir[HCI_MAX_PER_AD_LENGTH];
+
+ if (base_len && base)
+ base_len = eir_append_service_data(eir, 0, 0x1851,
+ base, base_len);
/* We need hci_conn object using the BDADDR_ANY as dst */
- conn = hci_add_bis(hdev, dst, qos);
+ conn = hci_add_bis(hdev, dst, qos, base_len, eir);
if (IS_ERR(conn))
return conn;
- hci_bind_bis(conn, qos);
+ /* Update LINK PHYs according to QoS preference */
+ conn->le_tx_phy = qos->bcast.out.phy;
+ conn->le_tx_phy = qos->bcast.out.phy;
/* Add Basic Announcement into Periodic Adv Data if BASE is set */
if (base_len && base) {
- base_len = eir_append_service_data(conn->le_per_adv_data, 0,
- 0x1851, base, base_len);
+ memcpy(conn->le_per_adv_data, eir, sizeof(eir));
conn->le_per_adv_data_len = base_len;
}
+ hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
+ conn->le_tx_phy ? conn->le_tx_phy :
+ hdev->le_tx_def_phys);
+
+ conn->iso_qos = *qos;
+ conn->state = BT_BOUND;
+
+ return conn;
+}
+
+static void bis_mark_per_adv(struct hci_conn *conn, void *data)
+{
+ struct iso_list_data *d = data;
+
+ /* Skip if not broadcast/ANY address */
+ if (bacmp(&conn->dst, BDADDR_ANY))
+ return;
+
+ if (d->big != conn->iso_qos.bcast.big ||
+ d->bis == BT_ISO_QOS_BIS_UNSET ||
+ d->bis != conn->iso_qos.bcast.bis)
+ return;
+
+ set_bit(HCI_CONN_PER_ADV, &conn->flags);
+}
+
+struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos,
+ __u8 base_len, __u8 *base)
+{
+ struct hci_conn *conn;
+ int err;
+ struct iso_list_data data;
+
+ conn = hci_bind_bis(hdev, dst, qos, base_len, base);
+ if (IS_ERR(conn))
+ return conn;
+
+ data.big = qos->bcast.big;
+ data.bis = qos->bcast.bis;
+
+ /* Set HCI_CONN_PER_ADV for all bound connections, to mark that
+ * the start periodic advertising and create BIG commands have
+ * been queued
+ */
+ hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
+ BT_BOUND, &data);
+
/* Queue start periodic advertising and create BIG */
err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
create_big_complete);
@@ -2212,10 +2225,6 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(err);
}
- hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
- conn->le_tx_phy ? conn->le_tx_phy :
- hdev->le_tx_def_phys);
-
return conn;
}
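
hci_bind_bis() above wraps the BASE in a Service Data (16-bit UUID) AD structure for the Broadcast Audio Announcement UUID 0x1851 before storing it as periodic advertising data. A rough sketch of that TLV encoding, as a simplified stand-in for eir_append_service_data():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define AD_TYPE_SVC_DATA16 0x16	/* Service Data - 16-bit UUID */

/* Append [len][0x16][uuid lo][uuid hi][data...] at offset 'off' in 'eir'.
 * Returns the new offset, or 'off' unchanged if it does not fit in 'size'.
 */
static size_t ad_append_service_data(uint8_t *eir, size_t size, size_t off,
				     uint16_t uuid, const uint8_t *data,
				     size_t data_len)
{
	if (off + 2 + 2 + data_len > size)
		return off;

	eir[off++] = (uint8_t)(1 + 2 + data_len);	/* length of what follows */
	eir[off++] = AD_TYPE_SVC_DATA16;
	eir[off++] = (uint8_t)(uuid & 0xff);		/* UUID, little endian */
	eir[off++] = (uint8_t)(uuid >> 8);
	memcpy(eir + off, data, data_len);

	return off + data_len;
}

int main(void)
{
	uint8_t eir[251] = { 0 }, base[] = { 0x01, 0x02, 0x03 };

	return ad_append_service_data(eir, sizeof(eir), 0, 0x1851,
				      base, sizeof(base)) ? 0 : 1;
}
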
@@ -2257,11 +2266,12 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-ENOLINK);
}
- /* If LE is already connected and CIS handle is already set proceed to
- * Create CIS immediately.
- */
- if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
- hci_le_create_cis(cis);
+ /* Link takes the refcount */
+ hci_conn_drop(cis);
+
+ cis->state = BT_CONNECT;
+
+ hci_le_create_cis_pending(hdev);
return cis;
}
@@ -2848,81 +2858,49 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
return phys;
}
-int hci_abort_conn(struct hci_conn *conn, u8 reason)
+static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
- int r = 0;
+ struct hci_conn *conn;
+ u16 handle = PTR_UINT(data);
- if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!conn)
return 0;
- switch (conn->state) {
- case BT_CONNECTED:
- case BT_CONFIG:
- if (conn->type == AMP_LINK) {
- struct hci_cp_disconn_phy_link cp;
+ return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
+}
- cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
- cp.reason = reason;
- r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
- sizeof(cp), &cp);
- } else {
- struct hci_cp_disconnect dc;
+int hci_abort_conn(struct hci_conn *conn, u8 reason)
+{
+ struct hci_dev *hdev = conn->hdev;
- dc.handle = cpu_to_le16(conn->handle);
- dc.reason = reason;
- r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
- sizeof(dc), &dc);
- }
+ /* If abort_reason has already been set, the connection is already
+ * being aborted, so don't attempt to overwrite it.
+ */
+ if (conn->abort_reason)
+ return 0;
- conn->state = BT_DISCONN;
+ bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
- break;
- case BT_CONNECT:
- if (conn->type == LE_LINK) {
- if (test_bit(HCI_CONN_SCANNING, &conn->flags))
- break;
- r = hci_send_cmd(conn->hdev,
- HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
- } else if (conn->type == ACL_LINK) {
- if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
- break;
- r = hci_send_cmd(conn->hdev,
- HCI_OP_CREATE_CONN_CANCEL,
- 6, &conn->dst);
- }
- break;
- case BT_CONNECT2:
- if (conn->type == ACL_LINK) {
- struct hci_cp_reject_conn_req rej;
-
- bacpy(&rej.bdaddr, &conn->dst);
- rej.reason = reason;
-
- r = hci_send_cmd(conn->hdev,
- HCI_OP_REJECT_CONN_REQ,
- sizeof(rej), &rej);
- } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
- struct hci_cp_reject_sync_conn_req rej;
-
- bacpy(&rej.bdaddr, &conn->dst);
-
- /* SCO rejection has its own limited set of
- * allowed error values (0x0D-0x0F) which isn't
- * compatible with most values passed to this
- * function. To be safe hard-code one of the
- * values that's suitable for SCO.
- */
- rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
+ conn->abort_reason = reason;
- r = hci_send_cmd(conn->hdev,
- HCI_OP_REJECT_SYNC_CONN_REQ,
- sizeof(rej), &rej);
+ /* If the connection is pending, check the command opcode since it
+ * might be blocking hci_cmd_sync_work while waiting for its respective
+ * event, so we need hci_cmd_sync_cancel to cancel it.
+ *
+ * hci_connect_le serializes the connection attempts so only one
+ * connection can be in BT_CONNECT at a time.
+ */
+ if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
+ switch (hci_skb_event(hdev->sent_cmd)) {
+ case HCI_EV_LE_CONN_COMPLETE:
+ case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
+ case HCI_EVT_LE_CIS_ESTABLISHED:
+ hci_cmd_sync_cancel(hdev, -ECANCELED);
+ break;
}
- break;
- default:
- conn->state = BT_CLOSED;
- break;
}
- return r;
+ return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
+ NULL);
}
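
hci_abort_conn() now only records the reason and queues abort_conn_sync(), passing the connection handle round-tripped through the void *data cookie instead of a pointer that could go stale before the callback runs. A minimal illustration of that integer-in-pointer round trip; the callback below is a hypothetical stand-in, not the hci_cmd_sync API:

#include <stdint.h>
#include <stdio.h>

#define UINT_PTR(u)	((void *)(uintptr_t)(u))	/* same idea as the kernel macros */
#define PTR_UINT(p)	((uintptr_t)(p))

/* Hypothetical queued callback: it recovers the handle and would look the
 * connection up again, so it never holds a possibly-freed pointer.
 */
static int abort_cb(void *data)
{
	uint16_t handle = (uint16_t)PTR_UINT(data);

	printf("aborting handle 0x%4.4x\n", (unsigned int)handle);
	return 0;
}

int main(void)
{
	uint16_t handle = 0x0042;

	return abort_cb(UINT_PTR(handle));
}
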
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 1ec83985f1ab..a5992f1b3c9b 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1074,9 +1074,9 @@ void hci_uuids_clear(struct hci_dev *hdev)
void hci_link_keys_clear(struct hci_dev *hdev)
{
- struct link_key *key;
+ struct link_key *key, *tmp;
- list_for_each_entry(key, &hdev->link_keys, list) {
+ list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
list_del_rcu(&key->list);
kfree_rcu(key, rcu);
}
@@ -1084,9 +1084,9 @@ void hci_link_keys_clear(struct hci_dev *hdev)
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
- struct smp_ltk *k;
+ struct smp_ltk *k, *tmp;
- list_for_each_entry(k, &hdev->long_term_keys, list) {
+ list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
list_del_rcu(&k->list);
kfree_rcu(k, rcu);
}
@@ -1094,9 +1094,9 @@ void hci_smp_ltks_clear(struct hci_dev *hdev)
void hci_smp_irks_clear(struct hci_dev *hdev)
{
- struct smp_irk *k;
+ struct smp_irk *k, *tmp;
- list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
+ list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
list_del_rcu(&k->list);
kfree_rcu(k, rcu);
}
@@ -1104,9 +1104,9 @@ void hci_smp_irks_clear(struct hci_dev *hdev)
void hci_blocked_keys_clear(struct hci_dev *hdev)
{
- struct blocked_key *b;
+ struct blocked_key *b, *tmp;
- list_for_each_entry(b, &hdev->blocked_keys, list) {
+ list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
list_del_rcu(&b->list);
kfree_rcu(b, rcu);
}
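
The four loops above move to list_for_each_entry_safe() because each iteration deletes and frees the current entry, and the plain iterator would have to read the freed node to find the next one. The same idea on a tiny standalone list:

#include <stdlib.h>

struct node {
	struct node *next;
	int key;
};

/* Delete every node: save 'next' before freeing the current node, which is
 * exactly the extra cursor the _safe list iterators keep for you.
 */
static void list_clear(struct node **head)
{
	struct node *n = *head, *tmp;

	while (n) {
		tmp = n->next;	/* grabbed before n is freed */
		free(n);
		n = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->key = i;
		n->next = head;
		head = n;
	}

	list_clear(&head);
	return 0;
}
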
@@ -1949,15 +1949,15 @@ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
switch (hci_get_adv_monitor_offload_ext(hdev)) {
case HCI_ADV_MONITOR_EXT_NONE:
- bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
+ bt_dev_dbg(hdev, "add monitor %d status %d",
monitor->handle, status);
/* Message was not forwarded to controller - not an error */
break;
case HCI_ADV_MONITOR_EXT_MSFT:
status = msft_add_monitor_pattern(hdev, monitor);
- bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
- monitor->handle, status);
+ bt_dev_dbg(hdev, "add monitor %d msft status %d",
+ handle, status);
break;
}
@@ -1976,15 +1976,15 @@ static int hci_remove_adv_monitor(struct hci_dev *hdev,
switch (hci_get_adv_monitor_offload_ext(hdev)) {
case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
- bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
+ bt_dev_dbg(hdev, "remove monitor %d status %d",
monitor->handle, status);
goto free_monitor;
case HCI_ADV_MONITOR_EXT_MSFT:
handle = monitor->handle;
status = msft_remove_monitor(hdev, monitor);
- bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
- hdev->name, handle, status);
+ bt_dev_dbg(hdev, "remove monitor %d msft status %d",
+ handle, status);
break;
}
@@ -2436,6 +2436,9 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
return NOTIFY_DONE;
+ /* To avoid a potential race with hci_unregister_dev. */
+ hci_dev_hold(hdev);
+
if (action == PM_SUSPEND_PREPARE)
ret = hci_suspend_dev(hdev);
else if (action == PM_POST_SUSPEND)
@@ -2445,6 +2448,7 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
action, ret);
+ hci_dev_put(hdev);
return NOTIFY_DONE;
}
@@ -3891,7 +3895,7 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
if (conn) {
/* Send to upper protocol */
- bt_cb(skb)->sco.pkt_status = flags & 0x03;
+ hci_skb_pkt_status(skb) = flags & 0x03;
sco_recv_scodata(conn, skb);
return;
} else {
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index ec0df2f9188e..6b7741f6e95b 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -22,6 +22,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/kstrtox.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -1152,7 +1153,7 @@ static ssize_t force_no_mitm_write(struct file *file,
return -EFAULT;
buf[buf_size] = '\0';
- if (strtobool(buf, &enable))
+ if (kstrtobool(buf, &enable))
return -EINVAL;
if (enable == hci_dev_test_flag(hdev, HCI_FORCE_NO_MITM))
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 31ca320ce38d..559b6080706c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1639,7 +1639,7 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
hci_dev_set_flag(hdev, HCI_LE_ADV);
- if (adv)
+ if (adv && !adv->periodic)
adv->enabled = true;
conn = hci_lookup_le_connect(hdev);
@@ -1747,7 +1747,7 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
{
struct discovery_state *d = &hdev->discovery;
- if (len > HCI_MAX_AD_LENGTH)
+ if (len > max_adv_len(hdev))
return;
bacpy(&d->last_adv_addr, bdaddr);
@@ -3173,19 +3173,15 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
* As the connection handle is set here for the first time, it indicates
* whether the connection is already set up.
*/
- if (conn->handle != HCI_CONN_HANDLE_UNSET) {
+ if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
goto unlock;
}
if (!status) {
- conn->handle = __le16_to_cpu(ev->handle);
- if (conn->handle > HCI_CONN_HANDLE_MAX) {
- bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
- conn->handle, HCI_CONN_HANDLE_MAX);
- status = HCI_ERROR_INVALID_PARAMETERS;
+ status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
+ if (status)
goto done;
- }
if (conn->type == ACL_LINK) {
conn->state = BT_CONFIG;
@@ -3803,6 +3799,22 @@ static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
return rp->status;
}
+static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
+{
+ struct hci_conn *conn, *tmp;
+
+ lockdep_assert_held(&hdev->lock);
+
+ list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
+ if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
+ conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
+ continue;
+
+ if (HCI_CONN_HANDLE_UNSET(conn->handle))
+ hci_conn_failed(conn, status);
+ }
+}
+
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -3810,6 +3822,7 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
struct hci_cp_le_set_cig_params *cp;
struct hci_conn *conn;
u8 status = rp->status;
+ bool pending = false;
int i;
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
@@ -3823,12 +3836,15 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
+ /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
+ *
+ * If the Status return parameter is non-zero, then the state of the CIG
+ * and its CIS configurations shall not be changed by the command. If
+ * the CIG did not already exist, it shall not be created.
+ */
if (status) {
- while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
- conn->state = BT_CLOSED;
- hci_connect_cfm(conn, status);
- hci_conn_del(conn);
- }
+ /* Keep current configuration, fail only the unbound CIS */
+ hci_unbound_cis_failed(hdev, rp->cig_id, status);
goto unlock;
}
@@ -3848,17 +3864,17 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
continue;
- conn->handle = __le16_to_cpu(rp->handle[i]);
-
- bt_dev_dbg(hdev, "%p handle 0x%4.4x parent %p", conn,
- conn->handle, conn->parent);
+ if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
+ continue;
- /* Create CIS if LE is already connected */
- if (conn->parent && conn->parent->state == BT_CONNECTED)
- hci_le_create_cis(conn);
+ if (conn->state == BT_CONNECT)
+ pending = true;
}
unlock:
+ if (pending)
+ hci_le_create_cis_pending(hdev);
+
hci_dev_unlock(hdev);
return rp->status;
@@ -3938,24 +3954,47 @@ static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_ev_status *rp = data;
- __u8 *sent;
+ struct hci_cp_le_set_per_adv_enable *cp;
+ struct adv_info *adv = NULL, *n;
+ u8 per_adv_cnt = 0;
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
return rp->status;
- sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
- if (!sent)
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
+ if (!cp)
return rp->status;
hci_dev_lock(hdev);
- if (*sent)
+ adv = hci_find_adv_instance(hdev, cp->handle);
+
+ if (cp->enable) {
hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
- else
+
+ if (adv)
+ adv->enabled = true;
+ } else {
+ /* If just one instance was disabled, check if any other instance is
+ * still enabled before clearing HCI_LE_PER_ADV. The current periodic
+ * adv instance will be marked as disabled once extended advertising
+ * is also disabled.
+ */
+ list_for_each_entry_safe(adv, n, &hdev->adv_instances,
+ list) {
+ if (adv->periodic && adv->enabled)
+ per_adv_cnt++;
+ }
+
+ if (per_adv_cnt > 1)
+ goto unlock;
+
hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
+ }
+unlock:
hci_dev_unlock(hdev);
return rp->status;
@@ -4224,6 +4263,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
struct hci_cp_le_create_cis *cp;
+ bool pending = false;
int i;
bt_dev_dbg(hdev, "status 0x%2.2x", status);
@@ -4246,12 +4286,18 @@ static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
conn = hci_conn_hash_lookup_handle(hdev, handle);
if (conn) {
+ if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
+ &conn->flags))
+ pending = true;
conn->state = BT_CLOSED;
hci_connect_cfm(conn, status);
hci_conn_del(conn);
}
}
+ if (pending)
+ hci_le_create_cis_pending(hdev);
+
hci_dev_unlock(hdev);
}
@@ -4999,18 +5045,15 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
* As the connection handle is set here for the first time, it indicates
* whether the connection is already set up.
*/
- if (conn->handle != HCI_CONN_HANDLE_UNSET) {
+ if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
goto unlock;
}
switch (status) {
case 0x00:
- conn->handle = __le16_to_cpu(ev->handle);
- if (conn->handle > HCI_CONN_HANDLE_MAX) {
- bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
- conn->handle, HCI_CONN_HANDLE_MAX);
- status = HCI_ERROR_INVALID_PARAMETERS;
+ status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
+ if (status) {
conn->state = BT_CLOSED;
break;
}
@@ -5863,7 +5906,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
* As the connection handle is set here for the first time, it indicates
* whether the connection is already set up.
*/
- if (conn->handle != HCI_CONN_HANDLE_UNSET) {
+ if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
goto unlock;
}
@@ -6216,8 +6259,9 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
return;
}
- if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
- bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
+ if (len > max_adv_len(hdev)) {
+ bt_dev_err_ratelimited(hdev,
+ "adv larger than maximum supported");
return;
}
@@ -6282,7 +6326,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
*/
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
type);
- if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
+ if (!ext_adv && conn && type == LE_ADV_IND &&
+ len <= max_adv_len(hdev)) {
/* Store report for later inclusion by
* mgmt_device_connected
*/
@@ -6423,7 +6468,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
info->length + 1))
break;
- if (info->length <= HCI_MAX_AD_LENGTH) {
+ if (info->length <= max_adv_len(hdev)) {
rssi = info->data[info->length];
process_adv_report(hdev, info->type, &info->bdaddr,
info->bdaddr_type, NULL, 0, rssi,
@@ -6790,6 +6835,7 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
struct hci_evt_le_cis_established *ev = data;
struct hci_conn *conn;
struct bt_iso_qos *qos;
+ bool pending = false;
u16 handle = __le16_to_cpu(ev->handle);
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
@@ -6813,6 +6859,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
qos = &conn->iso_qos;
+ pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
+
/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
qos->ucast.out.interval = qos->ucast.in.interval;
@@ -6854,10 +6902,14 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
goto unlock;
}
+ conn->state = BT_CLOSED;
hci_connect_cfm(conn, ev->status);
hci_conn_del(conn);
unlock:
+ if (pending)
+ hci_le_create_cis_pending(hdev);
+
hci_dev_unlock(hdev);
}
@@ -6936,6 +6988,7 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
{
struct hci_evt_le_create_big_complete *ev = data;
struct hci_conn *conn;
+ __u8 i = 0;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
@@ -6944,33 +6997,46 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
return;
hci_dev_lock(hdev);
+ rcu_read_lock();
- conn = hci_conn_hash_lookup_big(hdev, ev->handle);
- if (!conn)
- goto unlock;
+ /* Connect all BISes that are bound to the BIG */
+ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+ if (bacmp(&conn->dst, BDADDR_ANY) ||
+ conn->type != ISO_LINK ||
+ conn->iso_qos.bcast.big != ev->handle)
+ continue;
- if (conn->type != ISO_LINK) {
- bt_dev_err(hdev,
- "Invalid connection link type handle 0x%2.2x",
- ev->handle);
- goto unlock;
- }
+ if (hci_conn_set_handle(conn,
+ __le16_to_cpu(ev->bis_handle[i++])))
+ continue;
- if (ev->num_bis)
- conn->handle = __le16_to_cpu(ev->bis_handle[0]);
+ if (!ev->status) {
+ conn->state = BT_CONNECTED;
+ set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
+ rcu_read_unlock();
+ hci_debugfs_create_conn(conn);
+ hci_conn_add_sysfs(conn);
+ hci_iso_setup_path(conn);
+ rcu_read_lock();
+ continue;
+ }
- if (!ev->status) {
- conn->state = BT_CONNECTED;
- hci_debugfs_create_conn(conn);
- hci_conn_add_sysfs(conn);
- hci_iso_setup_path(conn);
- goto unlock;
+ hci_connect_cfm(conn, ev->status);
+ rcu_read_unlock();
+ hci_conn_del(conn);
+ rcu_read_lock();
}
- hci_connect_cfm(conn, ev->status);
- hci_conn_del(conn);
+ if (!ev->status && !i)
+ /* If no BISes have been connected for the BIG,
+ * terminate. This is in case all bound connections
+ * have been closed before the BIG creation
+ * has completed.
+ */
+ hci_le_terminate_big_sync(hdev, ev->handle,
+ HCI_ERROR_LOCAL_HOST_TERM);
-unlock:
+ rcu_read_unlock();
hci_dev_unlock(hdev);
}
@@ -6987,9 +7053,6 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
flex_array_size(ev, bis, ev->num_bis)))
return;
- if (ev->status)
- return;
-
hci_dev_lock(hdev);
for (i = 0; i < ev->num_bis; i++) {
@@ -7013,9 +7076,25 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
- hci_iso_setup_path(bis);
+ if (!ev->status) {
+ set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
+ hci_iso_setup_path(bis);
+ }
}
+ /* In case BIG sync failed, notify each failed connection to
+ * the user after all hci connections have been added
+ */
+ if (ev->status)
+ for (i = 0; i < ev->num_bis; i++) {
+ u16 handle = le16_to_cpu(ev->bis[i]);
+
+ bis = hci_conn_hash_lookup_handle(hdev, handle);
+
+ set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
+ hci_connect_cfm(bis, ev->status);
+ }
+
hci_dev_unlock(hdev);
}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index f7e006a36382..6e023b0104b0 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -629,27 +629,6 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
}
}
-/* Returns true if an le connection is in the scanning state */
-static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
-{
- struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *c;
-
- rcu_read_lock();
-
- list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type == LE_LINK && c->state == BT_CONNECT &&
- test_bit(HCI_CONN_SCANNING, &c->flags)) {
- rcu_read_unlock();
- return true;
- }
- }
-
- rcu_read_unlock();
-
- return false;
-}
-
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
static int hci_update_random_address(struct hci_request *req,
bool require_privacy, bool use_rpa,
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 1d249d839819..5e4f718073b7 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -264,6 +264,53 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
kfree_skb(skb_copy);
}
+static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
+{
+ struct scm_creds *creds;
+
+ if (!sk || WARN_ON(!skb))
+ return;
+
+ creds = &bt_cb(skb)->creds;
+
+ /* Check if peer credentials are set */
+ if (!sk->sk_peer_pid) {
+ /* Check if parent peer credentials are set */
+ if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
+ sk = bt_sk(sk)->parent;
+ else
+ return;
+ }
+
+ /* Check if scm_creds already set */
+ if (creds->pid == pid_vnr(sk->sk_peer_pid))
+ return;
+
+ memset(creds, 0, sizeof(*creds));
+
+ creds->pid = pid_vnr(sk->sk_peer_pid);
+ if (sk->sk_peer_cred) {
+ creds->uid = sk->sk_peer_cred->uid;
+ creds->gid = sk->sk_peer_cred->gid;
+ }
+}
+
+static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+
+ if (!skb)
+ return NULL;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ hci_sock_copy_creds(skb->sk, nskb);
+
+ return nskb;
+}
+
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
int flag, struct sock *skip_sk)
@@ -289,7 +336,7 @@ static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
if (hci_pi(sk)->channel != channel)
continue;
- nskb = skb_clone(skb, GFP_ATOMIC);
+ nskb = hci_skb_clone(skb);
if (!nskb)
continue;
@@ -356,6 +403,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
if (!skb_copy)
return;
+ hci_sock_copy_creds(skb->sk, skb_copy);
+
/* Put header before the data */
hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
hdr->opcode = opcode;
@@ -531,10 +580,12 @@ static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
return NULL;
}
- skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC);
+ skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
if (!skb)
return NULL;
+ hci_sock_copy_creds(sk, skb);
+
flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
@@ -580,6 +631,8 @@ static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
if (!skb)
return NULL;
+ hci_sock_copy_creds(sk, skb);
+
put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
__net_timestamp(skb);
@@ -606,6 +659,8 @@ static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
if (!skb)
return NULL;
+ hci_sock_copy_creds(sk, skb);
+
put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
put_unaligned_le16(opcode, skb_put(skb, 2));
@@ -638,6 +693,8 @@ send_monitor_note(struct sock *sk, const char *fmt, ...)
if (!skb)
return;
+ hci_sock_copy_creds(sk, skb);
+
va_start(args, fmt);
vsprintf(skb_put(skb, len), fmt, args);
*(u8 *)skb_put(skb, 1) = 0;
@@ -1494,6 +1551,7 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
+ struct scm_cookie scm;
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
@@ -1538,11 +1596,16 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
break;
}
+ memset(&scm, 0, sizeof(scm));
+ scm.creds = bt_cb(skb)->creds;
+
skb_free_datagram(sk, skb);
if (flags & MSG_TRUNC)
copied = skblen;
+ scm_recv(sock, msg, &scm, flags);
+
return err ? : copied;
}
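
With the sender credentials copied into every cloned skb and handed back through scm_recv(), a reader of the HCI socket can collect them as SCM_CREDENTIALS ancillary data. A rough userspace sketch of parsing that control message from recvmsg(), assuming the socket is already open and SO_PASSCRED has been enabled on it:

#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Fill *creds from an SCM_CREDENTIALS control message attached to the
 * received datagram; returns 0 on success, -1 otherwise.
 */
static int recv_with_creds(int fd, void *buf, size_t len, struct ucred *creds)
{
	char cbuf[CMSG_SPACE(sizeof(struct ucred))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, 0) < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_CREDENTIALS) {
			memcpy(creds, CMSG_DATA(cmsg), sizeof(*creds));
			return 0;
		}
	}

	return -1;
}

int main(void)
{
	struct ucred cr;
	char buf[64];

	/* stdin is not a socket, so this only demonstrates the call shape */
	return recv_with_creds(0, buf, sizeof(buf), &cr) ? 0 : 1;
}
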
@@ -2143,18 +2206,12 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
sock->ops = &hci_sock_ops;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
+ sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
+ kern);
if (!sk)
return -ENOMEM;
- sock_init_data(sock, sk);
-
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- sk->sk_protocol = protocol;
-
sock->state = SS_UNCONNECTED;
- sk->sk_state = BT_OPEN;
sk->sk_destruct = hci_sock_destruct;
bt_sock_link(&hci_sk_list, sk);
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 4d1e32bb6a9c..5eb30ba21370 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -3,6 +3,7 @@
* BlueZ - Bluetooth protocol stack for Linux
*
* Copyright (C) 2021 Intel Corporation
+ * Copyright 2023 NXP
*/
#include <linux/property.h>
@@ -1319,9 +1320,11 @@ int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
static int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
struct hci_cp_le_set_per_adv_enable cp;
+ struct adv_info *adv = NULL;
/* If periodic advertising already disabled there is nothing to do. */
- if (!hci_dev_test_flag(hdev, HCI_LE_PER_ADV))
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv || !adv->periodic || !adv->enabled)
return 0;
memset(&cp, 0, sizeof(cp));
@@ -1386,9 +1389,11 @@ static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
struct hci_cp_le_set_per_adv_enable cp;
+ struct adv_info *adv = NULL;
/* If periodic advertising already enabled there is nothing to do. */
- if (hci_dev_test_flag(hdev, HCI_LE_PER_ADV))
+ adv = hci_find_adv_instance(hdev, instance);
+ if (adv && adv->periodic && adv->enabled)
return 0;
memset(&cp, 0, sizeof(cp));
@@ -1458,22 +1463,19 @@ int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
sync_interval);
if (IS_ERR(adv))
return PTR_ERR(adv);
+ adv->pending = false;
added = true;
}
}
- /* Only start advertising if instance 0 or if a dedicated instance has
- * been added.
- */
- if (!adv || added) {
- err = hci_start_ext_adv_sync(hdev, instance);
- if (err < 0)
- goto fail;
+ /* Start advertising */
+ err = hci_start_ext_adv_sync(hdev, instance);
+ if (err < 0)
+ goto fail;
- err = hci_adv_bcast_annoucement(hdev, adv);
- if (err < 0)
- goto fail;
- }
+ err = hci_adv_bcast_annoucement(hdev, adv);
+ if (err < 0)
+ goto fail;
err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
max_interval);
@@ -2670,27 +2672,6 @@ done:
return filter_policy;
}
-/* Returns true if an le connection is in the scanning state */
-static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
-{
- struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *c;
-
- rcu_read_lock();
-
- list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type == LE_LINK && c->state == BT_CONNECT &&
- test_bit(HCI_CONN_SCANNING, &c->flags)) {
- rcu_read_unlock();
- return true;
- }
- }
-
- rcu_read_unlock();
-
- return false;
-}
-
static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
u16 interval, u16 window,
u8 own_addr_type, u8 filter_policy)
@@ -4133,10 +4114,13 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
}
if (bis_capable(hdev)) {
+ events[1] |= 0x20; /* LE PA Report */
+ events[1] |= 0x40; /* LE PA Sync Established */
events[3] |= 0x04; /* LE Create BIG Complete */
events[3] |= 0x08; /* LE Terminate BIG Complete */
events[3] |= 0x10; /* LE BIG Sync Established */
events[3] |= 0x20; /* LE BIG Sync Loss */
+ events[4] |= 0x02; /* LE BIG Info Advertising Report */
}
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
@@ -5269,26 +5253,64 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
}
static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
- struct hci_conn *conn)
+ struct hci_conn *conn, u8 reason)
{
+ /* Return reason if scanning since the connection shall probably be
+ * cleaned up directly.
+ */
if (test_bit(HCI_CONN_SCANNING, &conn->flags))
- return 0;
+ return reason;
- if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
+ if (conn->role == HCI_ROLE_SLAVE ||
+ test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
return 0;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
0, NULL, HCI_CMD_TIMEOUT);
}
-static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
+static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
{
if (conn->type == LE_LINK)
- return hci_le_connect_cancel_sync(hdev, conn);
+ return hci_le_connect_cancel_sync(hdev, conn, reason);
+
+ if (conn->type == ISO_LINK) {
+ /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+ * page 1857:
+ *
+ * If this command is issued for a CIS on the Central and the
+ * CIS is successfully terminated before being established,
+ * then an HCI_LE_CIS_Established event shall also be sent for
+ * this CIS with the Status Operation Cancelled by Host (0x44).
+ */
+ if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
+ return hci_disconnect_sync(hdev, conn, reason);
+
+ /* CIS with no Create CIS sent have nothing to cancel */
+ if (bacmp(&conn->dst, BDADDR_ANY))
+ return HCI_ERROR_LOCAL_HOST_TERM;
+
+ /* There is no way to cancel a BIS without terminating the BIG
+ * which is done later on connection cleanup.
+ */
+ return 0;
+ }
if (hdev->hci_ver < BLUETOOTH_VER_1_2)
return 0;
+ /* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
+ * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
+ * used when suspending or powering off, where we don't want to wait
+ * for the peer's response.
+ */
+ if (reason != HCI_ERROR_REMOTE_POWER_OFF)
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
+ 6, &conn->dst,
+ HCI_EV_CONN_COMPLETE,
+ HCI_CMD_TIMEOUT, NULL);
+
return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
6, &conn->dst, HCI_CMD_TIMEOUT);
}
@@ -5312,11 +5334,27 @@ static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
+static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
+{
+ struct hci_cp_le_reject_cis cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.reason = reason;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
u8 reason)
{
struct hci_cp_reject_conn_req cp;
+ if (conn->type == ISO_LINK)
+ return hci_le_reject_cis_sync(hdev, conn, reason);
+
if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
return hci_reject_sco_sync(hdev, conn, reason);
@@ -5330,31 +5368,52 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
{
- int err;
+ int err = 0;
+ u16 handle = conn->handle;
switch (conn->state) {
case BT_CONNECTED:
case BT_CONFIG:
- return hci_disconnect_sync(hdev, conn, reason);
+ err = hci_disconnect_sync(hdev, conn, reason);
+ break;
case BT_CONNECT:
- err = hci_connect_cancel_sync(hdev, conn);
- /* Cleanup hci_conn object if it cannot be cancelled as it
- * likelly means the controller and host stack are out of sync.
- */
- if (err) {
- hci_dev_lock(hdev);
- hci_conn_failed(conn, err);
- hci_dev_unlock(hdev);
- }
- return err;
+ err = hci_connect_cancel_sync(hdev, conn, reason);
+ break;
case BT_CONNECT2:
- return hci_reject_conn_sync(hdev, conn, reason);
+ err = hci_reject_conn_sync(hdev, conn, reason);
+ break;
+ case BT_OPEN:
+ case BT_BOUND:
+ hci_dev_lock(hdev);
+ hci_conn_failed(conn, reason);
+ hci_dev_unlock(hdev);
+ return 0;
default:
conn->state = BT_CLOSED;
- break;
+ return 0;
}
- return 0;
+ /* Clean up the hci_conn object if it cannot be cancelled, as that
+ * likely means the controller and host stack are out of sync, or in
+ * the LE case it was still scanning so it can be cleaned up safely.
+ */
+ if (err) {
+ struct hci_conn *c;
+
+ /* Check that the connection hasn't been cleaned up while waiting
+ * for commands to complete.
+ */
+ c = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!c || c != conn)
+ return 0;
+
+ hci_dev_lock(hdev);
+ hci_conn_failed(conn, err);
+ hci_dev_unlock(hdev);
+ }
+
+ return err;
}
static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
@@ -6253,63 +6312,99 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
done:
if (err == -ETIMEDOUT)
- hci_le_connect_cancel_sync(hdev, conn);
+ hci_le_connect_cancel_sync(hdev, conn, 0x00);
/* Re-enable advertising after the connection attempt is finished. */
hci_resume_advertising_sync(hdev);
return err;
}
-int hci_le_create_cis_sync(struct hci_dev *hdev, struct hci_conn *conn)
+int hci_le_create_cis_sync(struct hci_dev *hdev)
{
struct {
struct hci_cp_le_create_cis cp;
struct hci_cis cis[0x1f];
} cmd;
- u8 cig;
- struct hci_conn *hcon = conn;
+ struct hci_conn *conn;
+ u8 cig = BT_ISO_QOS_CIG_UNSET;
+
+ /* The spec allows only one pending LE Create CIS command at a time. If
+ * the command is pending now, don't do anything. We check for pending
+ * connections after each CIS Established event.
+ *
+ * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+ * page 2566:
+ *
+ * If the Host issues this command before all the
+ * HCI_LE_CIS_Established events from the previous use of the
+ * command have been generated, the Controller shall return the
+ * error code Command Disallowed (0x0C).
+ *
+ * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
+ * page 2567:
+ *
+ * When the Controller receives the HCI_LE_Create_CIS command, the
+ * Controller sends the HCI_Command_Status event to the Host. An
+ * HCI_LE_CIS_Established event will be generated for each CIS when it
+ * is established or if it is disconnected or considered lost before
+ * being established; until all the events are generated, the command
+ * remains pending.
+ */
memset(&cmd, 0, sizeof(cmd));
- cmd.cis[0].acl_handle = cpu_to_le16(conn->parent->handle);
- cmd.cis[0].cis_handle = cpu_to_le16(conn->handle);
- cmd.cp.num_cis++;
- cig = conn->iso_qos.ucast.cig;
hci_dev_lock(hdev);
rcu_read_lock();
+ /* Wait until previous Create CIS has completed */
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
+ if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
+ goto done;
+ }
- if (conn == hcon || conn->type != ISO_LINK ||
- conn->state == BT_CONNECTED ||
- conn->iso_qos.ucast.cig != cig)
+ /* Find CIG with all CIS ready */
+ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+ struct hci_conn *link;
+
+ if (hci_conn_check_create_cis(conn))
continue;
- /* Check if all CIS(s) belonging to a CIG are ready */
- if (!conn->parent || conn->parent->state != BT_CONNECTED ||
- conn->state != BT_CONNECT) {
- cmd.cp.num_cis = 0;
- break;
+ cig = conn->iso_qos.ucast.cig;
+
+ list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
+ if (hci_conn_check_create_cis(link) > 0 &&
+ link->iso_qos.ucast.cig == cig &&
+ link->state != BT_CONNECTED) {
+ cig = BT_ISO_QOS_CIG_UNSET;
+ break;
+ }
}
- /* Group all CIS with state BT_CONNECT since the spec don't
- * allow to send them individually:
- *
- * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
- * page 2566:
- *
- * If the Host issues this command before all the
- * HCI_LE_CIS_Established events from the previous use of the
- * command have been generated, the Controller shall return the
- * error code Command Disallowed (0x0C).
- */
+ if (cig != BT_ISO_QOS_CIG_UNSET)
+ break;
+ }
+
+ if (cig == BT_ISO_QOS_CIG_UNSET)
+ goto done;
+
+ list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
+ struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
+
+ if (hci_conn_check_create_cis(conn) ||
+ conn->iso_qos.ucast.cig != cig)
+ continue;
+
+ set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
cis->acl_handle = cpu_to_le16(conn->parent->handle);
cis->cis_handle = cpu_to_le16(conn->handle);
cmd.cp.num_cis++;
+
+ if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis))
+ break;
}
+done:
rcu_read_unlock();
hci_dev_unlock(hdev);
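
hci_le_create_cis_sync() now batches every ready CIS of a single CIG into one LE Create CIS and, per the spec text quoted above, never issues another while one is still outstanding; the CIS Established handlers re-arm it through hci_le_create_cis_pending(). A generic sketch of that one-command-in-flight pattern, with purely illustrative names:

#include <stdbool.h>

struct cis_queue {
	bool in_flight;	/* an LE Create CIS is outstanding */
	int  pending;	/* CISes bound and waiting to be created */
};

/* Issue at most one command at a time; a second LE Create CIS before all
 * Established events from the first have arrived would be rejected with
 * Command Disallowed (0x0C).
 */
static bool cis_submit(struct cis_queue *q)
{
	if (q->in_flight || !q->pending)
		return false;

	/* ...batch every ready CIS into one command here... */
	q->pending = 0;
	q->in_flight = true;
	return true;
}

/* Called for each CIS Established event; the last one re-arms the queue. */
static void cis_established(struct cis_queue *q, int remaining_events)
{
	if (remaining_events)
		return;

	q->in_flight = false;
	cis_submit(q);	/* pick up whatever became ready in the meantime */
}

int main(void)
{
	struct cis_queue q = { .pending = 2 };

	cis_submit(&q);
	cis_established(&q, 0);
	return q.in_flight || q.pending;
}
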
@@ -6433,7 +6528,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
{
- u8 instance = PTR_ERR(data);
+ u8 instance = PTR_UINT(data);
return hci_update_adv_data_sync(hdev, instance);
}
@@ -6441,5 +6536,5 @@ static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
{
return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
- ERR_PTR(instance), NULL);
+ UINT_PTR(instance), NULL);
}
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 369ed92dac99..c93aaeb3a3fa 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -256,21 +256,13 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, kern);
+ sk = bt_sock_alloc(net, sock, &hidp_proto, protocol, GFP_ATOMIC, kern);
if (!sk)
return -ENOMEM;
- sock_init_data(sock, sk);
-
sock->ops = &hidp_sock_ops;
-
sock->state = SS_UNCONNECTED;
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- sk->sk_protocol = protocol;
- sk->sk_state = BT_OPEN;
-
bt_sock_link(&hidp_sk_list, sk);
return 0;
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 505d62247268..6b66d6a88b9a 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -48,6 +48,11 @@ static void iso_sock_kill(struct sock *sk);
#define EIR_SERVICE_DATA_LENGTH 4
#define BASE_MAX_LENGTH (HCI_MAX_PER_AD_LENGTH - EIR_SERVICE_DATA_LENGTH)
+/* iso_pinfo flags values */
+enum {
+ BT_SK_BIG_SYNC,
+};
+
struct iso_pinfo {
struct bt_sock bt;
bdaddr_t src;
@@ -58,7 +63,7 @@ struct iso_pinfo {
__u8 bc_num_bis;
__u8 bc_bis[ISO_MAX_NUM_BIS];
__u16 sync_handle;
- __u32 flags;
+ unsigned long flags;
struct bt_iso_qos qos;
bool qos_user_set;
__u8 base_len;
@@ -287,13 +292,24 @@ static int iso_connect_bis(struct sock *sk)
goto unlock;
}
- hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst,
- le_addr_type(iso_pi(sk)->dst_type),
- &iso_pi(sk)->qos, iso_pi(sk)->base_len,
- iso_pi(sk)->base);
- if (IS_ERR(hcon)) {
- err = PTR_ERR(hcon);
- goto unlock;
+ /* Just bind if DEFER_SETUP has been set */
+ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ hcon = hci_bind_bis(hdev, &iso_pi(sk)->dst,
+ &iso_pi(sk)->qos, iso_pi(sk)->base_len,
+ iso_pi(sk)->base);
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto unlock;
+ }
+ } else {
+ hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst,
+ le_addr_type(iso_pi(sk)->dst_type),
+ &iso_pi(sk)->qos, iso_pi(sk)->base_len,
+ iso_pi(sk)->base);
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto unlock;
+ }
}
conn = iso_conn_add(hcon);
@@ -317,6 +333,9 @@ static int iso_connect_bis(struct sock *sk)
if (hcon->state == BT_CONNECTED) {
iso_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
+ } else if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
+ iso_sock_clear_timer(sk);
+ sk->sk_state = BT_CONNECT;
} else {
sk->sk_state = BT_CONNECT;
iso_sock_set_timer(sk, sk->sk_sndtimeo);
@@ -600,18 +619,6 @@ static void iso_sock_kill(struct sock *sk)
sock_put(sk);
}
-static void iso_conn_defer_reject(struct hci_conn *conn)
-{
- struct hci_cp_le_reject_cis cp;
-
- BT_DBG("conn %p", conn);
-
- memset(&cp, 0, sizeof(cp));
- cp.handle = cpu_to_le16(conn->handle);
- cp.reason = HCI_ERROR_REJ_BAD_ADDR;
- hci_send_cmd(conn->hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
-}
-
static void __iso_sock_close(struct sock *sk)
{
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
@@ -621,6 +628,7 @@ static void __iso_sock_close(struct sock *sk)
iso_sock_cleanup_listen(sk);
break;
+ case BT_CONNECT:
case BT_CONNECTED:
case BT_CONFIG:
if (iso_pi(sk)->conn->hcon) {
@@ -636,21 +644,6 @@ static void __iso_sock_close(struct sock *sk)
break;
case BT_CONNECT2:
- if (iso_pi(sk)->conn->hcon)
- iso_conn_defer_reject(iso_pi(sk)->conn->hcon);
- iso_chan_del(sk, ECONNRESET);
- break;
- case BT_CONNECT:
- /* In case of DEFER_SETUP the hcon would be bound to CIG which
- * needs to be removed so just call hci_conn_del so the cleanup
- * callback do what is needed.
- */
- if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) &&
- iso_pi(sk)->conn->hcon) {
- hci_conn_del(iso_pi(sk)->conn->hcon);
- iso_pi(sk)->conn->hcon = NULL;
- }
-
iso_chan_del(sk, ECONNRESET);
break;
case BT_DISCONN:
@@ -724,21 +717,13 @@ static struct sock *iso_sock_alloc(struct net *net, struct socket *sock,
{
struct sock *sk;
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &iso_proto, kern);
+ sk = bt_sock_alloc(net, sock, &iso_proto, proto, prio, kern);
if (!sk)
return NULL;
- sock_init_data(sock, sk);
- INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
-
sk->sk_destruct = iso_sock_destruct;
sk->sk_sndtimeo = ISO_CONN_TIMEOUT;
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- sk->sk_protocol = proto;
- sk->sk_state = BT_OPEN;
-
/* Set address type as public as default src address is BDADDR_ANY */
iso_pi(sk)->src_type = BDADDR_LE_PUBLIC;
@@ -1202,6 +1187,12 @@ static bool check_io_qos(struct bt_iso_io_qos *qos)
static bool check_ucast_qos(struct bt_iso_qos *qos)
{
+ if (qos->ucast.cig > 0xef && qos->ucast.cig != BT_ISO_QOS_CIG_UNSET)
+ return false;
+
+ if (qos->ucast.cis > 0xef && qos->ucast.cis != BT_ISO_QOS_CIS_UNSET)
+ return false;
+
if (qos->ucast.sca > 0x07)
return false;
@@ -1291,6 +1282,18 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
+ case BT_PKT_STATUS:
+ if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (opt)
+ set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
+ else
+ clear_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
+ break;
+
case BT_ISO_QOS:
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
sk->sk_state != BT_CONNECT2) {
@@ -1376,6 +1379,12 @@ static int iso_sock_getsockopt(struct socket *sock, int level, int optname,
break;
+ case BT_PKT_STATUS:
+ if (put_user(test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags),
+ (int __user *)optval))
+ err = -EFAULT;
+ break;
+
case BT_ISO_QOS:
qos = iso_sock_get_qos(sk);
@@ -1563,6 +1572,12 @@ static void iso_conn_ready(struct iso_conn *conn)
hci_conn_hold(hcon);
iso_chan_add(conn, sk, parent);
+ if (ev && ((struct hci_evt_le_big_sync_estabilished *)ev)->status) {
+ /* Trigger error signal on child socket */
+ sk->sk_err = ECONNREFUSED;
+ sk->sk_error_report(sk);
+ }
+
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
sk->sk_state = BT_CONNECT2;
else
@@ -1631,15 +1646,17 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
if (ev2->num_bis < iso_pi(sk)->bc_num_bis)
iso_pi(sk)->bc_num_bis = ev2->num_bis;
- err = hci_le_big_create_sync(hdev,
- &iso_pi(sk)->qos,
- iso_pi(sk)->sync_handle,
- iso_pi(sk)->bc_num_bis,
- iso_pi(sk)->bc_bis);
- if (err) {
- bt_dev_err(hdev, "hci_le_big_create_sync: %d",
- err);
- sk = NULL;
+ if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) {
+ err = hci_le_big_create_sync(hdev,
+ &iso_pi(sk)->qos,
+ iso_pi(sk)->sync_handle,
+ iso_pi(sk)->bc_num_bis,
+ iso_pi(sk)->bc_bis);
+ if (err) {
+ bt_dev_err(hdev, "hci_le_big_create_sync: %d",
+ err);
+ sk = NULL;
+ }
}
}
} else {
@@ -1676,13 +1693,18 @@ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
}
/* Create CIS if pending */
- hci_le_create_cis(hcon);
+ hci_le_create_cis_pending(hcon->hdev);
return;
}
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
- if (!status) {
+ /* Similar to the success case, if HCI_CONN_BIG_SYNC_FAILED is set,
+ * queue the failed bis connection into the accept queue of the
+ * listening socket and wake up userspace, to inform the user about
+ * the BIG sync failed event.
+ */
+ if (!status || test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) {
struct iso_conn *conn;
conn = iso_conn_add(hcon);
@@ -1757,6 +1779,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (len == skb->len) {
/* Complete frame received */
+ hci_skb_pkt_status(skb) = flags & 0x03;
iso_recv_frame(conn, skb);
return;
}
@@ -1778,6 +1801,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (!conn->rx_skb)
goto drop;
+ hci_skb_pkt_status(conn->rx_skb) = flags & 0x03;
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
skb->len);
conn->rx_len = len - skb->len;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 947ca580bb9a..3bdfc3f1e73d 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -178,21 +178,6 @@ done:
return err;
}
-static void l2cap_sock_init_pid(struct sock *sk)
-{
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-
- /* Only L2CAP_MODE_EXT_FLOWCTL ever need to access the PID in order to
- * group the channels being requested.
- */
- if (chan->mode != L2CAP_MODE_EXT_FLOWCTL)
- return;
-
- spin_lock(&sk->sk_peer_lock);
- sk->sk_peer_pid = get_pid(task_tgid(current));
- spin_unlock(&sk->sk_peer_lock);
-}
-
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
@@ -268,8 +253,6 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
chan->mode != L2CAP_MODE_EXT_FLOWCTL)
chan->mode = L2CAP_MODE_LE_FLOWCTL;
- l2cap_sock_init_pid(sk);
-
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
&la.l2_bdaddr, la.l2_bdaddr_type);
if (err)
@@ -325,8 +308,6 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
goto done;
}
- l2cap_sock_init_pid(sk);
-
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
@@ -1858,21 +1839,13 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
struct sock *sk;
struct l2cap_chan *chan;
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, kern);
+ sk = bt_sock_alloc(net, sock, &l2cap_proto, proto, prio, kern);
if (!sk)
return NULL;
- sock_init_data(sock, sk);
- INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
-
sk->sk_destruct = l2cap_sock_destruct;
sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- sk->sk_protocol = proto;
- sk->sk_state = BT_OPEN;
-
chan = l2cap_chan_create();
if (!chan) {
sk_free(sk);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index d4498037fadc..d6c9b7bc8592 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -944,6 +944,12 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (cis_peripheral_capable(hdev))
settings |= MGMT_SETTING_CIS_PERIPHERAL;
+ if (bis_capable(hdev))
+ settings |= MGMT_SETTING_ISO_BROADCASTER;
+
+ if (sync_recv_capable(hdev))
+ settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
+
return settings;
}
@@ -3580,18 +3586,6 @@ unlock:
return err;
}
-static int abort_conn_sync(struct hci_dev *hdev, void *data)
-{
- struct hci_conn *conn;
- u16 handle = PTR_ERR(data);
-
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn)
- return 0;
-
- return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
-}
-
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -3642,8 +3636,7 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
le_addr_type(addr->type));
if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
- hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
- NULL);
+ hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
unlock:
hci_dev_unlock(hdev);
@@ -8435,8 +8428,8 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
supported_flags = get_supported_adv_flags(hdev);
rp->supported_flags = cpu_to_le32(supported_flags);
- rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
- rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
+ rp->max_adv_data_len = max_adv_len(hdev);
+ rp->max_scan_rsp_len = max_adv_len(hdev);
rp->max_instances = hdev->le_num_of_adv_sets;
rp->num_instances = hdev->adv_instance_cnt;
@@ -8472,7 +8465,7 @@ static u8 calculate_name_len(struct hci_dev *hdev)
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
bool is_adv_data)
{
- u8 max_len = HCI_MAX_AD_LENGTH;
+ u8 max_len = max_adv_len(hdev);
if (is_adv_data) {
if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
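
Both read_adv_features() and tlv_data_max_len() now ask the controller for its advertising data limit instead of hard-coding HCI_MAX_AD_LENGTH. A hedged sketch of what a max_adv_len() helper presumably looks like; ext_adv_capable() and HCI_MAX_EXT_AD_LENGTH are existing names, but the body below is an assumption rather than the helper added elsewhere in this series:

/* Sketch: extended-advertising capable controllers accept the larger
 * extended AD payload, everything else keeps the legacy 31-byte limit. */
static inline u8 max_adv_len_sketch(struct hci_dev *hdev)
{
        return ext_adv_capable(hdev) ? HCI_MAX_EXT_AD_LENGTH :
                                       HCI_MAX_AD_LENGTH;
}
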
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index bf5cee48916c..abbafa6194ca 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -91,6 +91,33 @@ struct msft_ev_le_monitor_device {
struct msft_monitor_advertisement_handle_data {
__u8 msft_handle;
__u16 mgmt_handle;
+ __s8 rssi_high;
+ __s8 rssi_low;
+ __u8 rssi_low_interval;
+ __u8 rssi_sampling_period;
+ __u8 cond_type;
+ struct list_head list;
+};
+
+enum monitor_addr_filter_state {
+ AF_STATE_IDLE,
+ AF_STATE_ADDING,
+ AF_STATE_ADDED,
+ AF_STATE_REMOVING,
+};
+
+#define MSFT_MONITOR_ADVERTISEMENT_TYPE_ADDR 0x04
+struct msft_monitor_addr_filter_data {
+ __u8 msft_handle;
+ __u8 pattern_handle; /* pattern handle this filter pertains to */
+ __u16 mgmt_handle;
+ int state;
+ __s8 rssi_high;
+ __s8 rssi_low;
+ __u8 rssi_low_interval;
+ __u8 rssi_sampling_period;
+ __u8 addr_type;
+ bdaddr_t bdaddr;
struct list_head list;
};
@@ -99,9 +126,12 @@ struct msft_data {
__u8 evt_prefix_len;
__u8 *evt_prefix;
struct list_head handle_map;
+ struct list_head address_filters;
__u8 resuming;
__u8 suspending;
__u8 filter_enabled;
+ /* Synchronizes address filter add/remove with monitor device events. */
+ struct mutex filter_lock;
};
bool msft_monitor_supported(struct hci_dev *hdev)
@@ -180,6 +210,24 @@ static struct msft_monitor_advertisement_handle_data *msft_find_handle_data
return NULL;
}
+/* This function requires the caller holds msft->filter_lock */
+static struct msft_monitor_addr_filter_data *msft_find_address_data
+ (struct hci_dev *hdev, u8 addr_type, bdaddr_t *addr,
+ u8 pattern_handle)
+{
+ struct msft_monitor_addr_filter_data *entry;
+ struct msft_data *msft = hdev->msft_data;
+
+ list_for_each_entry(entry, &msft->address_filters, list) {
+ if (entry->pattern_handle == pattern_handle &&
+ addr_type == entry->addr_type &&
+ !bacmp(addr, &entry->bdaddr))
+ return entry;
+ }
+
+ return NULL;
+}
+
/* This function requires the caller holds hdev->lock */
static int msft_monitor_device_del(struct hci_dev *hdev, __u16 mgmt_handle,
bdaddr_t *bdaddr, __u8 addr_type,
@@ -240,6 +288,7 @@ static int msft_le_monitor_advertisement_cb(struct hci_dev *hdev, u16 opcode,
handle_data->mgmt_handle = monitor->handle;
handle_data->msft_handle = rp->handle;
+ handle_data->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN;
INIT_LIST_HEAD(&handle_data->list);
list_add(&handle_data->list, &msft->handle_map);
@@ -254,6 +303,70 @@ unlock:
return status;
}
+/* This function requires the caller holds hci_req_sync_lock */
+static void msft_remove_addr_filters_sync(struct hci_dev *hdev, u8 handle)
+{
+ struct msft_monitor_addr_filter_data *address_filter, *n;
+ struct msft_cp_le_cancel_monitor_advertisement cp;
+ struct msft_data *msft = hdev->msft_data;
+ struct list_head head;
+ struct sk_buff *skb;
+
+ INIT_LIST_HEAD(&head);
+
+ /* Cancel all corresponding address monitors */
+ mutex_lock(&msft->filter_lock);
+
+ list_for_each_entry_safe(address_filter, n, &msft->address_filters,
+ list) {
+ if (address_filter->pattern_handle != handle)
+ continue;
+
+ list_del(&address_filter->list);
+
+ /* Keep the address filter and let
+ * msft_add_address_filter_sync() remove and free the address
+ * filter.
+ */
+ if (address_filter->state == AF_STATE_ADDING) {
+ address_filter->state = AF_STATE_REMOVING;
+ continue;
+ }
+
+ /* Keep the address filter and let
+ * msft_cancel_address_filter_sync() remove and free the address
+ * filter
+ */
+ if (address_filter->state == AF_STATE_REMOVING)
+ continue;
+
+ list_add_tail(&address_filter->list, &head);
+ }
+
+ mutex_unlock(&msft->filter_lock);
+
+ list_for_each_entry_safe(address_filter, n, &head, list) {
+ list_del(&address_filter->list);
+
+ cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT;
+ cp.handle = address_filter->msft_handle;
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR_OR_NULL(skb)) {
+ kfree(address_filter);
+ continue;
+ }
+
+ kfree_skb(skb);
+
+ bt_dev_dbg(hdev, "MSFT: Canceled device %pMR address filter",
+ &address_filter->bdaddr);
+
+ kfree(address_filter);
+ }
+}
+
static int msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
u16 opcode,
struct adv_monitor *monitor,
@@ -263,6 +376,7 @@ static int msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
struct msft_monitor_advertisement_handle_data *handle_data;
struct msft_data *msft = hdev->msft_data;
int status = 0;
+ u8 msft_handle;
rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data;
if (skb->len < sizeof(*rp)) {
@@ -293,11 +407,17 @@ static int msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
NULL, 0, false);
}
+ msft_handle = handle_data->msft_handle;
+
list_del(&handle_data->list);
kfree(handle_data);
- }
- hci_dev_unlock(hdev);
+ hci_dev_unlock(hdev);
+
+ msft_remove_addr_filters_sync(hdev, msft_handle);
+ } else {
+ hci_dev_unlock(hdev);
+ }
done:
return status;
@@ -394,12 +514,14 @@ static int msft_add_monitor_sync(struct hci_dev *hdev,
{
struct msft_cp_le_monitor_advertisement *cp;
struct msft_le_monitor_advertisement_pattern_data *pattern_data;
+ struct msft_monitor_advertisement_handle_data *handle_data;
struct msft_le_monitor_advertisement_pattern *pattern;
struct adv_pattern *entry;
size_t total_size = sizeof(*cp) + sizeof(*pattern_data);
ptrdiff_t offset = 0;
u8 pattern_count = 0;
struct sk_buff *skb;
+ int err;
if (!msft_monitor_pattern_valid(monitor))
return -EINVAL;
@@ -436,16 +558,31 @@ static int msft_add_monitor_sync(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp,
HCI_CMD_TIMEOUT);
- kfree(cp);
if (IS_ERR_OR_NULL(skb)) {
- if (!skb)
- return -EIO;
- return PTR_ERR(skb);
+ err = PTR_ERR(skb);
+ goto out_free;
}
- return msft_le_monitor_advertisement_cb(hdev, hdev->msft_opcode,
- monitor, skb);
+ err = msft_le_monitor_advertisement_cb(hdev, hdev->msft_opcode,
+ monitor, skb);
+ if (err)
+ goto out_free;
+
+ handle_data = msft_find_handle_data(hdev, monitor->handle, true);
+ if (!handle_data) {
+ err = -ENODATA;
+ goto out_free;
+ }
+
+ handle_data->rssi_high = cp->rssi_high;
+ handle_data->rssi_low = cp->rssi_low;
+ handle_data->rssi_low_interval = cp->rssi_low_interval;
+ handle_data->rssi_sampling_period = cp->rssi_sampling_period;
+
+out_free:
+ kfree(cp);
+ return err;
}
/* This function requires the caller holds hci_req_sync_lock */
@@ -538,6 +675,7 @@ void msft_do_close(struct hci_dev *hdev)
{
struct msft_data *msft = hdev->msft_data;
struct msft_monitor_advertisement_handle_data *handle_data, *tmp;
+ struct msft_monitor_addr_filter_data *address_filter, *n;
struct adv_monitor *monitor;
if (!msft)
@@ -559,6 +697,14 @@ void msft_do_close(struct hci_dev *hdev)
kfree(handle_data);
}
+ mutex_lock(&msft->filter_lock);
+ list_for_each_entry_safe(address_filter, n, &msft->address_filters,
+ list) {
+ list_del(&address_filter->list);
+ kfree(address_filter);
+ }
+ mutex_unlock(&msft->filter_lock);
+
hci_dev_lock(hdev);
/* Clear any devices that are being monitored and notify device lost */
@@ -568,6 +714,49 @@ void msft_do_close(struct hci_dev *hdev)
hci_dev_unlock(hdev);
}
+static int msft_cancel_address_filter_sync(struct hci_dev *hdev, void *data)
+{
+ struct msft_monitor_addr_filter_data *address_filter = data;
+ struct msft_cp_le_cancel_monitor_advertisement cp;
+ struct msft_data *msft = hdev->msft_data;
+ struct sk_buff *skb;
+ int err = 0;
+
+ if (!msft) {
+ bt_dev_err(hdev, "MSFT: msft data is freed");
+ return -EINVAL;
+ }
+
+ /* The address filter has been removed by hci dev close */
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return 0;
+
+ mutex_lock(&msft->filter_lock);
+ list_del(&address_filter->list);
+ mutex_unlock(&msft->filter_lock);
+
+ cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT;
+ cp.handle = address_filter->msft_handle;
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR_OR_NULL(skb)) {
+ bt_dev_err(hdev, "MSFT: Failed to cancel address (%pMR) filter",
+ &address_filter->bdaddr);
+ err = -EIO;
+ goto done;
+ }
+ kfree_skb(skb);
+
+ bt_dev_dbg(hdev, "MSFT: Canceled device %pMR address filter",
+ &address_filter->bdaddr);
+
+done:
+ kfree(address_filter);
+
+ return err;
+}
+
void msft_register(struct hci_dev *hdev)
{
struct msft_data *msft = NULL;
@@ -581,7 +770,9 @@ void msft_register(struct hci_dev *hdev)
}
INIT_LIST_HEAD(&msft->handle_map);
+ INIT_LIST_HEAD(&msft->address_filters);
hdev->msft_data = msft;
+ mutex_init(&msft->filter_lock);
}
void msft_unregister(struct hci_dev *hdev)
@@ -596,6 +787,7 @@ void msft_unregister(struct hci_dev *hdev)
hdev->msft_data = NULL;
kfree(msft->evt_prefix);
+ mutex_destroy(&msft->filter_lock);
kfree(msft);
}
@@ -645,11 +837,149 @@ static void *msft_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
return data;
}
+static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
+{
+ struct msft_monitor_addr_filter_data *address_filter = data;
+ struct msft_rp_le_monitor_advertisement *rp;
+ struct msft_cp_le_monitor_advertisement *cp;
+ struct msft_data *msft = hdev->msft_data;
+ struct sk_buff *skb = NULL;
+ bool remove = false;
+ size_t size;
+
+ if (!msft) {
+ bt_dev_err(hdev, "MSFT: msft data is freed");
+ return -EINVAL;
+ }
+
+ /* The address filter has been removed by hci dev close */
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return -ENODEV;
+
+ /* It is safe to use the address filter from now on:
+ * msft_monitor_device_evt() cannot delete this filter because it has
+ * not been added yet, and all other functions that could touch it
+ * require hci_req_sync_lock, which is held until this function
+ * completes.
+ */
+
+ if (address_filter->state == AF_STATE_REMOVING) {
+ mutex_lock(&msft->filter_lock);
+ list_del(&address_filter->list);
+ mutex_unlock(&msft->filter_lock);
+ kfree(address_filter);
+ return 0;
+ }
+
+ size = sizeof(*cp) +
+ sizeof(address_filter->addr_type) +
+ sizeof(address_filter->bdaddr);
+ cp = kzalloc(size, GFP_KERNEL);
+ if (!cp) {
+ bt_dev_err(hdev, "MSFT: Alloc cmd param err");
+ remove = true;
+ goto done;
+ }
+ cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
+ cp->rssi_high = address_filter->rssi_high;
+ cp->rssi_low = address_filter->rssi_low;
+ cp->rssi_low_interval = address_filter->rssi_low_interval;
+ cp->rssi_sampling_period = address_filter->rssi_sampling_period;
+ cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_ADDR;
+ cp->data[0] = address_filter->addr_type;
+ memcpy(&cp->data[1], &address_filter->bdaddr,
+ sizeof(address_filter->bdaddr));
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp,
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR_OR_NULL(skb)) {
+ bt_dev_err(hdev, "Failed to enable address %pMR filter",
+ &address_filter->bdaddr);
+ skb = NULL;
+ remove = true;
+ goto done;
+ }
+
+ rp = skb_pull_data(skb, sizeof(*rp));
+ if (!rp || rp->sub_opcode != MSFT_OP_LE_MONITOR_ADVERTISEMENT ||
+ rp->status)
+ remove = true;
+
+done:
+ mutex_lock(&msft->filter_lock);
+
+ if (remove) {
+ bt_dev_warn(hdev, "MSFT: Remove address (%pMR) filter",
+ &address_filter->bdaddr);
+ list_del(&address_filter->list);
+ kfree(address_filter);
+ } else {
+ address_filter->state = AF_STATE_ADDED;
+ address_filter->msft_handle = rp->handle;
+ bt_dev_dbg(hdev, "MSFT: Address %pMR filter enabled",
+ &address_filter->bdaddr);
+ }
+ mutex_unlock(&msft->filter_lock);
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+/* This function requires the caller holds msft->filter_lock */
+static struct msft_monitor_addr_filter_data *msft_add_address_filter
+ (struct hci_dev *hdev, u8 addr_type, bdaddr_t *bdaddr,
+ struct msft_monitor_advertisement_handle_data *handle_data)
+{
+ struct msft_monitor_addr_filter_data *address_filter = NULL;
+ struct msft_data *msft = hdev->msft_data;
+ int err;
+
+ address_filter = kzalloc(sizeof(*address_filter), GFP_KERNEL);
+ if (!address_filter)
+ return NULL;
+
+ address_filter->state = AF_STATE_ADDING;
+ address_filter->msft_handle = 0xff;
+ address_filter->pattern_handle = handle_data->msft_handle;
+ address_filter->mgmt_handle = handle_data->mgmt_handle;
+ address_filter->rssi_high = handle_data->rssi_high;
+ address_filter->rssi_low = handle_data->rssi_low;
+ address_filter->rssi_low_interval = handle_data->rssi_low_interval;
+ address_filter->rssi_sampling_period = handle_data->rssi_sampling_period;
+ address_filter->addr_type = addr_type;
+ bacpy(&address_filter->bdaddr, bdaddr);
+
+ /* Setting AF_STATE_ADDING above prevents duplicate address filters
+ * from being added when monitor device (found/lost) events arrive
+ * frequently for the same device.
+ */
+ list_add_tail(&address_filter->list, &msft->address_filters);
+
+ err = hci_cmd_sync_queue(hdev, msft_add_address_filter_sync,
+ address_filter, NULL);
+ if (err < 0) {
+ bt_dev_err(hdev, "MSFT: Add address %pMR filter err", bdaddr);
+ list_del(&address_filter->list);
+ kfree(address_filter);
+ return NULL;
+ }
+
+ bt_dev_dbg(hdev, "MSFT: Add device %pMR address filter",
+ &address_filter->bdaddr);
+
+ return address_filter;
+}
+
/* This function requires the caller holds hdev->lock */
static void msft_monitor_device_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
+ struct msft_monitor_addr_filter_data *n, *address_filter = NULL;
struct msft_ev_le_monitor_device *ev;
struct msft_monitor_advertisement_handle_data *handle_data;
+ struct msft_data *msft = hdev->msft_data;
+ u16 mgmt_handle = 0xffff;
u8 addr_type;
ev = msft_skb_pull(hdev, skb, MSFT_EV_LE_MONITOR_DEVICE, sizeof(*ev));
@@ -662,9 +992,53 @@ static void msft_monitor_device_evt(struct hci_dev *hdev, struct sk_buff *skb)
ev->monitor_state, &ev->bdaddr);
handle_data = msft_find_handle_data(hdev, ev->monitor_handle, false);
- if (!handle_data)
+
+ if (!test_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks)) {
+ if (!handle_data)
+ return;
+ mgmt_handle = handle_data->mgmt_handle;
+ goto report_state;
+ }
+
+ if (handle_data) {
+ /* Don't report device found/lost events that come from pattern
+ * monitors; a pattern monitor always tracks devices through its own
+ * address filters.
+ */
+
+ address_filter = msft_find_address_data(hdev, ev->addr_type,
+ &ev->bdaddr,
+ handle_data->msft_handle);
+ if (address_filter)
+ return;
+
+ if (ev->monitor_state && handle_data->cond_type ==
+ MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN)
+ msft_add_address_filter(hdev, ev->addr_type,
+ &ev->bdaddr, handle_data);
+
return;
+ }
+ /* This device event does not come from a pattern monitor.
+ * Report it only if a corresponding address_filter exists for it.
+ */
+ list_for_each_entry(n, &msft->address_filters, list) {
+ if (n->state == AF_STATE_ADDED &&
+ n->msft_handle == ev->monitor_handle) {
+ mgmt_handle = n->mgmt_handle;
+ address_filter = n;
+ break;
+ }
+ }
+
+ if (!address_filter) {
+ bt_dev_warn(hdev, "MSFT: Unexpected device event %pMR, %u, %u",
+ &ev->bdaddr, ev->monitor_handle, ev->monitor_state);
+ return;
+ }
+
+report_state:
switch (ev->addr_type) {
case ADDR_LE_DEV_PUBLIC:
addr_type = BDADDR_LE_PUBLIC;
@@ -681,12 +1055,18 @@ static void msft_monitor_device_evt(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
- if (ev->monitor_state)
- msft_device_found(hdev, &ev->bdaddr, addr_type,
- handle_data->mgmt_handle);
- else
- msft_device_lost(hdev, &ev->bdaddr, addr_type,
- handle_data->mgmt_handle);
+ if (ev->monitor_state) {
+ msft_device_found(hdev, &ev->bdaddr, addr_type, mgmt_handle);
+ } else {
+ if (address_filter && address_filter->state == AF_STATE_ADDED) {
+ address_filter->state = AF_STATE_REMOVING;
+ hci_cmd_sync_queue(hdev,
+ msft_cancel_address_filter_sync,
+ address_filter,
+ NULL);
+ }
+ msft_device_lost(hdev, &ev->bdaddr, addr_type, mgmt_handle);
+ }
}
void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb)
@@ -724,7 +1104,9 @@ void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb)
switch (*evt) {
case MSFT_EV_LE_MONITOR_DEVICE:
+ mutex_lock(&msft->filter_lock);
msft_monitor_device_evt(hdev, skb);
+ mutex_unlock(&msft->filter_lock);
break;
default:
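
Taken together, the msft.c hunks above implement a small life cycle for the new per-device address filters: a filter is created in AF_STATE_ADDING from the event handler, promoted to AF_STATE_ADDED (or freed) by msft_add_address_filter_sync(), and moved to AF_STATE_REMOVING when a device-lost event or a monitor removal queues the cancel path. The sketch below only restates those transitions from the hunks for readability; it adds no behaviour of its own:

/* Which path is allowed to finish (free or advance) a filter in each
 * state, as implied by the diff above. */
static const char *af_state_owner(int state)
{
        switch (state) {
        case AF_STATE_ADDING:
                /* queued by msft_add_address_filter(); completed or freed
                 * only by msft_add_address_filter_sync() */
                return "msft_add_address_filter_sync";
        case AF_STATE_ADDED:
                /* device-lost handling flips it to REMOVING and queues the
                 * cancel work */
                return "msft_monitor_device_evt";
        case AF_STATE_REMOVING:
                /* freed by msft_cancel_address_filter_sync(), or by
                 * msft_add_address_filter_sync() if removal raced with add */
                return "msft_cancel_address_filter_sync";
        default:
                return "AF_STATE_IDLE (unused)";
        }
}
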
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 4397e14ff560..b54e8a530f55 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -268,18 +268,16 @@ static struct proto rfcomm_proto = {
.obj_size = sizeof(struct rfcomm_pinfo)
};
-static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern)
+static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock,
+ int proto, gfp_t prio, int kern)
{
struct rfcomm_dlc *d;
struct sock *sk;
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, kern);
+ sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern);
if (!sk)
return NULL;
- sock_init_data(sock, sk);
- INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
-
d = rfcomm_dlc_alloc(prio);
if (!d) {
sk_free(sk);
@@ -298,11 +296,6 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int
sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- sk->sk_protocol = proto;
- sk->sk_state = BT_OPEN;
-
bt_sock_link(&rfcomm_sk_list, sk);
BT_DBG("sk %p", sk);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 7762604ddfc0..50ad5935ae47 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -68,7 +68,6 @@ struct sco_pinfo {
bdaddr_t dst;
__u32 flags;
__u16 setting;
- __u8 cmsg_mask;
struct bt_codec codec;
struct sco_conn *conn;
};
@@ -471,15 +470,6 @@ static void sco_sock_close(struct sock *sk)
release_sock(sk);
}
-static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg,
- struct sock *sk)
-{
- if (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS)
- put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS,
- sizeof(bt_cb(skb)->sco.pkt_status),
- &bt_cb(skb)->sco.pkt_status);
-}
-
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
@@ -488,8 +478,6 @@ static void sco_sock_init(struct sock *sk, struct sock *parent)
sk->sk_type = parent->sk_type;
bt_sk(sk)->flags = bt_sk(parent)->flags;
security_sk_clone(parent, sk);
- } else {
- bt_sk(sk)->skb_put_cmsg = sco_skb_put_cmsg;
}
}
@@ -504,21 +492,13 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
{
struct sock *sk;
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern);
+ sk = bt_sock_alloc(net, sock, &sco_proto, proto, prio, kern);
if (!sk)
return NULL;
- sock_init_data(sock, sk);
- INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
-
sk->sk_destruct = sco_sock_destruct;
sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
- sock_reset_flag(sk, SOCK_ZAPPED);
-
- sk->sk_protocol = proto;
- sk->sk_state = BT_OPEN;
-
sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
sco_pi(sk)->codec.id = BT_CODEC_CVSD;
sco_pi(sk)->codec.cid = 0xffff;
@@ -915,9 +895,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
}
if (opt)
- sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS;
+ set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
else
- sco_pi(sk)->cmsg_mask &= SCO_CMSG_PKT_STATUS;
+ clear_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags);
break;
case BT_CODEC:
@@ -1048,7 +1028,6 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
int len, err = 0;
struct bt_voice voice;
u32 phys;
- int pkt_status;
int buf_len;
struct codec_list *c;
u8 num_codecs, i, __user *ptr;
@@ -1102,9 +1081,8 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
break;
case BT_PKT_STATUS:
- pkt_status = (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS);
-
- if (put_user(pkt_status, (int __user *)optval))
+ if (put_user(test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags),
+ (int __user *)optval))
err = -EFAULT;
break;
diff --git a/net/core/dev.c b/net/core/dev.c
index 636b41f0b32d..17e6281e408c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -9589,6 +9589,11 @@ static int dev_index_reserve(struct net *net, u32 ifindex)
{
int err;
+ if (ifindex > INT_MAX) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
if (!ifindex)
err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
xa_limit_31b, &net->ifindex, GFP_KERNEL);
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 797c813c7c77..c1aea8b756b6 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -10,11 +10,11 @@
static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
- u32 portid, u32 seq, int flags, u32 cmd)
+ const struct genl_info *info)
{
void *hdr;
- hdr = genlmsg_put(rsp, portid, seq, &netdev_nl_family, flags, cmd);
+ hdr = genlmsg_iput(rsp, info);
if (!hdr)
return -EMSGSIZE;
@@ -41,17 +41,20 @@ netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
+ struct genl_info info;
struct sk_buff *ntf;
if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
NETDEV_NLGRP_MGMT))
return;
+ genl_info_init_ntf(&info, &netdev_nl_family, cmd);
+
ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!ntf)
return;
- if (netdev_nl_dev_fill(netdev, ntf, 0, 0, 0, cmd)) {
+ if (netdev_nl_dev_fill(netdev, ntf, &info)) {
nlmsg_free(ntf);
return;
}
@@ -80,8 +83,7 @@ int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
netdev = __dev_get_by_index(genl_info_net(info), ifindex);
if (netdev)
- err = netdev_nl_dev_fill(netdev, rsp, info->snd_portid,
- info->snd_seq, 0, info->genlhdr->cmd);
+ err = netdev_nl_dev_fill(netdev, rsp, info);
else
err = -ENODEV;
@@ -105,10 +107,7 @@ int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
rtnl_lock();
for_each_netdev_dump(net, netdev, cb->args[0]) {
- err = netdev_nl_dev_fill(netdev, skb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, 0,
- NETDEV_CMD_DEV_GET);
+ err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
if (err < 0)
break;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 525619776c6f..22d94394335f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -767,7 +767,7 @@ bool sk_mc_loop(struct sock *sk)
return true;
switch (sk->sk_family) {
case AF_INET:
- return inet_sk(sk)->mc_loop;
+ return inet_test_bit(MC_LOOP, sk);
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
return inet6_sk(sk)->mc_loop;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8e919cfe6e23..8dd6837c476a 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -247,7 +247,6 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
const u8 offset = iph->ihl << 2;
const struct dccp_hdr *dh;
struct dccp_sock *dp;
- struct inet_sock *inet;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct sock *sk;
@@ -361,8 +360,7 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
* --ANK (980905)
*/
- inet = inet_sk(sk);
- if (!sock_owned_by_user(sk) && inet->recverr) {
+ if (!sock_owned_by_user(sk) && inet_test_bit(RECVERR, sk)) {
sk->sk_err = err;
sk_error_report(sk);
} else { /* Only an error on timeout */
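
The sk_mc_loop() and dccp_v4_err() hunks both replace a locked read of an inet_sock bitfield with inet_test_bit(), so the option can be sampled atomically without the socket lock. A hedged sketch of the pattern follows; the macro, flag enum and field names are assumptions about the helper in include/net/inet_sock.h, not taken from this diff:

/* Sketch of the flags-word pattern: each former bitfield becomes a bit
 * in one unsigned long, readable with test_bit() and writable with
 * set_bit()/clear_bit() without sk_lock. All names are illustrative. */
enum sketch_inet_flags {
        SKETCH_INET_FLAGS_RECVERR,
        SKETCH_INET_FLAGS_MC_LOOP,
};

#define sketch_inet_test_bit(nr, sk) \
        test_bit(SKETCH_INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)

/* Usage mirroring the hunks above:
 *
 *      if (!sock_owned_by_user(sk) && sketch_inet_test_bit(RECVERR, sk))
 *              sk_error_report(sk);
 */
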
diff --git a/net/devlink/dev.c b/net/devlink/dev.c
index 5dfba2248b90..abf3393a7a17 100644
--- a/net/devlink/dev.c
+++ b/net/devlink/dev.c
@@ -218,11 +218,11 @@ int devlink_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
static int
devlink_nl_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb)
+ struct netlink_callback *cb, int flags)
{
return devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, flags);
}
int devlink_nl_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
@@ -828,13 +828,13 @@ int devlink_nl_info_get_doit(struct sk_buff *skb, struct genl_info *info)
static int
devlink_nl_info_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb)
+ struct netlink_callback *cb, int flags)
{
int err;
err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ cb->nlh->nlmsg_seq, flags,
cb->extack);
if (err == -EOPNOTSUPP)
err = 0;
@@ -1206,8 +1206,7 @@ err_cancel_msg:
return err;
}
-int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_selftests_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct sk_buff *msg;
@@ -1230,23 +1229,25 @@ int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb,
return genlmsg_reply(msg, info);
}
-static int
-devlink_nl_cmd_selftests_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb)
+static int devlink_nl_selftests_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
{
if (!devlink->ops->selftest_check)
return 0;
return devlink_nl_selftests_fill(msg, devlink,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ cb->nlh->nlmsg_seq, flags,
cb->extack);
}
-const struct devlink_cmd devl_cmd_selftests_get = {
- .dump_one = devlink_nl_cmd_selftests_get_dump_one,
-};
+int devlink_nl_selftests_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_selftests_get_dump_one);
+}
static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id,
enum devlink_selftest_status test_status)
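
From here on, the devlink refactor follows one pattern: each per-command struct devlink_cmd with its dump_one hook becomes a plain devlink_nl_*_get_dumpit() that hands devlink_nl_dumpit() a callback taking an explicit flags argument. A hedged sketch of the iterator side of that contract; the real devlink_nl_dumpit() also handles dump-state resume, and the iteration macro and state field names below are assumptions:

/* Skeleton only: walk registered devlink instances and let the
 * per-command callback decide what to put in the message, passing the
 * baseline NLM_F_MULTI so callbacks can OR in NLM_F_DUMP_FILTERED. */
static int devlink_nl_dumpit_sketch(struct sk_buff *msg,
                                    struct netlink_callback *cb,
                                    devlink_nl_dump_one_func_t *dump_one)
{
        struct devlink_nl_dump_state *state = devlink_dump_state(cb);
        struct devlink *devlink;
        int err = 0;

        devlinks_xa_for_each_registered_get(sock_net(msg->sk),
                                            state->instance, devlink) {
                devl_lock(devlink);
                err = dump_one(msg, devlink, cb, NLM_F_MULTI);
                devl_unlock(devlink);
                devlink_put(devlink);
                if (err)
                        break;
        }

        return err;
}
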
diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h
index 7fdd956ff992..eb1d5066c73f 100644
--- a/net/devlink/devl_internal.h
+++ b/net/devlink/devl_internal.h
@@ -92,9 +92,6 @@ static inline bool devl_is_registered(struct devlink *devlink)
/* Netlink */
#define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1)
-#define DEVLINK_NL_FLAG_NEED_RATE BIT(2)
-#define DEVLINK_NL_FLAG_NEED_RATE_NODE BIT(3)
-#define DEVLINK_NL_FLAG_NEED_LINECARD BIT(4)
enum devlink_multicast_groups {
DEVLINK_MCGRP_CONFIG,
@@ -118,13 +115,10 @@ struct devlink_nl_dump_state {
typedef int devlink_nl_dump_one_func_t(struct sk_buff *msg,
struct devlink *devlink,
- struct netlink_callback *cb);
+ struct netlink_callback *cb,
+ int flags);
-struct devlink_cmd {
- devlink_nl_dump_one_func_t *dump_one;
-};
-
-extern const struct genl_small_ops devlink_nl_small_ops[54];
+extern const struct genl_small_ops devlink_nl_small_ops[40];
struct devlink *
devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs);
@@ -134,7 +128,6 @@ void devlink_notify_register(struct devlink *devlink);
int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
devlink_nl_dump_one_func_t *dump_one);
-int devlink_nl_instance_iter_dumpit(struct sk_buff *msg, struct netlink_callback *cb);
static inline struct devlink_nl_dump_state *
devlink_dump_state(struct netlink_callback *cb)
@@ -154,22 +147,6 @@ devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
return 0;
}
-/* Commands */
-extern const struct devlink_cmd devl_cmd_port_get;
-extern const struct devlink_cmd devl_cmd_sb_get;
-extern const struct devlink_cmd devl_cmd_sb_pool_get;
-extern const struct devlink_cmd devl_cmd_sb_port_pool_get;
-extern const struct devlink_cmd devl_cmd_sb_tc_pool_bind_get;
-extern const struct devlink_cmd devl_cmd_param_get;
-extern const struct devlink_cmd devl_cmd_region_get;
-extern const struct devlink_cmd devl_cmd_health_reporter_get;
-extern const struct devlink_cmd devl_cmd_trap_get;
-extern const struct devlink_cmd devl_cmd_trap_group_get;
-extern const struct devlink_cmd devl_cmd_trap_policer_get;
-extern const struct devlink_cmd devl_cmd_rate_get;
-extern const struct devlink_cmd devl_cmd_linecard_get;
-extern const struct devlink_cmd devl_cmd_selftests_get;
-
/* Notify */
void devlink_notify(struct devlink *devlink, enum devlink_command cmd);
@@ -203,29 +180,16 @@ int devlink_resources_validate(struct devlink *devlink,
struct devlink_resource *resource,
struct genl_info *info);
-/* Line cards */
-struct devlink_linecard;
-
-struct devlink_linecard *
-devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info);
-
/* Rates */
int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
-struct devlink_rate *
-devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info);
-struct devlink_rate *
-devlink_rate_node_get_from_info(struct devlink *devlink,
- struct genl_info *info);
+
/* Devlink nl cmds */
int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_cmd_flash_update(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_selftests_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_cmd_selftests_run(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
- struct genl_info *info);
int devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
struct genl_info *info);
int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
diff --git a/net/devlink/health.c b/net/devlink/health.c
index 194340a8bb86..638cad8d5c65 100644
--- a/net/devlink/health.c
+++ b/net/devlink/health.c
@@ -356,8 +356,8 @@ devlink_health_reporter_get_from_info(struct devlink *devlink,
return devlink_health_reporter_get_from_attrs(devlink, info->attrs);
}
-int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_health_reporter_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_health_reporter *reporter;
@@ -384,18 +384,29 @@ int devlink_nl_cmd_health_reporter_get_doit(struct sk_buff *skb,
return genlmsg_reply(msg, info);
}
-static int
-devlink_nl_cmd_health_reporter_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb)
+static int devlink_nl_health_reporter_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ const struct genl_info *info = genl_info_dump(cb);
struct devlink_health_reporter *reporter;
+ unsigned long port_index_end = ULONG_MAX;
+ struct nlattr **attrs = info->attrs;
+ unsigned long port_index_start = 0;
struct devlink_port *port;
unsigned long port_index;
int idx = 0;
int err;
+ if (attrs && attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ port_index_start = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
+ port_index_end = port_index_start;
+ flags |= NLM_F_DUMP_FILTERED;
+ goto per_port_dump;
+ }
+
list_for_each_entry(reporter, &devlink->reporter_list, list) {
if (idx < state->idx) {
idx++;
@@ -405,14 +416,16 @@ devlink_nl_cmd_health_reporter_get_dump_one(struct sk_buff *msg,
DEVLINK_CMD_HEALTH_REPORTER_GET,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
+ flags);
if (err) {
state->idx = idx;
return err;
}
idx++;
}
- xa_for_each(&devlink->ports, port_index, port) {
+per_port_dump:
+ xa_for_each_range(&devlink->ports, port_index, port,
+ port_index_start, port_index_end) {
list_for_each_entry(reporter, &port->reporter_list, list) {
if (idx < state->idx) {
idx++;
@@ -422,7 +435,7 @@ devlink_nl_cmd_health_reporter_get_dump_one(struct sk_buff *msg,
DEVLINK_CMD_HEALTH_REPORTER_GET,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
+ flags);
if (err) {
state->idx = idx;
return err;
@@ -434,9 +447,12 @@ devlink_nl_cmd_health_reporter_get_dump_one(struct sk_buff *msg,
return 0;
}
-const struct devlink_cmd devl_cmd_health_reporter_get = {
- .dump_one = devlink_nl_cmd_health_reporter_get_dump_one,
-};
+int devlink_nl_health_reporter_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb,
+ devlink_nl_health_reporter_get_dump_one);
+}
int devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
struct genl_info *info)
@@ -1248,7 +1264,7 @@ out:
static struct devlink_health_reporter *
devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
{
- const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ const struct genl_info *info = genl_info_dump(cb);
struct devlink_health_reporter *reporter;
struct nlattr **attrs = info->attrs;
struct devlink *devlink;
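
With the changes above, a health-reporter dump that carries DEVLINK_ATTR_PORT_INDEX walks only that one port's reporters and marks the replies with NLM_F_DUMP_FILTERED. The filtering itself rests on clamping the xarray walk to a single index; a small self-contained illustration of that trick, where the port structure is invented for the example and xa_for_each_range() is the stock XArray helper:

/* Toy example of the same clamp-the-walk trick: with no filter the walk
 * covers [0, ULONG_MAX]; with a filter both bounds collapse to one index. */
#include <linux/xarray.h>

struct example_port { u32 index; };

static void dump_ports(struct xarray *ports, unsigned long filter,
                       bool filtered)
{
        unsigned long start = filtered ? filter : 0;
        unsigned long end = filtered ? filter : ULONG_MAX;
        struct example_port *port;
        unsigned long index;

        xa_for_each_range(ports, index, port, start, end) {
                /* emit one reply per reporter on this port; set
                 * NLM_F_DUMP_FILTERED on the replies when 'filtered' */
                (void)port;
        }
}
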
diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c
index e7900d9fa205..72ba8a716525 100644
--- a/net/devlink/leftover.c
+++ b/net/devlink/leftover.c
@@ -232,13 +232,13 @@ devlink_rate_node_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
return devlink_rate_node_get_by_name(devlink, rate_node_name);
}
-struct devlink_rate *
+static struct devlink_rate *
devlink_rate_node_get_from_info(struct devlink *devlink, struct genl_info *info)
{
return devlink_rate_node_get_from_attrs(devlink, info->attrs);
}
-struct devlink_rate *
+static struct devlink_rate *
devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info)
{
struct nlattr **attrs = info->attrs;
@@ -285,7 +285,7 @@ devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
return ERR_PTR(-EINVAL);
}
-struct devlink_linecard *
+static struct devlink_linecard *
devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info)
{
return devlink_linecard_get_from_attrs(devlink, info->attrs);
@@ -1005,8 +1005,8 @@ static void devlink_rate_notify(struct devlink_rate *devlink_rate,
}
static int
-devlink_nl_cmd_rate_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb)
+devlink_nl_rate_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_rate *devlink_rate;
@@ -1022,8 +1022,7 @@ devlink_nl_cmd_rate_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
continue;
}
err = devlink_nl_rate_fill(msg, devlink_rate, cmd, id,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI, NULL);
+ cb->nlh->nlmsg_seq, flags, NULL);
if (err) {
state->idx = idx;
break;
@@ -1034,17 +1033,22 @@ devlink_nl_cmd_rate_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
return err;
}
-const struct devlink_cmd devl_cmd_rate_get = {
- .dump_one = devlink_nl_cmd_rate_get_dump_one,
-};
+int devlink_nl_rate_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_rate_get_dump_one);
+}
-static int devlink_nl_cmd_rate_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_rate_get_doit(struct sk_buff *skb, struct genl_info *info)
{
- struct devlink_rate *devlink_rate = info->user_ptr[1];
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_rate *devlink_rate;
struct sk_buff *msg;
int err;
+ devlink_rate = devlink_rate_get_from_info(devlink, info);
+ if (IS_ERR(devlink_rate))
+ return PTR_ERR(devlink_rate);
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -1072,8 +1076,7 @@ devlink_rate_is_parent_node(struct devlink_rate *devlink_rate,
return false;
}
-static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_port_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
struct sk_buff *msg;
@@ -1095,8 +1098,8 @@ static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
}
static int
-devlink_nl_cmd_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb)
+devlink_nl_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_port *devlink_port;
@@ -1107,8 +1110,8 @@ devlink_nl_cmd_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
err = devlink_nl_port_fill(msg, devlink_port,
DEVLINK_CMD_NEW,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI, cb->extack);
+ cb->nlh->nlmsg_seq, flags,
+ cb->extack);
if (err) {
state->idx = port_index;
break;
@@ -1118,9 +1121,10 @@ devlink_nl_cmd_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
return err;
}
-const struct devlink_cmd devl_cmd_port_get = {
- .dump_one = devlink_nl_cmd_port_get_dump_one,
-};
+int devlink_nl_port_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_port_get_dump_one);
+}
static int devlink_port_type_set(struct devlink_port *devlink_port,
enum devlink_port_type port_type)
@@ -1629,11 +1633,16 @@ static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
static int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb,
struct genl_info *info)
{
- struct devlink_rate *devlink_rate = info->user_ptr[1];
- struct devlink *devlink = devlink_rate->devlink;
- const struct devlink_ops *ops = devlink->ops;
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_rate *devlink_rate;
+ const struct devlink_ops *ops;
int err;
+ devlink_rate = devlink_rate_get_from_info(devlink, info);
+ if (IS_ERR(devlink_rate))
+ return PTR_ERR(devlink_rate);
+
+ ops = devlink->ops;
if (!ops || !devlink_rate_set_ops_supported(ops, info, devlink_rate->type))
return -EOPNOTSUPP;
@@ -1704,18 +1713,22 @@ err_strdup:
static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb,
struct genl_info *info)
{
- struct devlink_rate *rate_node = info->user_ptr[1];
- struct devlink *devlink = rate_node->devlink;
- const struct devlink_ops *ops = devlink->ops;
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_rate *rate_node;
int err;
+ rate_node = devlink_rate_node_get_from_info(devlink, info);
+ if (IS_ERR(rate_node))
+ return PTR_ERR(rate_node);
+
if (refcount_read(&rate_node->refcnt) > 1) {
NL_SET_ERR_MSG(info->extack, "Node has children. Cannot delete node.");
return -EBUSY;
}
devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
- err = ops->rate_node_del(rate_node, rate_node->priv, info->extack);
+ err = devlink->ops->rate_node_del(rate_node, rate_node->priv,
+ info->extack);
if (rate_node->parent)
refcount_dec(&rate_node->parent->refcnt);
list_del(&rate_node->list);
@@ -1811,14 +1824,17 @@ static void devlink_linecard_notify(struct devlink_linecard *linecard,
msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
}
-static int devlink_nl_cmd_linecard_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_linecard_get_doit(struct sk_buff *skb, struct genl_info *info)
{
- struct devlink_linecard *linecard = info->user_ptr[1];
- struct devlink *devlink = linecard->devlink;
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_linecard *linecard;
struct sk_buff *msg;
int err;
+ linecard = devlink_linecard_get_from_info(devlink, info);
+ if (IS_ERR(linecard))
+ return PTR_ERR(linecard);
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -1837,9 +1853,10 @@ static int devlink_nl_cmd_linecard_get_doit(struct sk_buff *skb,
return genlmsg_reply(msg, info);
}
-static int devlink_nl_cmd_linecard_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb)
+static int devlink_nl_linecard_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_linecard *linecard;
@@ -1855,8 +1872,7 @@ static int devlink_nl_cmd_linecard_get_dump_one(struct sk_buff *msg,
err = devlink_nl_linecard_fill(msg, devlink, linecard,
DEVLINK_CMD_LINECARD_NEW,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI,
+ cb->nlh->nlmsg_seq, flags,
cb->extack);
mutex_unlock(&linecard->state_lock);
if (err) {
@@ -1869,9 +1885,11 @@ static int devlink_nl_cmd_linecard_get_dump_one(struct sk_buff *msg,
return err;
}
-const struct devlink_cmd devl_cmd_linecard_get = {
- .dump_one = devlink_nl_cmd_linecard_get_dump_one,
-};
+int devlink_nl_linecard_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_linecard_get_dump_one);
+}
static struct devlink_linecard_type *
devlink_linecard_type_lookup(struct devlink_linecard *linecard,
@@ -2008,10 +2026,15 @@ out:
static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb,
struct genl_info *info)
{
- struct devlink_linecard *linecard = info->user_ptr[1];
struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_linecard *linecard;
int err;
+ linecard = devlink_linecard_get_from_info(devlink, info);
+ if (IS_ERR(linecard))
+ return PTR_ERR(linecard);
+
if (info->attrs[DEVLINK_ATTR_LINECARD_TYPE]) {
const char *type;
@@ -2068,8 +2091,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_sb_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_sb *devlink_sb;
@@ -2096,8 +2118,8 @@ static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
}
static int
-devlink_nl_cmd_sb_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb)
+devlink_nl_sb_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_sb *devlink_sb;
@@ -2112,8 +2134,7 @@ devlink_nl_cmd_sb_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
DEVLINK_CMD_SB_NEW,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, flags);
if (err) {
state->idx = idx;
break;
@@ -2124,9 +2145,10 @@ devlink_nl_cmd_sb_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
return err;
}
-const struct devlink_cmd devl_cmd_sb_get = {
- .dump_one = devlink_nl_cmd_sb_get_dump_one,
-};
+int devlink_nl_sb_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_sb_get_dump_one);
+}
static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
struct devlink_sb *devlink_sb,
@@ -2171,8 +2193,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_sb_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_sb *devlink_sb;
@@ -2210,7 +2231,7 @@ static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
struct devlink *devlink,
struct devlink_sb *devlink_sb,
- u32 portid, u32 seq)
+ u32 portid, u32 seq, int flags)
{
u16 pool_count = devlink_sb_pool_count(devlink_sb);
u16 pool_index;
@@ -2225,7 +2246,7 @@ static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
devlink_sb,
pool_index,
DEVLINK_CMD_SB_POOL_NEW,
- portid, seq, NLM_F_MULTI);
+ portid, seq, flags);
if (err)
return err;
(*p_idx)++;
@@ -2234,9 +2255,8 @@ static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
}
static int
-devlink_nl_cmd_sb_pool_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb)
+devlink_nl_sb_pool_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_sb *devlink_sb;
@@ -2250,7 +2270,7 @@ devlink_nl_cmd_sb_pool_get_dump_one(struct sk_buff *msg,
err = __sb_pool_get_dumpit(msg, state->idx, &idx,
devlink, devlink_sb,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq);
+ cb->nlh->nlmsg_seq, flags);
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
@@ -2262,9 +2282,11 @@ devlink_nl_cmd_sb_pool_get_dump_one(struct sk_buff *msg,
return err;
}
-const struct devlink_cmd devl_cmd_sb_pool_get = {
- .dump_one = devlink_nl_cmd_sb_pool_get_dump_one,
-};
+int devlink_nl_sb_pool_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_sb_pool_get_dump_one);
+}
static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
u16 pool_index, u32 size,
@@ -2371,8 +2393,8 @@ sb_occ_get_failure:
return err;
}
-static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_sb_port_pool_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
struct devlink *devlink = devlink_port->devlink;
@@ -2412,7 +2434,7 @@ static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
struct devlink *devlink,
struct devlink_sb *devlink_sb,
- u32 portid, u32 seq)
+ u32 portid, u32 seq, int flags)
{
struct devlink_port *devlink_port;
u16 pool_count = devlink_sb_pool_count(devlink_sb);
@@ -2431,8 +2453,7 @@ static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
devlink_sb,
pool_index,
DEVLINK_CMD_SB_PORT_POOL_NEW,
- portid, seq,
- NLM_F_MULTI);
+ portid, seq, flags);
if (err)
return err;
(*p_idx)++;
@@ -2442,9 +2463,9 @@ static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
}
static int
-devlink_nl_cmd_sb_port_pool_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb)
+devlink_nl_sb_port_pool_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_sb *devlink_sb;
@@ -2458,7 +2479,7 @@ devlink_nl_cmd_sb_port_pool_get_dump_one(struct sk_buff *msg,
err = __sb_port_pool_get_dumpit(msg, state->idx, &idx,
devlink, devlink_sb,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq);
+ cb->nlh->nlmsg_seq, flags);
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
@@ -2470,9 +2491,11 @@ devlink_nl_cmd_sb_port_pool_get_dump_one(struct sk_buff *msg,
return err;
}
-const struct devlink_cmd devl_cmd_sb_port_pool_get = {
- .dump_one = devlink_nl_cmd_sb_port_pool_get_dump_one,
-};
+int devlink_nl_sb_port_pool_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_sb_port_pool_get_dump_one);
+}
static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
@@ -2580,8 +2603,8 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
struct devlink *devlink = devlink_port->devlink;
@@ -2628,7 +2651,7 @@ static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
int start, int *p_idx,
struct devlink *devlink,
struct devlink_sb *devlink_sb,
- u32 portid, u32 seq)
+ u32 portid, u32 seq, int flags)
{
struct devlink_port *devlink_port;
unsigned long port_index;
@@ -2649,7 +2672,7 @@ static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
DEVLINK_SB_POOL_TYPE_INGRESS,
DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
portid, seq,
- NLM_F_MULTI);
+ flags);
if (err)
return err;
(*p_idx)++;
@@ -2667,7 +2690,7 @@ static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
DEVLINK_SB_POOL_TYPE_EGRESS,
DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
portid, seq,
- NLM_F_MULTI);
+ flags);
if (err)
return err;
(*p_idx)++;
@@ -2676,10 +2699,10 @@ static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
return 0;
}
-static int
-devlink_nl_cmd_sb_tc_pool_bind_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb)
+static int devlink_nl_sb_tc_pool_bind_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_sb *devlink_sb;
@@ -2693,7 +2716,7 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dump_one(struct sk_buff *msg,
err = __sb_tc_pool_bind_get_dumpit(msg, state->idx, &idx,
devlink, devlink_sb,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq);
+ cb->nlh->nlmsg_seq, flags);
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
@@ -2705,9 +2728,12 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dump_one(struct sk_buff *msg,
return err;
}
-const struct devlink_cmd devl_cmd_sb_tc_pool_bind_get = {
- .dump_one = devlink_nl_cmd_sb_tc_pool_bind_get_dump_one,
-};
+int devlink_nl_sb_tc_pool_bind_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb,
+ devlink_nl_sb_tc_pool_bind_get_dump_one);
+}
static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 tc_index,
@@ -4155,9 +4181,10 @@ static void devlink_param_notify(struct devlink *devlink,
msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
}
-static int
-devlink_nl_cmd_param_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb)
+static int devlink_nl_param_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_param_item *param_item;
@@ -4168,8 +4195,7 @@ devlink_nl_cmd_param_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
err = devlink_nl_param_fill(msg, devlink, 0, param_item,
DEVLINK_CMD_PARAM_GET,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, flags);
if (err == -EOPNOTSUPP) {
err = 0;
} else if (err) {
@@ -4181,9 +4207,11 @@ devlink_nl_cmd_param_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
return err;
}
-const struct devlink_cmd devl_cmd_param_get = {
- .dump_one = devlink_nl_cmd_param_get_dump_one,
-};
+int devlink_nl_param_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_param_get_dump_one);
+}
static int
devlink_param_type_get_from_info(struct genl_info *info,
@@ -4272,8 +4300,8 @@ devlink_param_get_from_info(struct xarray *params, struct genl_info *info)
return devlink_param_find_by_name(params, param_name);
}
-static int devlink_nl_cmd_param_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_param_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_param_item *param_item;
@@ -4770,8 +4798,7 @@ static void devlink_region_snapshot_del(struct devlink_region *region,
kfree(snapshot);
}
-static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_region_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_port *port = NULL;
@@ -4819,8 +4846,7 @@ static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb,
static int devlink_nl_cmd_region_get_port_dumpit(struct sk_buff *msg,
struct netlink_callback *cb,
struct devlink_port *port,
- int *idx,
- int start)
+ int *idx, int start, int flags)
{
struct devlink_region *region;
int err = 0;
@@ -4834,7 +4860,7 @@ static int devlink_nl_cmd_region_get_port_dumpit(struct sk_buff *msg,
DEVLINK_CMD_REGION_GET,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- NLM_F_MULTI, region);
+ flags, region);
if (err)
goto out;
(*idx)++;
@@ -4844,9 +4870,10 @@ out:
return err;
}
-static int
-devlink_nl_cmd_region_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb)
+static int devlink_nl_region_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_region *region;
@@ -4863,8 +4890,8 @@ devlink_nl_cmd_region_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
err = devlink_nl_region_fill(msg, devlink,
DEVLINK_CMD_REGION_GET,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI, region);
+ cb->nlh->nlmsg_seq, flags,
+ region);
if (err) {
state->idx = idx;
return err;
@@ -4874,7 +4901,7 @@ devlink_nl_cmd_region_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
xa_for_each(&devlink->ports, port_index, port) {
err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, &idx,
- state->idx);
+ state->idx, flags);
if (err) {
state->idx = idx;
return err;
@@ -4884,9 +4911,11 @@ devlink_nl_cmd_region_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
return 0;
}
-const struct devlink_cmd devl_cmd_region_get = {
- .dump_one = devlink_nl_cmd_region_get_dump_one,
-};
+int devlink_nl_region_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_region_get_dump_one);
+}
static int devlink_nl_cmd_region_del(struct sk_buff *skb,
struct genl_info *info)
@@ -5172,7 +5201,7 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct nlattr *chunks_attr, *region_attr, *snapshot_attr;
u64 ret_offset, start_offset, end_offset = U64_MAX;
- struct nlattr **attrs = info->attrs;
+ struct nlattr **attrs = info->info.attrs;
struct devlink_port *port = NULL;
devlink_chunk_fill_t *region_cb;
struct devlink_region *region;
@@ -5195,8 +5224,8 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
goto out_unlock;
}
- if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
- index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+ if (attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
port = devlink_port_get_by_index(devlink, index);
if (!port) {
@@ -5632,8 +5661,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int devlink_nl_cmd_trap_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_trap_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct netlink_ext_ack *extack = info->extack;
struct devlink *devlink = info->user_ptr[0];
@@ -5667,9 +5695,9 @@ err_trap_fill:
return err;
}
-static int
-devlink_nl_cmd_trap_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb)
+static int devlink_nl_trap_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_trap_item *trap_item;
@@ -5684,8 +5712,7 @@ devlink_nl_cmd_trap_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
err = devlink_nl_trap_fill(msg, devlink, trap_item,
DEVLINK_CMD_TRAP_NEW,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, flags);
if (err) {
state->idx = idx;
break;
@@ -5696,9 +5723,10 @@ devlink_nl_cmd_trap_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
return err;
}
-const struct devlink_cmd devl_cmd_trap_get = {
- .dump_one = devlink_nl_cmd_trap_get_dump_one,
-};
+int devlink_nl_trap_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_trap_get_dump_one);
+}
static int __devlink_trap_action_set(struct devlink *devlink,
struct devlink_trap_item *trap_item,
@@ -5843,8 +5871,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int devlink_nl_cmd_trap_group_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_trap_group_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct netlink_ext_ack *extack = info->extack;
struct devlink *devlink = info->user_ptr[0];
@@ -5878,10 +5905,10 @@ err_trap_group_fill:
return err;
}
-static int
-devlink_nl_cmd_trap_group_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb)
+static int devlink_nl_trap_group_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_trap_group_item *group_item;
@@ -5897,8 +5924,7 @@ devlink_nl_cmd_trap_group_get_dump_one(struct sk_buff *msg,
err = devlink_nl_trap_group_fill(msg, devlink, group_item,
DEVLINK_CMD_TRAP_GROUP_NEW,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, flags);
if (err) {
state->idx = idx;
break;
@@ -5909,9 +5935,11 @@ devlink_nl_cmd_trap_group_get_dump_one(struct sk_buff *msg,
return err;
}
-const struct devlink_cmd devl_cmd_trap_group_get = {
- .dump_one = devlink_nl_cmd_trap_group_get_dump_one,
-};
+int devlink_nl_trap_group_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_trap_group_get_dump_one);
+}
static int
__devlink_trap_group_action_set(struct devlink *devlink,
@@ -6137,8 +6165,8 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int devlink_nl_cmd_trap_policer_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_trap_policer_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink_trap_policer_item *policer_item;
struct netlink_ext_ack *extack = info->extack;
@@ -6172,10 +6200,10 @@ err_trap_policer_fill:
return err;
}
-static int
-devlink_nl_cmd_trap_policer_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb)
+static int devlink_nl_trap_policer_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_trap_policer_item *policer_item;
@@ -6190,8 +6218,7 @@ devlink_nl_cmd_trap_policer_get_dump_one(struct sk_buff *msg,
err = devlink_nl_trap_policer_fill(msg, devlink, policer_item,
DEVLINK_CMD_TRAP_POLICER_NEW,
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI);
+ cb->nlh->nlmsg_seq, flags);
if (err) {
state->idx = idx;
break;
@@ -6202,9 +6229,11 @@ devlink_nl_cmd_trap_policer_get_dump_one(struct sk_buff *msg,
return err;
}
-const struct devlink_cmd devl_cmd_trap_policer_get = {
- .dump_one = devlink_nl_cmd_trap_policer_get_dump_one,
-};
+int devlink_nl_trap_policer_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_trap_policer_get_dump_one);
+}
static int
devlink_trap_policer_set(struct devlink *devlink,
@@ -6278,15 +6307,7 @@ static int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
return devlink_trap_policer_set(devlink, policer_item, info);
}
-const struct genl_small_ops devlink_nl_small_ops[54] = {
- {
- .cmd = DEVLINK_CMD_PORT_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- /* can be retrieved by unprivileged users */
- },
+const struct genl_small_ops devlink_nl_small_ops[40] = {
{
.cmd = DEVLINK_CMD_PORT_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -6295,17 +6316,9 @@ const struct genl_small_ops devlink_nl_small_ops[54] = {
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
{
- .cmd = DEVLINK_CMD_RATE_GET,
- .doit = devlink_nl_cmd_rate_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- .internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
- /* can be retrieved by unprivileged users */
- },
- {
.cmd = DEVLINK_CMD_RATE_SET,
.doit = devlink_nl_cmd_rate_set_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_RATE,
},
{
.cmd = DEVLINK_CMD_RATE_NEW,
@@ -6316,7 +6329,6 @@ const struct genl_small_ops devlink_nl_small_ops[54] = {
.cmd = DEVLINK_CMD_RATE_DEL,
.doit = devlink_nl_cmd_rate_del_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_RATE_NODE,
},
{
.cmd = DEVLINK_CMD_PORT_SPLIT,
@@ -6343,32 +6355,11 @@ const struct genl_small_ops devlink_nl_small_ops[54] = {
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
- {
- .cmd = DEVLINK_CMD_LINECARD_GET,
- .doit = devlink_nl_cmd_linecard_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
- /* can be retrieved by unprivileged users */
- },
+
{
.cmd = DEVLINK_CMD_LINECARD_SET,
.doit = devlink_nl_cmd_linecard_set_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_LINECARD,
- },
- {
- .cmd = DEVLINK_CMD_SB_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_SB_POOL_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_pool_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- /* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_SB_POOL_SET,
@@ -6377,14 +6368,6 @@ const struct genl_small_ops devlink_nl_small_ops[54] = {
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_port_pool_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- /* can be retrieved by unprivileged users */
- },
- {
.cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_port_pool_set_doit,
@@ -6392,14 +6375,6 @@ const struct genl_small_ops devlink_nl_small_ops[54] = {
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
{
- .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- /* can be retrieved by unprivileged users */
- },
- {
.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
@@ -6473,13 +6448,6 @@ const struct genl_small_ops devlink_nl_small_ops[54] = {
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = DEVLINK_CMD_PARAM_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_param_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- /* can be retrieved by unprivileged users */
- },
- {
.cmd = DEVLINK_CMD_PARAM_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_param_set_doit,
@@ -6501,13 +6469,6 @@ const struct genl_small_ops devlink_nl_small_ops[54] = {
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
{
- .cmd = DEVLINK_CMD_REGION_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_region_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- .flags = GENL_ADMIN_PERM,
- },
- {
.cmd = DEVLINK_CMD_REGION_NEW,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_region_new,
@@ -6527,14 +6488,6 @@ const struct genl_small_ops devlink_nl_small_ops[54] = {
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- /* can be retrieved by unprivileged users */
- },
- {
.cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_health_reporter_set_doit,
@@ -6583,45 +6536,21 @@ const struct genl_small_ops devlink_nl_small_ops[54] = {
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = DEVLINK_CMD_TRAP_GET,
- .doit = devlink_nl_cmd_trap_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- /* can be retrieved by unprivileged users */
- },
- {
.cmd = DEVLINK_CMD_TRAP_SET,
.doit = devlink_nl_cmd_trap_set_doit,
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = DEVLINK_CMD_TRAP_GROUP_GET,
- .doit = devlink_nl_cmd_trap_group_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- /* can be retrieved by unprivileged users */
- },
- {
.cmd = DEVLINK_CMD_TRAP_GROUP_SET,
.doit = devlink_nl_cmd_trap_group_set_doit,
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = DEVLINK_CMD_TRAP_POLICER_GET,
- .doit = devlink_nl_cmd_trap_policer_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- /* can be retrieved by unprivileged users */
- },
- {
.cmd = DEVLINK_CMD_TRAP_POLICER_SET,
.doit = devlink_nl_cmd_trap_policer_set_doit,
.flags = GENL_ADMIN_PERM,
},
{
- .cmd = DEVLINK_CMD_SELFTESTS_GET,
- .doit = devlink_nl_cmd_selftests_get_doit,
- .dumpit = devlink_nl_instance_iter_dumpit,
- /* can be retrieved by unprivileged users */
- },
- {
.cmd = DEVLINK_CMD_SELFTESTS_RUN,
.doit = devlink_nl_cmd_selftests_run,
.flags = GENL_ADMIN_PERM,
diff --git a/net/devlink/netlink.c b/net/devlink/netlink.c
index bada2819827b..72a5005a64cd 100644
--- a/net/devlink/netlink.c
+++ b/net/devlink/netlink.c
@@ -109,10 +109,9 @@ devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs)
return ERR_PTR(-ENODEV);
}
-int devlink_nl_pre_doit(const struct genl_split_ops *ops,
- struct sk_buff *skb, struct genl_info *info)
+static int __devlink_nl_pre_doit(struct sk_buff *skb, struct genl_info *info,
+ u8 flags)
{
- struct devlink_linecard *linecard;
struct devlink_port *devlink_port;
struct devlink *devlink;
int err;
@@ -122,42 +121,17 @@ int devlink_nl_pre_doit(const struct genl_split_ops *ops,
return PTR_ERR(devlink);
info->user_ptr[0] = devlink;
- if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
+ if (flags & DEVLINK_NL_FLAG_NEED_PORT) {
devlink_port = devlink_port_get_from_info(devlink, info);
if (IS_ERR(devlink_port)) {
err = PTR_ERR(devlink_port);
goto unlock;
}
info->user_ptr[1] = devlink_port;
- } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) {
+ } else if (flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) {
devlink_port = devlink_port_get_from_info(devlink, info);
if (!IS_ERR(devlink_port))
info->user_ptr[1] = devlink_port;
- } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE) {
- struct devlink_rate *devlink_rate;
-
- devlink_rate = devlink_rate_get_from_info(devlink, info);
- if (IS_ERR(devlink_rate)) {
- err = PTR_ERR(devlink_rate);
- goto unlock;
- }
- info->user_ptr[1] = devlink_rate;
- } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_RATE_NODE) {
- struct devlink_rate *rate_node;
-
- rate_node = devlink_rate_node_get_from_info(devlink, info);
- if (IS_ERR(rate_node)) {
- err = PTR_ERR(rate_node);
- goto unlock;
- }
- info->user_ptr[1] = rate_node;
- } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_LINECARD) {
- linecard = devlink_linecard_get_from_info(devlink, info);
- if (IS_ERR(linecard)) {
- err = PTR_ERR(linecard);
- goto unlock;
- }
- info->user_ptr[1] = linecard;
}
return 0;
@@ -167,6 +141,25 @@ unlock:
return err;
}
+int devlink_nl_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ return __devlink_nl_pre_doit(skb, info, ops->internal_flags);
+}
+
+int devlink_nl_pre_doit_port(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ return __devlink_nl_pre_doit(skb, info, DEVLINK_NL_FLAG_NEED_PORT);
+}
+
+int devlink_nl_pre_doit_port_optional(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
+ struct genl_info *info)
+{
+ return __devlink_nl_pre_doit(skb, info, DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT);
+}
+
void devlink_nl_post_doit(const struct genl_split_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
@@ -177,25 +170,30 @@ void devlink_nl_post_doit(const struct genl_split_ops *ops,
devlink_put(devlink);
}
-static const struct devlink_cmd *devl_cmds[] = {
- [DEVLINK_CMD_PORT_GET] = &devl_cmd_port_get,
- [DEVLINK_CMD_SB_GET] = &devl_cmd_sb_get,
- [DEVLINK_CMD_SB_POOL_GET] = &devl_cmd_sb_pool_get,
- [DEVLINK_CMD_SB_PORT_POOL_GET] = &devl_cmd_sb_port_pool_get,
- [DEVLINK_CMD_SB_TC_POOL_BIND_GET] = &devl_cmd_sb_tc_pool_bind_get,
- [DEVLINK_CMD_PARAM_GET] = &devl_cmd_param_get,
- [DEVLINK_CMD_REGION_GET] = &devl_cmd_region_get,
- [DEVLINK_CMD_HEALTH_REPORTER_GET] = &devl_cmd_health_reporter_get,
- [DEVLINK_CMD_TRAP_GET] = &devl_cmd_trap_get,
- [DEVLINK_CMD_TRAP_GROUP_GET] = &devl_cmd_trap_group_get,
- [DEVLINK_CMD_TRAP_POLICER_GET] = &devl_cmd_trap_policer_get,
- [DEVLINK_CMD_RATE_GET] = &devl_cmd_rate_get,
- [DEVLINK_CMD_LINECARD_GET] = &devl_cmd_linecard_get,
- [DEVLINK_CMD_SELFTESTS_GET] = &devl_cmd_selftests_get,
-};
+static int devlink_nl_inst_single_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb, int flags,
+ devlink_nl_dump_one_func_t *dump_one,
+ struct nlattr **attrs)
+{
+ struct devlink *devlink;
+ int err;
-int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
- devlink_nl_dump_one_func_t *dump_one)
+ devlink = devlink_get_from_attrs_lock(sock_net(msg->sk), attrs);
+ if (IS_ERR(devlink))
+ return PTR_ERR(devlink);
+ err = dump_one(msg, devlink, cb, flags | NLM_F_DUMP_FILTERED);
+
+ devl_unlock(devlink);
+ devlink_put(devlink);
+
+ if (err != -EMSGSIZE)
+ return err;
+ return msg->len;
+}
+
+static int devlink_nl_inst_iter_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb, int flags,
+ devlink_nl_dump_one_func_t *dump_one)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink *devlink;
@@ -206,7 +204,7 @@ int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
devl_lock(devlink);
if (devl_is_registered(devlink))
- err = dump_one(msg, devlink, cb);
+ err = dump_one(msg, devlink, cb, flags);
else
err = 0;
@@ -227,13 +225,19 @@ int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
return msg->len;
}
-int devlink_nl_instance_iter_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
+ devlink_nl_dump_one_func_t *dump_one)
{
- const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- const struct devlink_cmd *cmd = devl_cmds[info->op.cmd];
+ const struct genl_info *info = genl_info_dump(cb);
+ struct nlattr **attrs = info->attrs;
+ int flags = NLM_F_MULTI;
- return devlink_nl_dumpit(msg, cb, cmd->dump_one);
+ if (attrs &&
+ (attrs[DEVLINK_ATTR_BUS_NAME] || attrs[DEVLINK_ATTR_DEV_NAME]))
+ return devlink_nl_inst_single_dumpit(msg, cb, flags, dump_one,
+ attrs);
+ else
+ return devlink_nl_inst_iter_dumpit(msg, cb, flags, dump_one);
}
struct genl_family devlink_nl_family __ro_after_init = {
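[Editor's illustrative sketch, not part of the patch: the rewritten devlink_nl_dumpit() above picks between dumping a single instance (when the request names a bus/device, replying with NLM_F_DUMP_FILTERED) and iterating over all registered instances. The toy userspace program below loosely mirrors that dispatch shape; every toy_* name and flag value is invented for illustration and is not kernel API.]

/* Minimal userspace sketch of the single-vs-iterate dump dispatch.
 * All names are illustrative; this is not kernel code.
 */
#include <stdio.h>
#include <string.h>

struct toy_devlink {
	const char *bus_name;
	const char *dev_name;
};

static const struct toy_devlink toy_instances[] = {
	{ "pci", "0000:01:00.0" },
	{ "pci", "0000:02:00.0" },
	{ "auxiliary", "toy_drv.eth.0" },
};

#define TOY_F_MULTI		0x1	/* stand-in for NLM_F_MULTI */
#define TOY_F_DUMP_FILTERED	0x2	/* stand-in for NLM_F_DUMP_FILTERED */

static int toy_dump_one(const struct toy_devlink *d, int flags)
{
	printf("%s/%s flags=0x%x\n", d->bus_name, d->dev_name, flags);
	return 0;
}

/* Loosely mirrors the decision in devlink_nl_dumpit(): if the request
 * carries instance-identifying attributes, dump only that instance and
 * mark the reply as filtered; otherwise walk every registered instance.
 */
static int toy_dumpit(const char *bus_filter, const char *dev_filter)
{
	int flags = TOY_F_MULTI;
	size_t i;

	if (bus_filter || dev_filter) {
		for (i = 0; i < sizeof(toy_instances) / sizeof(toy_instances[0]); i++) {
			const struct toy_devlink *d = &toy_instances[i];

			if (bus_filter && strcmp(d->bus_name, bus_filter))
				continue;
			if (dev_filter && strcmp(d->dev_name, dev_filter))
				continue;
			return toy_dump_one(d, flags | TOY_F_DUMP_FILTERED);
		}
		return -1;	/* no matching instance */
	}

	for (i = 0; i < sizeof(toy_instances) / sizeof(toy_instances[0]); i++)
		toy_dump_one(&toy_instances[i], flags);
	return 0;
}

int main(void)
{
	toy_dumpit(NULL, NULL);			/* iterate everything */
	toy_dumpit("pci", "0000:02:00.0");	/* filtered single dump */
	return 0;
}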
diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c
index 32d8cbed0c30..467b7a431de1 100644
--- a/net/devlink/netlink_gen.c
+++ b/net/devlink/netlink_gen.c
@@ -16,14 +16,199 @@ static const struct nla_policy devlink_get_nl_policy[DEVLINK_ATTR_DEV_NAME + 1]
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_PORT_GET - do */
+static const struct nla_policy devlink_port_get_do_nl_policy[DEVLINK_ATTR_PORT_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_PORT_GET - dump */
+static const struct nla_policy devlink_port_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_SB_GET - do */
+static const struct nla_policy devlink_sb_get_do_nl_policy[DEVLINK_ATTR_SB_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_SB_GET - dump */
+static const struct nla_policy devlink_sb_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_SB_POOL_GET - do */
+static const struct nla_policy devlink_sb_pool_get_do_nl_policy[DEVLINK_ATTR_SB_POOL_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16, },
+};
+
+/* DEVLINK_CMD_SB_POOL_GET - dump */
+static const struct nla_policy devlink_sb_pool_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_SB_PORT_POOL_GET - do */
+static const struct nla_policy devlink_sb_port_pool_get_do_nl_policy[DEVLINK_ATTR_SB_POOL_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16, },
+};
+
+/* DEVLINK_CMD_SB_PORT_POOL_GET - dump */
+static const struct nla_policy devlink_sb_port_pool_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_SB_TC_POOL_BIND_GET - do */
+static const struct nla_policy devlink_sb_tc_pool_bind_get_do_nl_policy[DEVLINK_ATTR_SB_TC_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_SB_POOL_TYPE] = NLA_POLICY_MAX(NLA_U8, 1),
+ [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16, },
+};
+
+/* DEVLINK_CMD_SB_TC_POOL_BIND_GET - dump */
+static const struct nla_policy devlink_sb_tc_pool_bind_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_PARAM_GET - do */
+static const struct nla_policy devlink_param_get_do_nl_policy[DEVLINK_ATTR_PARAM_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_PARAM_GET - dump */
+static const struct nla_policy devlink_param_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_REGION_GET - do */
+static const struct nla_policy devlink_region_get_do_nl_policy[DEVLINK_ATTR_REGION_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_REGION_GET - dump */
+static const struct nla_policy devlink_region_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
/* DEVLINK_CMD_INFO_GET - do */
static const struct nla_policy devlink_info_get_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_HEALTH_REPORTER_GET - do */
+static const struct nla_policy devlink_health_reporter_get_do_nl_policy[DEVLINK_ATTR_HEALTH_REPORTER_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_HEALTH_REPORTER_GET - dump */
+static const struct nla_policy devlink_health_reporter_get_dump_nl_policy[DEVLINK_ATTR_PORT_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_TRAP_GET - do */
+static const struct nla_policy devlink_trap_get_do_nl_policy[DEVLINK_ATTR_TRAP_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_TRAP_GET - dump */
+static const struct nla_policy devlink_trap_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_TRAP_GROUP_GET - do */
+static const struct nla_policy devlink_trap_group_get_do_nl_policy[DEVLINK_ATTR_TRAP_GROUP_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_TRAP_GROUP_GET - dump */
+static const struct nla_policy devlink_trap_group_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_TRAP_POLICER_GET - do */
+static const struct nla_policy devlink_trap_policer_get_do_nl_policy[DEVLINK_ATTR_TRAP_POLICER_ID + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_TRAP_POLICER_GET - dump */
+static const struct nla_policy devlink_trap_policer_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_RATE_GET - do */
+static const struct nla_policy devlink_rate_get_do_nl_policy[DEVLINK_ATTR_RATE_NODE_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_RATE_GET - dump */
+static const struct nla_policy devlink_rate_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_LINECARD_GET - do */
+static const struct nla_policy devlink_linecard_get_do_nl_policy[DEVLINK_ATTR_LINECARD_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_LINECARD_GET - dump */
+static const struct nla_policy devlink_linecard_get_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_SELFTESTS_GET - do */
+static const struct nla_policy devlink_selftests_get_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
/* Ops table for devlink */
-const struct genl_split_ops devlink_nl_ops[4] = {
+const struct genl_split_ops devlink_nl_ops[32] = {
{
.cmd = DEVLINK_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
@@ -41,6 +226,125 @@ const struct genl_split_ops devlink_nl_ops[4] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_PORT_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_port_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_port_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_PORT_INDEX,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_GET,
+ .dumpit = devlink_nl_port_get_dumpit,
+ .policy = devlink_port_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_sb_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_sb_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_SB_INDEX,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_GET,
+ .dumpit = devlink_nl_sb_get_dumpit,
+ .policy = devlink_sb_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_POOL_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_sb_pool_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_sb_pool_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_SB_POOL_INDEX,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_POOL_GET,
+ .dumpit = devlink_nl_sb_pool_get_dumpit,
+ .policy = devlink_sb_pool_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_sb_port_pool_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_sb_port_pool_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_SB_POOL_INDEX,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
+ .dumpit = devlink_nl_sb_port_pool_get_dumpit,
+ .policy = devlink_sb_port_pool_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_sb_tc_pool_bind_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_sb_tc_pool_bind_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_SB_TC_INDEX,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
+ .dumpit = devlink_nl_sb_tc_pool_bind_get_dumpit,
+ .policy = devlink_sb_tc_pool_bind_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_PARAM_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_param_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_param_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_PARAM_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_PARAM_GET,
+ .dumpit = devlink_nl_param_get_dumpit,
+ .policy = devlink_param_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_REGION_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port_optional,
+ .doit = devlink_nl_region_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_region_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_REGION_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_REGION_GET,
+ .dumpit = devlink_nl_region_get_dumpit,
+ .policy = devlink_region_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
.cmd = DEVLINK_CMD_INFO_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit,
@@ -56,4 +360,122 @@ const struct genl_split_ops devlink_nl_ops[4] = {
.dumpit = devlink_nl_info_get_dumpit,
.flags = GENL_CMD_CAP_DUMP,
},
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port_optional,
+ .doit = devlink_nl_health_reporter_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_health_reporter_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_HEALTH_REPORTER_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_GET,
+ .dumpit = devlink_nl_health_reporter_get_dumpit,
+ .policy = devlink_health_reporter_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_PORT_INDEX,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_TRAP_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_trap_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_trap_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_TRAP_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_TRAP_GET,
+ .dumpit = devlink_nl_trap_get_dumpit,
+ .policy = devlink_trap_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_TRAP_GROUP_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_trap_group_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_trap_group_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_TRAP_GROUP_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_TRAP_GROUP_GET,
+ .dumpit = devlink_nl_trap_group_get_dumpit,
+ .policy = devlink_trap_group_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_TRAP_POLICER_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_trap_policer_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_trap_policer_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_TRAP_POLICER_ID,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_TRAP_POLICER_GET,
+ .dumpit = devlink_nl_trap_policer_get_dumpit,
+ .policy = devlink_trap_policer_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_RATE_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_rate_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_rate_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_RATE_NODE_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_RATE_GET,
+ .dumpit = devlink_nl_rate_get_dumpit,
+ .policy = devlink_rate_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_LINECARD_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_linecard_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_linecard_get_do_nl_policy,
+ .maxattr = DEVLINK_ATTR_LINECARD_INDEX,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_LINECARD_GET,
+ .dumpit = devlink_nl_linecard_get_dumpit,
+ .policy = devlink_linecard_get_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_SELFTESTS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_selftests_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_selftests_get_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_SELFTESTS_GET,
+ .validate = GENL_DONT_VALIDATE_DUMP,
+ .dumpit = devlink_nl_selftests_get_dumpit,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
};
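[Editor's illustrative sketch, not part of the patch: the generated table above registers two split-op entries per GET command, one for "do" and one for "dump", each with its own policy and maxattr, so the dump form only admits the instance-selector attributes (bus/dev name), and anything beyond that can be rejected before the handler runs when validation is strict. The toy validator below models that per-form policy idea; the attribute numbers, types, and names are made up and do not reflect the real genetlink validator.]

/* Toy model of per-form (do vs. dump) attribute policies. */
#include <stdbool.h>
#include <stdio.h>

enum toy_attr {
	TOY_ATTR_BUS_NAME = 1,
	TOY_ATTR_DEV_NAME,
	TOY_ATTR_PORT_INDEX,
	TOY_ATTR_COUNT,
};

struct toy_policy {
	int maxattr;			/* highest attr this form accepts */
	bool allowed[TOY_ATTR_COUNT];
};

static const struct toy_policy toy_port_get_do_policy = {
	.maxattr = TOY_ATTR_PORT_INDEX,
	.allowed = {
		[TOY_ATTR_BUS_NAME] = true,
		[TOY_ATTR_DEV_NAME] = true,
		[TOY_ATTR_PORT_INDEX] = true,
	},
};

static const struct toy_policy toy_port_get_dump_policy = {
	.maxattr = TOY_ATTR_DEV_NAME,	/* dump only filters by instance */
	.allowed = {
		[TOY_ATTR_BUS_NAME] = true,
		[TOY_ATTR_DEV_NAME] = true,
	},
};

static bool toy_validate(const struct toy_policy *policy, int attr)
{
	return attr <= policy->maxattr && policy->allowed[attr];
}

int main(void)
{
	/* A port index makes sense for a do request... */
	printf("do accepts PORT_INDEX: %d\n",
	       toy_validate(&toy_port_get_do_policy, TOY_ATTR_PORT_INDEX));
	/* ...but falls outside the dump form's policy. */
	printf("dump accepts PORT_INDEX: %d\n",
	       toy_validate(&toy_port_get_dump_policy, TOY_ATTR_PORT_INDEX));
	return 0;
}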
diff --git a/net/devlink/netlink_gen.h b/net/devlink/netlink_gen.h
index 11980e04a718..f8bbc93e39be 100644
--- a/net/devlink/netlink_gen.h
+++ b/net/devlink/netlink_gen.h
@@ -12,18 +12,68 @@
#include <uapi/linux/devlink.h>
/* Ops table for devlink */
-extern const struct genl_split_ops devlink_nl_ops[4];
+extern const struct genl_split_ops devlink_nl_ops[32];
int devlink_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
+int devlink_nl_pre_doit_port(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_pre_doit_port_optional(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
+ struct genl_info *info);
void
devlink_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
int devlink_nl_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int devlink_nl_port_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_port_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_sb_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_sb_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int devlink_nl_sb_pool_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_sb_pool_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_sb_port_pool_get_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_sb_port_pool_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_sb_tc_pool_bind_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_param_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_param_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_region_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_region_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
int devlink_nl_info_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_info_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_health_reporter_get_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_health_reporter_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_trap_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_trap_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_trap_group_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_trap_group_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_trap_policer_get_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_trap_policer_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_rate_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_rate_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_linecard_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_linecard_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_selftests_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_selftests_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
#endif /* _LINUX_DEVLINK_GEN_H */
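[Editor's illustrative sketch, not part of the patch: the header above exports devlink_nl_pre_doit_port() and devlink_nl_pre_doit_port_optional(), thin wrappers that forward fixed flag values to one shared helper instead of dispatching on per-op internal_flags. A minimal sketch of that wrapper pattern follows; the names and flag values are invented.]

/* Sketch of "thin per-op wrappers around one flagged helper". */
#include <stdio.h>

#define TOY_NEED_PORT		0x1
#define TOY_PORT_OPTIONAL	0x2

static int toy_common_pre_doit(const char *op, unsigned int flags)
{
	if (flags & TOY_NEED_PORT)
		printf("%s: must resolve a port, fail if absent\n", op);
	else if (flags & TOY_PORT_OPTIONAL)
		printf("%s: resolve a port only if one is given\n", op);
	else
		printf("%s: device handle only\n", op);
	return 0;
}

/* Each generated op points at one of these instead of carrying flags. */
static int toy_pre_doit(const char *op)
{
	return toy_common_pre_doit(op, 0);
}

static int toy_pre_doit_port(const char *op)
{
	return toy_common_pre_doit(op, TOY_NEED_PORT);
}

static int toy_pre_doit_port_optional(const char *op)
{
	return toy_common_pre_doit(op, TOY_PORT_OPTIONAL);
}

int main(void)
{
	toy_pre_doit("sb_get");
	toy_pre_doit_port("port_get");
	toy_pre_doit_port_optional("health_reporter_get");
	return 0;
}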
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index 61c40e889a4d..7b4bbd674bae 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -24,7 +24,7 @@ const struct nla_policy ethnl_channels_get_policy[] = {
static int channels_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct channels_reply_data *data = CHANNELS_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c
index 01a59ce211c8..83112c1a71ae 100644
--- a/net/ethtool/coalesce.c
+++ b/net/ethtool/coalesce.c
@@ -59,10 +59,9 @@ const struct nla_policy ethnl_coalesce_get_policy[] = {
static int coalesce_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct coalesce_reply_data *data = COALESCE_REPDATA(reply_base);
- struct netlink_ext_ack *extack = info ? info->extack : NULL;
struct net_device *dev = reply_base->dev;
int ret;
@@ -73,7 +72,8 @@ static int coalesce_prepare_data(const struct ethnl_req_info *req_base,
if (ret < 0)
return ret;
ret = dev->ethtool_ops->get_coalesce(dev, &data->coalesce,
- &data->kernel_coalesce, extack);
+ &data->kernel_coalesce,
+ info->extack);
ethnl_ops_complete(dev);
return ret;
diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c
index e4369769817e..0b2dea56d461 100644
--- a/net/ethtool/debug.c
+++ b/net/ethtool/debug.c
@@ -23,7 +23,7 @@ const struct nla_policy ethnl_debug_get_policy[] = {
static int debug_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct debug_reply_data *data = DEBUG_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
index 42104bcb0e47..2853394d06a8 100644
--- a/net/ethtool/eee.c
+++ b/net/ethtool/eee.c
@@ -26,7 +26,7 @@ const struct nla_policy ethnl_eee_get_policy[] = {
static int eee_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct eee_reply_data *data = EEE_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c
index 49c0a2a77f02..6209c3a9c8f7 100644
--- a/net/ethtool/eeprom.c
+++ b/net/ethtool/eeprom.c
@@ -51,8 +51,7 @@ static int fallback_set_params(struct eeprom_req_info *request,
}
static int eeprom_fallback(struct eeprom_req_info *request,
- struct eeprom_reply_data *reply,
- struct genl_info *info)
+ struct eeprom_reply_data *reply)
{
struct net_device *dev = reply->base.dev;
struct ethtool_modinfo modinfo = {0};
@@ -103,7 +102,7 @@ static int get_module_eeprom_by_page(struct net_device *dev,
static int eeprom_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct eeprom_reply_data *reply = MODULE_EEPROM_REPDATA(reply_base);
struct eeprom_req_info *request = MODULE_EEPROM_REQINFO(req_base);
@@ -124,7 +123,7 @@ static int eeprom_prepare_data(const struct ethnl_req_info *req_base,
if (ret)
goto err_free;
- ret = get_module_eeprom_by_page(dev, &page_data, info ? info->extack : NULL);
+ ret = get_module_eeprom_by_page(dev, &page_data, info->extack);
if (ret < 0)
goto err_ops;
@@ -140,7 +139,7 @@ err_free:
kfree(page_data.data);
if (ret == -EOPNOTSUPP)
- return eeprom_fallback(request, reply, info);
+ return eeprom_fallback(request, reply);
return ret;
}
diff --git a/net/ethtool/features.c b/net/ethtool/features.c
index 55d449a2d3fc..a79af8c25a07 100644
--- a/net/ethtool/features.c
+++ b/net/ethtool/features.c
@@ -35,7 +35,7 @@ static void ethnl_features_to_bitmap32(u32 *dest, netdev_features_t src)
static int features_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct features_reply_data *data = FEATURES_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/fec.c b/net/ethtool/fec.c
index 0d9a3d153170..e7d3f2c352a3 100644
--- a/net/ethtool/fec.c
+++ b/net/ethtool/fec.c
@@ -92,7 +92,7 @@ fec_stats_recalc(struct fec_stat_grp *grp, struct ethtool_fec_stat *stats)
static int fec_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(active_fec_modes) = {};
struct fec_reply_data *data = FEC_REPDATA(reply_base);
diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c
index 310dfe63292a..5c317d23787b 100644
--- a/net/ethtool/linkinfo.c
+++ b/net/ethtool/linkinfo.c
@@ -23,7 +23,7 @@ const struct nla_policy ethnl_linkinfo_get_policy[] = {
static int linkinfo_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct linkinfo_reply_data *data = LINKINFO_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
index 20165e07ef90..b2591db49f7d 100644
--- a/net/ethtool/linkmodes.c
+++ b/net/ethtool/linkmodes.c
@@ -27,7 +27,7 @@ const struct nla_policy ethnl_linkmodes_get_policy[] = {
static int linkmodes_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct linkmodes_reply_data *data = LINKMODES_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
index 2158c17a0b32..b2de2108b356 100644
--- a/net/ethtool/linkstate.c
+++ b/net/ethtool/linkstate.c
@@ -81,7 +81,7 @@ static int linkstate_get_link_ext_state(struct net_device *dev,
static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct linkstate_reply_data *data = LINKSTATE_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/mm.c b/net/ethtool/mm.c
index 4058a557b5a4..2816bb23c3ad 100644
--- a/net/ethtool/mm.c
+++ b/net/ethtool/mm.c
@@ -27,7 +27,7 @@ const struct nla_policy ethnl_mm_get_policy[ETHTOOL_A_MM_HEADER + 1] = {
static int mm_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct mm_reply_data *data = MM_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/module.c b/net/ethtool/module.c
index e0d539b21423..ceb575efc290 100644
--- a/net/ethtool/module.c
+++ b/net/ethtool/module.c
@@ -38,10 +38,9 @@ static int module_get_power_mode(struct net_device *dev,
static int module_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct module_reply_data *data = MODULE_REPDATA(reply_base);
- struct netlink_ext_ack *extack = info ? info->extack : NULL;
struct net_device *dev = reply_base->dev;
int ret;
@@ -49,7 +48,7 @@ static int module_prepare_data(const struct ethnl_req_info *req_base,
if (ret < 0)
return ret;
- ret = module_get_power_mode(dev, data, extack);
+ ret = module_get_power_mode(dev, data, info->extack);
if (ret < 0)
goto out_complete;
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index ae344f1b0bbd..3bbd5afb7b31 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -316,10 +316,8 @@ static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
/**
* ethnl_default_parse() - Parse request message
* @req_info: pointer to structure to put data into
- * @tb: parsed attributes
- * @net: request netns
+ * @info: genl_info from the request
* @request_ops: struct request_ops for request type
- * @extack: netlink extack for error reporting
* @require_dev: fail if no device identified in header
*
* Parse universal request header and call request specific ->parse_request()
@@ -328,19 +326,21 @@ static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
* Return: 0 on success or negative error code
*/
static int ethnl_default_parse(struct ethnl_req_info *req_info,
- struct nlattr **tb, struct net *net,
+ const struct genl_info *info,
const struct ethnl_request_ops *request_ops,
- struct netlink_ext_ack *extack, bool require_dev)
+ bool require_dev)
{
+ struct nlattr **tb = info->attrs;
int ret;
ret = ethnl_parse_header_dev_get(req_info, tb[request_ops->hdr_attr],
- net, extack, require_dev);
+ genl_info_net(info), info->extack,
+ require_dev);
if (ret < 0)
return ret;
if (request_ops->parse_request) {
- ret = request_ops->parse_request(req_info, tb, extack);
+ ret = request_ops->parse_request(req_info, tb, info->extack);
if (ret < 0)
return ret;
}
@@ -393,8 +393,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
}
- ret = ethnl_default_parse(req_info, info->attrs, genl_info_net(info),
- ops, info->extack, !ops->allow_nodev_do);
+ ret = ethnl_default_parse(req_info, info, ops, !ops->allow_nodev_do);
if (ret < 0)
goto err_dev;
ethnl_init_reply_data(reply_data, ops, req_info->dev);
@@ -445,12 +444,12 @@ err_dev:
static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
const struct ethnl_dump_ctx *ctx,
- struct netlink_callback *cb)
+ const struct genl_info *info)
{
void *ehdr;
int ret;
- ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ ehdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
&ethtool_genl_family, NLM_F_MULTI,
ctx->ops->reply_cmd);
if (!ehdr)
@@ -458,7 +457,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev);
rtnl_lock();
- ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, NULL);
+ ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, info);
rtnl_unlock();
if (ret < 0)
goto out;
@@ -496,7 +495,7 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
dev_hold(dev);
rtnl_unlock();
- ret = ethnl_default_dump_one(skb, dev, ctx, cb);
+ ret = ethnl_default_dump_one(skb, dev, ctx, genl_info_dump(cb));
rtnl_lock();
dev_put(dev);
@@ -538,8 +537,7 @@ static int ethnl_default_start(struct netlink_callback *cb)
goto free_req_info;
}
- ret = ethnl_default_parse(req_info, info->attrs, sock_net(cb->skb->sk),
- ops, cb->extack, false);
+ ret = ethnl_default_parse(req_info, &info->info, ops, false);
if (req_info->dev) {
/* We ignore device specification in dump requests but as the
* same parser as for non-dump (doit) requests is used, it
@@ -649,11 +647,14 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
struct ethnl_reply_data *reply_data;
const struct ethnl_request_ops *ops;
struct ethnl_req_info *req_info;
+ struct genl_info info;
struct sk_buff *skb;
void *reply_payload;
int reply_len;
int ret;
+ genl_info_init_ntf(&info, &ethtool_genl_family, cmd);
+
if (WARN_ONCE(cmd > ETHTOOL_MSG_KERNEL_MAX ||
!ethnl_default_notify_ops[cmd],
"unexpected notification type %u\n", cmd))
@@ -672,7 +673,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
req_info->flags |= ETHTOOL_FLAG_COMPACT_BITSETS;
ethnl_init_reply_data(reply_data, ops, dev);
- ret = ops->prepare_data(req_info, reply_data, NULL);
+ ret = ops->prepare_data(req_info, reply_data, &info);
if (ret < 0)
goto err_cleanup;
ret = ops->reply_size(req_info, reply_data);
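[Editor's illustrative sketch, not part of the patch: the ethtool netlink change above makes prepare_data() always receive a non-NULL const genl_info; the notification path builds one on the stack via genl_info_init_ntf() (and wol.c below tests it with genl_info_is_ntf()), so the per-callback "info ? info->extack : NULL" checks disappear. The toy model below shows that shape only; toy_info and its fields are invented and are not the kernel structures.]

/* Rough model of "always pass a real info struct, even for notifications",
 * so callbacks can dereference it unconditionally. Types are invented.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_info {
	bool is_ntf;		/* notification vs. user request */
	const char *extack;	/* stand-in for the extended-ack handle */
};

/* Stand-in for genl_info_init_ntf(): a zeroed info marked as notification. */
static void toy_info_init_ntf(struct toy_info *info)
{
	memset(info, 0, sizeof(*info));
	info->is_ntf = true;
}

static int toy_prepare_data(const struct toy_info *info)
{
	/* No NULL check on info needed; extack may simply be unset. */
	if (info->extack)
		printf("would report errors via: %s\n", info->extack);
	printf("notification? %s\n", info->is_ntf ? "yes" : "no");
	return 0;
}

int main(void)
{
	struct toy_info req = { .is_ntf = false, .extack = "user request" };
	struct toy_info ntf;

	toy_prepare_data(&req);		/* doit/dumpit path */

	toy_info_init_ntf(&ntf);	/* notification path builds its own */
	toy_prepare_data(&ntf);
	return 0;
}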
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 79424b34b553..9a333a8d04c1 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -355,7 +355,7 @@ struct ethnl_request_ops {
struct netlink_ext_ack *extack);
int (*prepare_data)(const struct ethnl_req_info *req_info,
struct ethnl_reply_data *reply_data,
- struct genl_info *info);
+ const struct genl_info *info);
int (*reply_size)(const struct ethnl_req_info *req_info,
const struct ethnl_reply_data *reply_data);
int (*fill_reply)(struct sk_buff *skb,
diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
index 6657d0b888d8..f7c847aeb1a2 100644
--- a/net/ethtool/pause.c
+++ b/net/ethtool/pause.c
@@ -51,10 +51,9 @@ static int pause_parse_request(struct ethnl_req_info *req_base,
static int pause_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
const struct pause_req_info *req_info = PAUSE_REQINFO(req_base);
- struct netlink_ext_ack *extack = info ? info->extack : NULL;
struct pause_reply_data *data = PAUSE_REPDATA(reply_base);
enum ethtool_mac_stats_src src = req_info->src;
struct net_device *dev = reply_base->dev;
@@ -74,7 +73,7 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
if ((src == ETHTOOL_MAC_STATS_SRC_EMAC ||
src == ETHTOOL_MAC_STATS_SRC_PMAC) &&
!__ethtool_dev_mm_supported(dev)) {
- NL_SET_ERR_MSG_MOD(extack,
+ NL_SET_ERR_MSG_MOD(info->extack,
"Device does not support MAC merge layer");
ethnl_ops_complete(dev);
return -EOPNOTSUPP;
diff --git a/net/ethtool/phc_vclocks.c b/net/ethtool/phc_vclocks.c
index 637b2f5297d5..cadaabed60bd 100644
--- a/net/ethtool/phc_vclocks.c
+++ b/net/ethtool/phc_vclocks.c
@@ -24,7 +24,7 @@ const struct nla_policy ethnl_phc_vclocks_get_policy[] = {
static int phc_vclocks_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct phc_vclocks_reply_data *data = PHC_VCLOCKS_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c
index 5a8cab4df0c9..b238a1afe9ae 100644
--- a/net/ethtool/plca.c
+++ b/net/ethtool/plca.c
@@ -40,7 +40,7 @@ const struct nla_policy ethnl_plca_get_cfg_policy[] = {
static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct plca_reply_data *data = PLCA_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
@@ -183,7 +183,7 @@ const struct nla_policy ethnl_plca_get_status_policy[] = {
static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct plca_reply_data *data = PLCA_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/privflags.c b/net/ethtool/privflags.c
index 23264a1ebf12..297be6a13ab9 100644
--- a/net/ethtool/privflags.c
+++ b/net/ethtool/privflags.c
@@ -57,7 +57,7 @@ static int ethnl_get_priv_flags_info(struct net_device *dev,
static int privflags_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct privflags_reply_data *data = PRIVFLAGS_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c
index 530b8b99e6df..cc478af77111 100644
--- a/net/ethtool/pse-pd.c
+++ b/net/ethtool/pse-pd.c
@@ -53,8 +53,8 @@ static int pse_get_pse_attributes(struct net_device *dev,
}
static int pse_prepare_data(const struct ethnl_req_info *req_base,
- struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ struct ethnl_reply_data *reply_base,
+ const struct genl_info *info)
{
struct pse_reply_data *data = PSE_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
@@ -64,7 +64,7 @@ static int pse_prepare_data(const struct ethnl_req_info *req_base,
if (ret < 0)
return ret;
- ret = pse_get_pse_attributes(dev, info ? info->extack : NULL, data);
+ ret = pse_get_pse_attributes(dev, info->extack, data);
ethnl_ops_complete(dev);
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
index 1c4972526142..fb09f774ea01 100644
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -24,10 +24,9 @@ const struct nla_policy ethnl_rings_get_policy[] = {
static int rings_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct rings_reply_data *data = RINGS_REPDATA(reply_base);
- struct netlink_ext_ack *extack = info ? info->extack : NULL;
struct net_device *dev = reply_base->dev;
int ret;
@@ -39,7 +38,7 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base,
if (ret < 0)
return ret;
dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
- &data->kernel_ringparam, extack);
+ &data->kernel_ringparam, info->extack);
ethnl_ops_complete(dev);
return 0;
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index be260ab34e58..5764202e6cb6 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -42,7 +42,8 @@ rss_parse_request(struct ethnl_req_info *req_info, struct nlattr **tb,
static int
rss_prepare_data(const struct ethnl_req_info *req_base,
- struct ethnl_reply_data *reply_base, struct genl_info *info)
+ struct ethnl_reply_data *reply_base,
+ const struct genl_info *info)
{
struct rss_reply_data *data = RSS_REPDATA(reply_base);
struct rss_req_info *request = RSS_REQINFO(req_base);
diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c
index 010ed19ccc99..912f0c4fff2f 100644
--- a/net/ethtool/stats.c
+++ b/net/ethtool/stats.c
@@ -114,10 +114,9 @@ static int stats_parse_request(struct ethnl_req_info *req_base,
static int stats_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
const struct stats_req_info *req_info = STATS_REQINFO(req_base);
- struct netlink_ext_ack *extack = info ? info->extack : NULL;
struct stats_reply_data *data = STATS_REPDATA(reply_base);
enum ethtool_mac_stats_src src = req_info->src;
struct net_device *dev = reply_base->dev;
@@ -130,7 +129,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
if ((src == ETHTOOL_MAC_STATS_SRC_EMAC ||
src == ETHTOOL_MAC_STATS_SRC_PMAC) &&
!__ethtool_dev_mm_supported(dev)) {
- NL_SET_ERR_MSG_MOD(extack,
+ NL_SET_ERR_MSG_MOD(info->extack,
"Device does not support MAC merge layer");
ethnl_ops_complete(dev);
return -EOPNOTSUPP;
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index 3f7de54d85fb..c678b484a079 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -274,7 +274,7 @@ static int strset_prepare_set(struct strset_info *info, struct net_device *dev,
static int strset_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
const struct strset_req_info *req_info = STRSET_REQINFO(req_base);
struct strset_reply_data *data = STRSET_REPDATA(reply_base);
diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c
index 63b5814bd460..9daed0aab162 100644
--- a/net/ethtool/tsinfo.c
+++ b/net/ethtool/tsinfo.c
@@ -25,7 +25,7 @@ const struct nla_policy ethnl_tsinfo_get_policy[] = {
static int tsinfo_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c
index 05f752557b5e..b4ce47dd2aa6 100644
--- a/net/ethtool/tunnels.c
+++ b/net/ethtool/tunnels.c
@@ -219,7 +219,7 @@ int ethnl_tunnel_info_start(struct netlink_callback *cb)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
- struct nlattr **tb = info->attrs;
+ struct nlattr **tb = info->info.attrs;
int ret;
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c
index a4a43d9e6e9d..0ed56c9ac1bc 100644
--- a/net/ethtool/wol.c
+++ b/net/ethtool/wol.c
@@ -24,7 +24,7 @@ const struct nla_policy ethnl_wol_get_policy[] = {
static int wol_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
- struct genl_info *info)
+ const struct genl_info *info)
{
struct wol_reply_data *data = WOL_REPDATA(reply_base);
struct net_device *dev = reply_base->dev;
@@ -39,7 +39,8 @@ static int wol_prepare_data(const struct ethnl_req_info *req_base,
dev->ethtool_ops->get_wol(dev, &data->wol);
ethnl_ops_complete(dev);
/* do not include password in notifications */
- data->show_sopass = info && (data->wol.supported & WAKE_MAGICSECURE);
+ data->show_sopass = !genl_info_is_ntf(info) &&
+ (data->wol.supported & WAKE_MAGICSECURE);
return 0;
}
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index d610c1886160..1a265a421308 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -262,7 +262,7 @@ nl802154_prepare_wpan_dev_dump(struct sk_buff *skb,
if (!cb->args[0]) {
*wpan_dev = __cfg802154_wpan_dev_from_attrs(sock_net(skb->sk),
- info->attrs);
+ info->info.attrs);
if (IS_ERR(*wpan_dev)) {
err = PTR_ERR(*wpan_dev);
goto out_unlock;
@@ -570,7 +570,7 @@ static int nl802154_dump_wpan_phy_parse(struct sk_buff *skb,
struct nl802154_dump_wpan_phy_state *state)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- struct nlattr **tb = info->attrs;
+ struct nlattr **tb = info->info.attrs;
if (tb[NL802154_ATTR_WPAN_PHY])
state->filter_wpan_phy = nla_get_u32(tb[NL802154_ATTR_WPAN_PHY]);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9b2ca2fcc5a1..e07ee60625d9 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -187,24 +187,13 @@ static int inet_autobind(struct sock *sk)
return 0;
}
-/*
- * Move a socket into listening state.
- */
-int inet_listen(struct socket *sock, int backlog)
+int __inet_listen_sk(struct sock *sk, int backlog)
{
- struct sock *sk = sock->sk;
- unsigned char old_state;
+ unsigned char old_state = sk->sk_state;
int err, tcp_fastopen;
- lock_sock(sk);
-
- err = -EINVAL;
- if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
- goto out;
-
- old_state = sk->sk_state;
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
- goto out;
+ return -EINVAL;
WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
/* Really, if the socket is already in listen state
@@ -227,10 +216,27 @@ int inet_listen(struct socket *sock, int backlog)
err = inet_csk_listen_start(sk);
if (err)
- goto out;
+ return err;
+
tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
}
- err = 0;
+ return 0;
+}
+
+/*
+ * Move a socket into listening state.
+ */
+int inet_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ int err = -EINVAL;
+
+ lock_sock(sk);
+
+ if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
+ goto out;
+
+ err = __inet_listen_sk(sk, backlog);
out:
release_sock(sk);
@@ -325,14 +331,14 @@ lookup_protocol:
sk->sk_reuse = SK_CAN_REUSE;
inet = inet_sk(sk);
- inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
+ inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
- inet->nodefrag = 0;
+ inet_clear_bit(NODEFRAG, sk);
if (SOCK_RAW == sock->type) {
inet->inet_num = protocol;
if (IPPROTO_RAW == protocol)
- inet->hdrincl = 1;
+ inet_set_bit(HDRINCL, sk);
}
if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
@@ -350,9 +356,9 @@ lookup_protocol:
sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
inet->uc_ttl = -1;
- inet->mc_loop = 1;
+ inet_set_bit(MC_LOOP, sk);
inet->mc_ttl = 1;
- inet->mc_all = 1;
+ inet_set_bit(MC_ALL, sk);
inet->mc_index = 0;
inet->mc_list = NULL;
inet->rcv_tos = 0;
@@ -431,9 +437,8 @@ int inet_release(struct socket *sock)
}
EXPORT_SYMBOL(inet_release);
-int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
- struct sock *sk = sock->sk;
u32 flags = BIND_WITH_LOCK;
int err;
@@ -454,6 +459,11 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return __inet_bind(sk, uaddr, addr_len, flags);
}
+
+int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+{
+ return inet_bind_sk(sock->sk, uaddr, addr_len);
+}
EXPORT_SYMBOL(inet_bind);
int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
@@ -519,7 +529,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
inet->inet_saddr = 0; /* Use device */
/* Make sure we are allowed to bind here. */
- if (snum || !(inet->bind_address_no_port ||
+ if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) ||
(flags & BIND_FORCE_ADDRESS_NO_PORT))) {
err = sk->sk_prot->get_port(sk, snum);
if (err) {
@@ -646,7 +656,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
err = -EISCONN;
goto out;
case SS_CONNECTING:
- if (inet_sk(sk)->defer_connect)
+ if (inet_test_bit(DEFER_CONNECT, sk))
err = is_sendmsg ? -EINPROGRESS : -EISCONN;
else
err = -EALREADY;
@@ -669,7 +679,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
sock->state = SS_CONNECTING;
- if (!err && inet_sk(sk)->defer_connect)
+ if (!err && inet_test_bit(DEFER_CONNECT, sk))
goto out;
/* Just entered SS_CONNECTING state; the only
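
The inet_listen()/inet_bind() split above exists so in-kernel users that hold a bare struct sock (MPTCP, later in this series) can bind and listen without going through a struct socket wrapper; the prototypes are picked up via net/inet_common.h, as the mptcp hunk below shows. A minimal sketch of the intended in-kernel call pattern, mirroring that MPTCP usage (error handling trimmed, ssk assumed to be an already-created TCP sock):

static int listen_on_bare_sock(struct sock *ssk, struct sockaddr *addr,
			       int addrlen, int backlog)
{
	int err;

	err = inet_bind_sk(ssk, addr, addrlen);
	if (err)
		return err;

	lock_sock(ssk);
	err = __inet_listen_sk(ssk, backlog);	/* caller owns the socket lock */
	release_sock(ssk);
	return err;
}
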
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 79ae7204e8ed..d048aa833293 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1881,7 +1881,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
old = rcu_dereference_protected(sk_inet->inet_opt,
lockdep_sock_is_held(sk));
- if (sk_inet->is_icsk) {
+ if (inet_test_bit(IS_ICSK, sk)) {
sk_conn = inet_csk(sk);
if (old)
sk_conn->icsk_ext_hdr_len -= old->opt.optlen;
@@ -2051,7 +2051,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
sk_inet = inet_sk(sk);
hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
- if (sk_inet->is_icsk && hdr_delta > 0) {
+ if (inet_test_bit(IS_ICSK, sk) && hdr_delta > 0) {
struct inet_connection_sock *sk_conn = inet_csk(sk);
sk_conn->icsk_ext_hdr_len -= hdr_delta;
sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 48ff5f13e797..0c9e768e5628 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2658,7 +2658,7 @@ int ip_mc_sf_allow(const struct sock *sk, __be32 loc_addr, __be32 rmt_addr,
(sdif && pmc->multi.imr_ifindex == sdif)))
break;
}
- ret = inet->mc_all;
+ ret = inet_test_bit(MC_ALL, sk);
if (!pmc)
goto unlock;
psl = rcu_dereference(pmc->sflist);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index f7426926a104..e13a84433413 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -182,17 +182,17 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
r->idiag_inode = sock_i_ino(sk);
memset(&inet_sockopt, 0, sizeof(inet_sockopt));
- inet_sockopt.recverr = inet->recverr;
- inet_sockopt.is_icsk = inet->is_icsk;
- inet_sockopt.freebind = inet->freebind;
- inet_sockopt.hdrincl = inet->hdrincl;
- inet_sockopt.mc_loop = inet->mc_loop;
- inet_sockopt.transparent = inet->transparent;
- inet_sockopt.mc_all = inet->mc_all;
- inet_sockopt.nodefrag = inet->nodefrag;
- inet_sockopt.bind_address_no_port = inet->bind_address_no_port;
- inet_sockopt.recverr_rfc4884 = inet->recverr_rfc4884;
- inet_sockopt.defer_connect = inet->defer_connect;
+ inet_sockopt.recverr = inet_test_bit(RECVERR, sk);
+ inet_sockopt.is_icsk = inet_test_bit(IS_ICSK, sk);
+ inet_sockopt.freebind = inet_test_bit(FREEBIND, sk);
+ inet_sockopt.hdrincl = inet_test_bit(HDRINCL, sk);
+ inet_sockopt.mc_loop = inet_test_bit(MC_LOOP, sk);
+ inet_sockopt.transparent = inet_test_bit(TRANSPARENT, sk);
+ inet_sockopt.mc_all = inet_test_bit(MC_ALL, sk);
+ inet_sockopt.nodefrag = inet_test_bit(NODEFRAG, sk);
+ inet_sockopt.bind_address_no_port = inet_test_bit(BIND_ADDRESS_NO_PORT, sk);
+ inet_sockopt.recverr_rfc4884 = inet_test_bit(RECVERR_RFC4884, sk);
+ inet_sockopt.defer_connect = inet_test_bit(DEFER_CONNECT, sk);
if (nla_put(skb, INET_DIAG_SOCKOPT, sizeof(inet_sockopt),
&inet_sockopt))
goto errout;
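
Most of the remaining churn in this series is mechanical: the one-bit fields of struct inet_sock become atomic flags manipulated through inet_test_bit()/inet_set_bit()/inet_clear_bit()/inet_assign_bit(), which is what makes the lockless sockopt paths below possible. A small before/after sketch, using FREEBIND as the example (the old form is shown only in the comments):

static void set_freebind(struct sock *sk, bool on)
{
	/* old: lock_sock(sk); inet_sk(sk)->freebind = on; release_sock(sk);
	 * new: one atomic bit op, no socket lock required
	 */
	inet_assign_bit(FREEBIND, sk, on);
}

static bool get_freebind(const struct sock *sk)
{
	return inet_test_bit(FREEBIND, sk);	/* old: inet_sk(sk)->freebind */
}
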
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2c1b245dba8e..dd37a5bf6881 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -203,7 +203,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
tw->tw_reuseport = sk->sk_reuseport;
tw->tw_hash = sk->sk_hash;
tw->tw_ipv6only = 0;
- tw->tw_transparent = inet->transparent;
+ tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
tw->tw_prot = sk->sk_prot_creator;
atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
twsk_net_set(tw, sock_net(sk));
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index f28c87533a46..ce6257860a40 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -133,7 +133,7 @@ EXPORT_SYMBOL_GPL(ip_local_out);
static inline int ip_select_ttl(const struct inet_sock *inet,
const struct dst_entry *dst)
{
- int ttl = inet->uc_ttl;
+ int ttl = READ_ONCE(inet->uc_ttl);
if (ttl < 0)
ttl = ip4_dst_hoplimit(dst);
@@ -1039,7 +1039,7 @@ static int __ip_append_data(struct sock *sk,
}
}
} else if ((flags & MSG_SPLICE_PAGES) && length) {
- if (inet->hdrincl)
+ if (inet_test_bit(HDRINCL, sk))
return -EPERM;
if (rt->dst.dev->features & NETIF_F_SG &&
getfrag == ip_generic_getfrag)
@@ -1467,7 +1467,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
* so icmphdr does not in skb linear region and can not get icmp_type
* by icmp_hdr(skb)->type.
*/
- if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl)
+ if (sk->sk_type == SOCK_RAW &&
+ !inet_test_bit(HDRINCL, sk))
icmp_type = fl4->fl4_icmp_type;
else
icmp_type = icmp_hdr(skb)->type;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index d41bce8927b2..61b2e7bc7031 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -171,8 +171,10 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, int tlen, int offset)
{
- struct inet_sock *inet = inet_sk(sk);
- unsigned int flags = inet->cmsg_flags;
+ unsigned long flags = inet_cmsg_flags(inet_sk(sk));
+
+ if (!flags)
+ return;
/* Ordered by supposed usage frequency */
if (flags & IP_CMSG_PKTINFO) {
@@ -431,7 +433,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
serr->port = port;
if (skb_pull(skb, payload - skb->data)) {
- if (inet_sk(sk)->recverr_rfc4884)
+ if (inet_test_bit(RECVERR_RFC4884, sk))
ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);
skb_reset_transport_header(skb);
@@ -444,12 +446,11 @@ EXPORT_SYMBOL_GPL(ip_icmp_error);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
- struct inet_sock *inet = inet_sk(sk);
struct sock_exterr_skb *serr;
struct iphdr *iph;
struct sk_buff *skb;
- if (!inet->recverr)
+ if (!inet_test_bit(RECVERR, sk))
return;
skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
@@ -568,7 +569,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- if (inet_sk(sk)->cmsg_flags)
+ if (inet_cmsg_flags(inet_sk(sk)))
ip_cmsg_recv(msg, skb);
}
@@ -607,17 +608,13 @@ EXPORT_SYMBOL(ip_sock_set_tos);
void ip_sock_set_freebind(struct sock *sk)
{
- lock_sock(sk);
- inet_sk(sk)->freebind = true;
- release_sock(sk);
+ inet_set_bit(FREEBIND, sk);
}
EXPORT_SYMBOL(ip_sock_set_freebind);
void ip_sock_set_recverr(struct sock *sk)
{
- lock_sock(sk);
- inet_sk(sk)->recverr = true;
- release_sock(sk);
+ inet_set_bit(RECVERR, sk);
}
EXPORT_SYMBOL(ip_sock_set_recverr);
@@ -634,9 +631,7 @@ EXPORT_SYMBOL(ip_sock_set_mtu_discover);
void ip_sock_set_pktinfo(struct sock *sk)
{
- lock_sock(sk);
- inet_sk(sk)->cmsg_flags |= IP_CMSG_PKTINFO;
- release_sock(sk);
+ inet_set_bit(PKTINFO, sk);
}
EXPORT_SYMBOL(ip_sock_set_pktinfo);
@@ -950,6 +945,104 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
if (ip_mroute_opt(optname))
return ip_mroute_setsockopt(sk, optname, optval, optlen);
+ /* Handle options that can be set without locking the socket. */
+ switch (optname) {
+ case IP_PKTINFO:
+ inet_assign_bit(PKTINFO, sk, val);
+ return 0;
+ case IP_RECVTTL:
+ inet_assign_bit(TTL, sk, val);
+ return 0;
+ case IP_RECVTOS:
+ inet_assign_bit(TOS, sk, val);
+ return 0;
+ case IP_RECVOPTS:
+ inet_assign_bit(RECVOPTS, sk, val);
+ return 0;
+ case IP_RETOPTS:
+ inet_assign_bit(RETOPTS, sk, val);
+ return 0;
+ case IP_PASSSEC:
+ inet_assign_bit(PASSSEC, sk, val);
+ return 0;
+ case IP_RECVORIGDSTADDR:
+ inet_assign_bit(ORIGDSTADDR, sk, val);
+ return 0;
+ case IP_RECVFRAGSIZE:
+ if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
+ return -EINVAL;
+ inet_assign_bit(RECVFRAGSIZE, sk, val);
+ return 0;
+ case IP_RECVERR:
+ inet_assign_bit(RECVERR, sk, val);
+ if (!val)
+ skb_queue_purge(&sk->sk_error_queue);
+ return 0;
+ case IP_RECVERR_RFC4884:
+ if (val < 0 || val > 1)
+ return -EINVAL;
+ inet_assign_bit(RECVERR_RFC4884, sk, val);
+ return 0;
+ case IP_FREEBIND:
+ if (optlen < 1)
+ return -EINVAL;
+ inet_assign_bit(FREEBIND, sk, val);
+ return 0;
+ case IP_HDRINCL:
+ if (sk->sk_type != SOCK_RAW)
+ return -ENOPROTOOPT;
+ inet_assign_bit(HDRINCL, sk, val);
+ return 0;
+ case IP_MULTICAST_LOOP:
+ if (optlen < 1)
+ return -EINVAL;
+ inet_assign_bit(MC_LOOP, sk, val);
+ return 0;
+ case IP_MULTICAST_ALL:
+ if (optlen < 1)
+ return -EINVAL;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ inet_assign_bit(MC_ALL, sk, val);
+ return 0;
+ case IP_TRANSPARENT:
+ if (!!val && !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
+ !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+ if (optlen < 1)
+ goto e_inval;
+ inet_assign_bit(TRANSPARENT, sk, val);
+ return 0;
+ case IP_NODEFRAG:
+ if (sk->sk_type != SOCK_RAW)
+ return -ENOPROTOOPT;
+ inet_assign_bit(NODEFRAG, sk, val);
+ return 0;
+ case IP_BIND_ADDRESS_NO_PORT:
+ inet_assign_bit(BIND_ADDRESS_NO_PORT, sk, val);
+ return 0;
+ case IP_TTL:
+ if (optlen < 1)
+ return -EINVAL;
+ if (val != -1 && (val < 1 || val > 255))
+ return -EINVAL;
+ WRITE_ONCE(inet->uc_ttl, val);
+ return 0;
+ case IP_MINTTL:
+ if (optlen < 1)
+ return -EINVAL;
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ if (val)
+ static_branch_enable(&ip4_min_ttl);
+
+ WRITE_ONCE(inet->min_ttl, val);
+ return 0;
+ }
+
err = 0;
if (needs_rtnl)
rtnl_lock();
@@ -967,7 +1060,7 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
break;
old = rcu_dereference_protected(inet->inet_opt,
lockdep_sock_is_held(sk));
- if (inet->is_icsk) {
+ if (inet_test_bit(IS_ICSK, sk)) {
struct inet_connection_sock *icsk = inet_csk(sk);
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == PF_INET ||
@@ -989,111 +1082,27 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
kfree_rcu(old, rcu);
break;
}
- case IP_PKTINFO:
- if (val)
- inet->cmsg_flags |= IP_CMSG_PKTINFO;
- else
- inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
- break;
- case IP_RECVTTL:
- if (val)
- inet->cmsg_flags |= IP_CMSG_TTL;
- else
- inet->cmsg_flags &= ~IP_CMSG_TTL;
- break;
- case IP_RECVTOS:
- if (val)
- inet->cmsg_flags |= IP_CMSG_TOS;
- else
- inet->cmsg_flags &= ~IP_CMSG_TOS;
- break;
- case IP_RECVOPTS:
- if (val)
- inet->cmsg_flags |= IP_CMSG_RECVOPTS;
- else
- inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
- break;
- case IP_RETOPTS:
- if (val)
- inet->cmsg_flags |= IP_CMSG_RETOPTS;
- else
- inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
- break;
- case IP_PASSSEC:
- if (val)
- inet->cmsg_flags |= IP_CMSG_PASSSEC;
- else
- inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
- break;
- case IP_RECVORIGDSTADDR:
- if (val)
- inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
- else
- inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
- break;
case IP_CHECKSUM:
if (val) {
- if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
+ if (!(inet_test_bit(CHECKSUM, sk))) {
inet_inc_convert_csum(sk);
- inet->cmsg_flags |= IP_CMSG_CHECKSUM;
+ inet_set_bit(CHECKSUM, sk);
}
} else {
- if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
+ if (inet_test_bit(CHECKSUM, sk)) {
inet_dec_convert_csum(sk);
- inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
+ inet_clear_bit(CHECKSUM, sk);
}
}
break;
- case IP_RECVFRAGSIZE:
- if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
- goto e_inval;
- if (val)
- inet->cmsg_flags |= IP_CMSG_RECVFRAGSIZE;
- else
- inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE;
- break;
case IP_TOS: /* This sets both TOS and Precedence */
__ip_sock_set_tos(sk, val);
break;
- case IP_TTL:
- if (optlen < 1)
- goto e_inval;
- if (val != -1 && (val < 1 || val > 255))
- goto e_inval;
- inet->uc_ttl = val;
- break;
- case IP_HDRINCL:
- if (sk->sk_type != SOCK_RAW) {
- err = -ENOPROTOOPT;
- break;
- }
- inet->hdrincl = val ? 1 : 0;
- break;
- case IP_NODEFRAG:
- if (sk->sk_type != SOCK_RAW) {
- err = -ENOPROTOOPT;
- break;
- }
- inet->nodefrag = val ? 1 : 0;
- break;
- case IP_BIND_ADDRESS_NO_PORT:
- inet->bind_address_no_port = val ? 1 : 0;
- break;
case IP_MTU_DISCOVER:
if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
goto e_inval;
inet->pmtudisc = val;
break;
- case IP_RECVERR:
- inet->recverr = !!val;
- if (!val)
- skb_queue_purge(&sk->sk_error_queue);
- break;
- case IP_RECVERR_RFC4884:
- if (val < 0 || val > 1)
- goto e_inval;
- inet->recverr_rfc4884 = !!val;
- break;
case IP_MULTICAST_TTL:
if (sk->sk_type == SOCK_STREAM)
goto e_inval;
@@ -1105,11 +1114,6 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
goto e_inval;
inet->mc_ttl = val;
break;
- case IP_MULTICAST_LOOP:
- if (optlen < 1)
- goto e_inval;
- inet->mc_loop = !!val;
- break;
case IP_UNICAST_IF:
{
struct net_device *dev = NULL;
@@ -1214,7 +1218,7 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
struct ip_mreqn mreq;
err = -EPROTO;
- if (inet_sk(sk)->is_icsk)
+ if (inet_test_bit(IS_ICSK, sk))
break;
if (optlen < sizeof(struct ip_mreq))
@@ -1325,20 +1329,6 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
else
err = ip_set_mcast_msfilter(sk, optval, optlen);
break;
- case IP_MULTICAST_ALL:
- if (optlen < 1)
- goto e_inval;
- if (val != 0 && val != 1)
- goto e_inval;
- inet->mc_all = val;
- break;
-
- case IP_FREEBIND:
- if (optlen < 1)
- goto e_inval;
- inet->freebind = !!val;
- break;
-
case IP_IPSEC_POLICY:
case IP_XFRM_POLICY:
err = -EPERM;
@@ -1347,32 +1337,6 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
err = xfrm_user_policy(sk, optname, optval, optlen);
break;
- case IP_TRANSPARENT:
- if (!!val && !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
- !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
- err = -EPERM;
- break;
- }
- if (optlen < 1)
- goto e_inval;
- inet->transparent = !!val;
- break;
-
- case IP_MINTTL:
- if (optlen < 1)
- goto e_inval;
- if (val < 0 || val > 255)
- goto e_inval;
-
- if (val)
- static_branch_enable(&ip4_min_ttl);
-
- /* tcp_v4_err() and tcp_v4_rcv() might read min_ttl
- * while we are changint it.
- */
- WRITE_ONCE(inet->min_ttl, val);
- break;
-
case IP_LOCAL_PORT_RANGE:
{
const __u16 lo = val;
@@ -1415,7 +1379,7 @@ e_inval:
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
{
struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
- bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
+ bool prepare = inet_test_bit(PKTINFO, sk) ||
ipv6_sk_rxinfo(sk);
if (prepare && skb_rtable(skb)) {
@@ -1566,6 +1530,72 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
if (len < 0)
return -EINVAL;
+ /* Handle options that can be read without locking the socket. */
+ switch (optname) {
+ case IP_PKTINFO:
+ val = inet_test_bit(PKTINFO, sk);
+ goto copyval;
+ case IP_RECVTTL:
+ val = inet_test_bit(TTL, sk);
+ goto copyval;
+ case IP_RECVTOS:
+ val = inet_test_bit(TOS, sk);
+ goto copyval;
+ case IP_RECVOPTS:
+ val = inet_test_bit(RECVOPTS, sk);
+ goto copyval;
+ case IP_RETOPTS:
+ val = inet_test_bit(RETOPTS, sk);
+ goto copyval;
+ case IP_PASSSEC:
+ val = inet_test_bit(PASSSEC, sk);
+ goto copyval;
+ case IP_RECVORIGDSTADDR:
+ val = inet_test_bit(ORIGDSTADDR, sk);
+ goto copyval;
+ case IP_CHECKSUM:
+ val = inet_test_bit(CHECKSUM, sk);
+ goto copyval;
+ case IP_RECVFRAGSIZE:
+ val = inet_test_bit(RECVFRAGSIZE, sk);
+ goto copyval;
+ case IP_RECVERR:
+ val = inet_test_bit(RECVERR, sk);
+ goto copyval;
+ case IP_RECVERR_RFC4884:
+ val = inet_test_bit(RECVERR_RFC4884, sk);
+ goto copyval;
+ case IP_FREEBIND:
+ val = inet_test_bit(FREEBIND, sk);
+ goto copyval;
+ case IP_HDRINCL:
+ val = inet_test_bit(HDRINCL, sk);
+ goto copyval;
+ case IP_MULTICAST_LOOP:
+ val = inet_test_bit(MC_LOOP, sk);
+ goto copyval;
+ case IP_MULTICAST_ALL:
+ val = inet_test_bit(MC_ALL, sk);
+ goto copyval;
+ case IP_TRANSPARENT:
+ val = inet_test_bit(TRANSPARENT, sk);
+ goto copyval;
+ case IP_NODEFRAG:
+ val = inet_test_bit(NODEFRAG, sk);
+ goto copyval;
+ case IP_BIND_ADDRESS_NO_PORT:
+ val = inet_test_bit(BIND_ADDRESS_NO_PORT, sk);
+ goto copyval;
+ case IP_TTL:
+ val = READ_ONCE(inet->uc_ttl);
+ if (val < 0)
+ val = READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_default_ttl);
+ goto copyval;
+ case IP_MINTTL:
+ val = READ_ONCE(inet->min_ttl);
+ goto copyval;
+ }
+
if (needs_rtnl)
rtnl_lock();
sockopt_lock_sock(sk);
@@ -1600,53 +1630,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
return -EFAULT;
return 0;
}
- case IP_PKTINFO:
- val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
- break;
- case IP_RECVTTL:
- val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
- break;
- case IP_RECVTOS:
- val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
- break;
- case IP_RECVOPTS:
- val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
- break;
- case IP_RETOPTS:
- val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
- break;
- case IP_PASSSEC:
- val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
- break;
- case IP_RECVORIGDSTADDR:
- val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
- break;
- case IP_CHECKSUM:
- val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
- break;
- case IP_RECVFRAGSIZE:
- val = (inet->cmsg_flags & IP_CMSG_RECVFRAGSIZE) != 0;
- break;
case IP_TOS:
val = inet->tos;
break;
- case IP_TTL:
- {
- struct net *net = sock_net(sk);
- val = (inet->uc_ttl == -1 ?
- READ_ONCE(net->ipv4.sysctl_ip_default_ttl) :
- inet->uc_ttl);
- break;
- }
- case IP_HDRINCL:
- val = inet->hdrincl;
- break;
- case IP_NODEFRAG:
- val = inet->nodefrag;
- break;
- case IP_BIND_ADDRESS_NO_PORT:
- val = inet->bind_address_no_port;
- break;
case IP_MTU_DISCOVER:
val = inet->pmtudisc;
break;
@@ -1665,18 +1651,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
}
break;
}
- case IP_RECVERR:
- val = inet->recverr;
- break;
- case IP_RECVERR_RFC4884:
- val = inet->recverr_rfc4884;
- break;
case IP_MULTICAST_TTL:
val = inet->mc_ttl;
break;
- case IP_MULTICAST_LOOP:
- val = inet->mc_loop;
- break;
case IP_UNICAST_IF:
val = (__force int)htonl((__u32) inet->uc_index);
break;
@@ -1715,9 +1692,6 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
else
err = ip_get_mcast_msfilter(sk, optval, optlen, len);
goto out;
- case IP_MULTICAST_ALL:
- val = inet->mc_all;
- break;
case IP_PKTOPTIONS:
{
struct msghdr msg;
@@ -1737,7 +1711,7 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
msg.msg_controllen = len;
msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;
- if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
+ if (inet_test_bit(PKTINFO, sk)) {
struct in_pktinfo info;
info.ipi_addr.s_addr = inet->inet_rcv_saddr;
@@ -1745,26 +1719,17 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
info.ipi_ifindex = inet->mc_index;
put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
- if (inet->cmsg_flags & IP_CMSG_TTL) {
+ if (inet_test_bit(TTL, sk)) {
int hlim = inet->mc_ttl;
put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
}
- if (inet->cmsg_flags & IP_CMSG_TOS) {
+ if (inet_test_bit(TOS, sk)) {
int tos = inet->rcv_tos;
put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
}
len -= msg.msg_controllen;
return copy_to_sockptr(optlen, &len, sizeof(int));
}
- case IP_FREEBIND:
- val = inet->freebind;
- break;
- case IP_TRANSPARENT:
- val = inet->transparent;
- break;
- case IP_MINTTL:
- val = inet->min_ttl;
- break;
case IP_LOCAL_PORT_RANGE:
val = inet->local_port_range.hi << 16 | inet->local_port_range.lo;
break;
@@ -1776,7 +1741,7 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
return -ENOPROTOOPT;
}
sockopt_release_sock(sk);
-
+copyval:
if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
unsigned char ucval = (unsigned char)val;
len = 1;
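
The setsockopt/getsockopt rework above moves the boolean IP options onto a lockless fast path; from userspace the observable semantics are unchanged. A quick userspace check for one of the affected options (a hypothetical example, not part of the patch):

#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>

int main(void)
{
	int one = 1, got = 0;
	socklen_t len = sizeof(got);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	setsockopt(fd, IPPROTO_IP, IP_FREEBIND, &one, sizeof(one));
	getsockopt(fd, IPPROTO_IP, IP_FREEBIND, &got, &len);
	printf("IP_FREEBIND=%d\n", got);	/* expected: 1, before and after this series */
	return 0;
}
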
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index a9ba7de092c4..265b39bc435b 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,7 +66,7 @@ static unsigned int ipv4_conntrack_defrag(void *priv,
struct sock *sk = skb->sk;
if (sk && sk_fullsock(sk) && (sk->sk_family == PF_INET) &&
- inet_sk(sk)->nodefrag)
+ inet_test_bit(NODEFRAG, sk))
return NF_ACCEPT;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 09d36bcbd7d4..bbff68b5b5d4 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -3209,7 +3209,6 @@ static int rtm_dump_walk_nexthops(struct sk_buff *skb,
return err;
}
- ctx->idx++;
return 0;
}
@@ -3337,7 +3336,6 @@ static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
struct rtm_dump_res_bucket_ctx {
struct rtm_dump_nh_ctx nh;
u16 bucket_index;
- u32 done_nh_idx; /* 1 + the index of the last fully processed NH. */
};
static struct rtm_dump_res_bucket_ctx *
@@ -3366,9 +3364,6 @@ static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
u16 bucket_index;
int err;
- if (dd->ctx->nh.idx < dd->ctx->done_nh_idx)
- return 0;
-
nhg = rtnl_dereference(nh->nh_grp);
res_table = rtnl_dereference(nhg->res_table);
for (bucket_index = dd->ctx->bucket_index;
@@ -3395,7 +3390,6 @@ static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
return err;
}
- dd->ctx->done_nh_idx = dd->ctx->nh.idx + 1;
dd->ctx->bucket_index = 0;
return 0;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 25dd78cee179..75e0aee35eb7 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -580,7 +580,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
* RFC1122: OK. Passes ICMP errors back to application, as per
* 4.1.3.3.
*/
- if ((family == AF_INET && !inet_sock->recverr) ||
+ if ((family == AF_INET && !inet_test_bit(RECVERR, sk)) ||
(family == AF_INET6 && !inet6_sk(sk)->recverr)) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
@@ -894,7 +894,7 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
*addr_len = sizeof(*sin);
}
- if (isk->cmsg_flags)
+ if (inet_cmsg_flags(isk))
ip_cmsg_recv(msg, skb);
#if IS_ENABLED(CONFIG_IPV6)
@@ -921,7 +921,8 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
if (skb->protocol == htons(ETH_P_IPV6) &&
inet6_sk(sk)->rxopt.all)
pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
- else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
+ else if (skb->protocol == htons(ETH_P_IP) &&
+ inet_cmsg_flags(isk))
ip_cmsg_recv(msg, skb);
#endif
} else {
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index cb381f5aa464..4b5db5d1edc2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -203,8 +203,9 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
struct inet_sock *inet = inet_sk(sk);
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
- int err = 0;
int harderr = 0;
+ bool recverr;
+ int err = 0;
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
ipv4_sk_update_pmtu(skb, sk, info);
@@ -218,7 +219,8 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
2. Socket is connected (otherwise the error indication
is useless without ip_recverr and error is hard.
*/
- if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
+ recverr = inet_test_bit(RECVERR, sk);
+ if (!recverr && sk->sk_state != TCP_ESTABLISHED)
return;
switch (type) {
@@ -245,16 +247,16 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
}
}
- if (inet->recverr) {
+ if (recverr) {
const struct iphdr *iph = (const struct iphdr *)skb->data;
u8 *payload = skb->data + (iph->ihl << 2);
- if (inet->hdrincl)
+ if (inet_test_bit(HDRINCL, sk))
payload = skb->data;
ip_icmp_error(sk, skb, err, 0, info, payload);
}
- if (inet->recverr || harderr) {
+ if (recverr || harderr) {
sk->sk_err = err;
sk_error_report(sk);
}
@@ -413,7 +415,7 @@ error_free:
kfree_skb(skb);
error:
IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
- if (err == -ENOBUFS && !inet->recverr)
+ if (err == -ENOBUFS && !inet_test_bit(RECVERR, sk))
err = 0;
return err;
}
@@ -489,12 +491,8 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (len > 0xFFFF)
goto out;
- /* hdrincl should be READ_ONCE(inet->hdrincl)
- * but READ_ONCE() doesn't work with bit fields.
- * Doing this indirectly yields the same result.
- */
- hdrincl = inet->hdrincl;
- hdrincl = READ_ONCE(hdrincl);
+ hdrincl = inet_test_bit(HDRINCL, sk);
+
/*
* Check the flags.
*/
@@ -645,7 +643,7 @@ back_from_confirm:
ip_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE)) {
err = ip_push_pending_frames(sk, &fl4);
- if (err == -ENOBUFS && !inet->recverr)
+ if (err == -ENOBUFS && !inet_test_bit(RECVERR, sk))
err = 0;
}
release_sock(sk);
@@ -767,7 +765,7 @@ static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
}
- if (inet->cmsg_flags)
+ if (inet_cmsg_flags(inet))
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 92fede388d52..a4e153dd615b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -515,13 +515,12 @@ static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
__u8 scope = RT_SCOPE_UNIVERSE;
if (sk) {
- const struct inet_sock *inet = inet_sk(sk);
-
oif = sk->sk_bound_dev_if;
mark = READ_ONCE(sk->sk_mark);
tos = ip_sock_rt_tos(sk);
scope = ip_sock_rt_scope(sk);
- prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
+ prot = inet_test_bit(HDRINCL, sk) ? IPPROTO_RAW :
+ sk->sk_protocol;
}
flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
@@ -555,7 +554,8 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
ip_sock_rt_scope(sk),
- inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+ inet_test_bit(HDRINCL, sk) ?
+ IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
rcu_read_unlock();
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4fbc7ff8c53c..cee1e548660c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -583,7 +583,8 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (urg_data & TCP_URG_VALID)
mask |= EPOLLPRI;
- } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
+ } else if (state == TCP_SYN_SENT &&
+ inet_test_bit(DEFER_CONNECT, sk)) {
/* Active TCP fastopen socket with defer_connect
* Return EPOLLOUT so application can call write()
* in order for kernel to generate SYN+data
@@ -1007,7 +1008,7 @@ int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
tp->fastopen_req->size = size;
tp->fastopen_req->uarg = uarg;
- if (inet->defer_connect) {
+ if (inet_test_bit(DEFER_CONNECT, sk)) {
err = tcp_connect(sk);
/* Same failure procedure as in tcp_v4/6_connect */
if (err) {
@@ -1025,7 +1026,7 @@ int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
if (tp->fastopen_req) {
*copied = tp->fastopen_req->copied;
tcp_free_fastopen_req(tp);
- inet->defer_connect = 0;
+ inet_clear_bit(DEFER_CONNECT, sk);
}
return err;
}
@@ -1066,7 +1067,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
zc = MSG_SPLICE_PAGES;
}
- if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
+ if (unlikely(flags & MSG_FASTOPEN ||
+ inet_test_bit(DEFER_CONNECT, sk)) &&
!tp->repair) {
err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
if (err == -EINPROGRESS && copied_syn > 0)
@@ -3088,7 +3090,7 @@ int tcp_disconnect(struct sock *sk, int flags)
/* Clean up fastopen related fields */
tcp_free_fastopen_req(tp);
- inet->defer_connect = 0;
+ inet_clear_bit(DEFER_CONNECT, sk);
tp->fastopen_client_fail = 0;
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 85e4953f1182..8ed54e7334a9 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -451,7 +451,7 @@ bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
if (tp->fastopen_connect && !tp->fastopen_req) {
if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
- inet_sk(sk)->defer_connect = 1;
+ inet_set_bit(DEFER_CONNECT, sk);
return true;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8e96ebe373d7..06fe1cf645d5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3525,7 +3525,7 @@ static inline bool tcp_may_update_window(const struct tcp_sock *tp,
{
return after(ack, tp->snd_una) ||
after(ack_seq, tp->snd_wl1) ||
- (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
+ (ack_seq == tp->snd_wl1 && (nwin > tp->snd_wnd || !nwin));
}
/* If we update tp->snd_una, also update tp->bytes_acked */
@@ -5059,13 +5059,19 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
/* Ok. In sequence. In window. */
queue_and_out:
- if (skb_queue_len(&sk->sk_receive_queue) == 0)
- sk_forced_mem_schedule(sk, skb->truesize);
- else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
- reason = SKB_DROP_REASON_PROTO_MEM;
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+ if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
+ /* TODO: maybe ratelimit these WIN 0 ACKs? */
+ inet_csk(sk)->icsk_ack.pending |=
+ (ICSK_ACK_NOMEM | ICSK_ACK_NOW);
+ inet_csk_schedule_ack(sk);
sk->sk_data_ready(sk);
- goto drop;
+
+ if (skb_queue_len(&sk->sk_receive_queue)) {
+ reason = SKB_DROP_REASON_PROTO_MEM;
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+ goto drop;
+ }
+ sk_forced_mem_schedule(sk, skb->truesize);
}
eaten = tcp_queue_rcv(sk, skb, &fragstolen);
@@ -6994,7 +7000,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb, sk);
- inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent;
+ inet_rsk(req)->no_srccheck = inet_test_bit(TRANSPARENT, sk);
/* Note: tcp_v6_init_req() might override ir_iif for link locals */
inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5b18a048f613..2a662d5f3072 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -477,7 +477,6 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
struct tcp_sock *tp;
- struct inet_sock *inet;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct sock *sk;
@@ -625,8 +624,8 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
* --ANK (980905)
*/
- inet = inet_sk(sk);
- if (!sock_owned_by_user(sk) && inet->recverr) {
+ if (!sock_owned_by_user(sk) &&
+ inet_test_bit(RECVERR, sk)) {
WRITE_ONCE(sk->sk_err, err);
sk_error_report(sk);
} else { /* Only an error on timeout */
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 13ee12983c42..b98d476f1594 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -289,9 +289,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
if (tw) {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
- struct inet_sock *inet = inet_sk(sk);
- tw->tw_transparent = inet->transparent;
+ tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
tw->tw_mark = sk->sk_mark;
tw->tw_priority = sk->sk_priority;
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c5412ee77fc8..769a558159ee 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -257,11 +257,19 @@ EXPORT_SYMBOL(tcp_select_initial_window);
static u16 tcp_select_window(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- u32 old_win = tp->rcv_wnd;
- u32 cur_win = tcp_receive_window(tp);
- u32 new_win = __tcp_select_window(sk);
struct net *net = sock_net(sk);
+ u32 old_win = tp->rcv_wnd;
+ u32 cur_win, new_win;
+
+ /* Make the window 0 if we failed to queue the data because we
+ * are out of memory. The window is temporary, so we don't store
+ * it on the socket.
+ */
+ if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM))
+ return 0;
+ cur_win = tcp_receive_window(tp);
+ new_win = __tcp_select_window(sk);
if (new_win < cur_win) {
/* Danger Will Robinson!
* Don't update rcv_wup/rcv_wnd here or else
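
Together with the tcp_data_queue() change in net/ipv4/tcp_input.c above, this makes a receiver that cannot schedule rmem advertise a zero window instead of silently dropping in-order data: the drop path now sets ICSK_ACK_NOMEM and forces an ACK, and tcp_select_window() answers that flag with a temporary zero window that is never stored in rcv_wnd/rcv_wup. A one-line sketch of the receiver-side condition, under those assumptions (not a function from the patch):

static inline bool tcp_must_advertise_zero_window(const struct sock *sk)
{
	/* ICSK_ACK_NOMEM is set when tcp_try_rmem_schedule() fails */
	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM;
}
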
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index d45c96c7f5a4..74c70fc1003c 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -454,6 +454,22 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
req->timeout << req->num_timeout, TCP_RTO_MAX);
}
+static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const int timeout = TCP_RTO_MAX * 2;
+ u32 rcv_delta, rtx_delta;
+
+ rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
+ if (rcv_delta <= timeout)
+ return false;
+
+ rtx_delta = (u32)msecs_to_jiffies(tcp_time_stamp(tp) -
+ (tp->retrans_stamp ?: tcp_skb_timestamp(skb)));
+
+ return rtx_delta > timeout;
+}
/**
* tcp_retransmit_timer() - The TCP retransmit timeout handler
@@ -503,23 +519,26 @@ void tcp_retransmit_timer(struct sock *sk)
* we cannot allow such beasts to hang infinitely.
*/
struct inet_sock *inet = inet_sk(sk);
+ u32 rtx_delta;
+
+ rtx_delta = tcp_time_stamp(tp) - (tp->retrans_stamp ?: tcp_skb_timestamp(skb));
if (sk->sk_family == AF_INET) {
- net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
- &inet->inet_daddr,
- ntohs(inet->inet_dport),
- inet->inet_num,
- tp->snd_una, tp->snd_nxt);
+ net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
+ &inet->inet_daddr, ntohs(inet->inet_dport),
+ inet->inet_num, tp->snd_una, tp->snd_nxt,
+ jiffies_to_msecs(jiffies - tp->rcv_tstamp),
+ rtx_delta);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (sk->sk_family == AF_INET6) {
- net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
- &sk->sk_v6_daddr,
- ntohs(inet->inet_dport),
- inet->inet_num,
- tp->snd_una, tp->snd_nxt);
+ net_dbg_ratelimited("Probing zero-window on %pI6:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
+ &sk->sk_v6_daddr, ntohs(inet->inet_dport),
+ inet->inet_num, tp->snd_una, tp->snd_nxt,
+ jiffies_to_msecs(jiffies - tp->rcv_tstamp),
+ rtx_delta);
}
#endif
- if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
+ if (tcp_rtx_probe0_timed_out(sk, skb)) {
tcp_write_err(sk);
goto out;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 3e2f29c14fa8..0794a2c46a56 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -779,7 +779,7 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
(u8 *)(uh+1));
goto out;
}
- if (!inet->recverr) {
+ if (!inet_test_bit(RECVERR, sk)) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
} else
@@ -962,7 +962,8 @@ csum_partial:
send:
err = ip_send_skb(sock_net(sk), skb);
if (err) {
- if (err == -ENOBUFS && !inet->recverr) {
+ if (err == -ENOBUFS &&
+ !inet_test_bit(RECVERR, sk)) {
UDP_INC_STATS(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
@@ -1870,7 +1871,7 @@ try_again:
if (udp_sk(sk)->gro_enabled)
udp_cmsg_recv(msg, sk, skb);
- if (inet->cmsg_flags)
+ if (inet_cmsg_flags(inet))
ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
err = copied;
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index 5f8104cf082d..9b18f371af0d 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -63,7 +63,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
struct sock *sk = sock->sk;
/* Disable multicast loopback */
- inet_sk(sk)->mc_loop = 0;
+ inet_clear_bit(MC_LOOP, sk);
/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
inet_inc_convert_csum(sk);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 9f9c4b838664..368824fe9719 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -200,12 +200,12 @@ lookup_protocol:
sk->sk_reuse = SK_CAN_REUSE;
inet = inet_sk(sk);
- inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
+ inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
if (SOCK_RAW == sock->type) {
inet->inet_num = protocol;
if (IPPROTO_RAW == protocol)
- inet->hdrincl = 1;
+ inet_set_bit(HDRINCL, sk);
}
sk->sk_destruct = inet6_sock_destruct;
@@ -229,7 +229,7 @@ lookup_protocol:
*/
inet->uc_ttl = -1;
- inet->mc_loop = 1;
+ inet_set_bit(MC_LOOP, sk);
inet->mc_ttl = 1;
inet->mc_index = 0;
RCU_INIT_POINTER(inet->mc_list, NULL);
@@ -399,7 +399,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
sk->sk_ipv6only = 1;
/* Make sure we are allowed to bind here. */
- if (snum || !(inet->bind_address_no_port ||
+ if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) ||
(flags & BIND_FORCE_ADDRESS_NO_PORT))) {
err = sk->sk_prot->get_port(sk, snum);
if (err) {
@@ -435,10 +435,8 @@ out_unlock:
goto out;
}
-/* bind for INET6 API */
-int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+int inet6_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
- struct sock *sk = sock->sk;
u32 flags = BIND_WITH_LOCK;
const struct proto *prot;
int err = 0;
@@ -462,6 +460,12 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return __inet6_bind(sk, uaddr, addr_len, flags);
}
+
+/* bind for INET6 API */
+int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+{
+ return inet6_bind_sk(sock->sk, uaddr, addr_len);
+}
EXPORT_SYMBOL(inet6_bind);
int inet6_release(struct socket *sock)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index d80d6024cafa..41ebc4e57473 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -524,7 +524,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
} else {
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin->sin6_addr);
- if (inet_sk(sk)->cmsg_flags)
+ if (inet_cmsg_flags(inet_sk(sk)))
ip_cmsg_recv(msg, skb);
}
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bac768d36cc1..28b01a068412 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -160,6 +160,8 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh)
INIT_LIST_HEAD(&f6i->fib6_siblings);
refcount_set(&f6i->fib6_ref, 1);
+ INIT_HLIST_NODE(&f6i->gc_link);
+
return f6i;
}
@@ -246,6 +248,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
net->ipv6.fib6_null_entry);
table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
inet_peer_base_init(&table->tb6_peers);
+ INIT_HLIST_HEAD(&table->tb6_gc_hlist);
}
return table;
@@ -1057,6 +1060,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
lockdep_is_held(&table->tb6_lock));
}
}
+
+ fib6_clean_expires_locked(rt);
}
/*
@@ -1118,9 +1123,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
if (!(iter->fib6_flags & RTF_EXPIRES))
return -EEXIST;
if (!(rt->fib6_flags & RTF_EXPIRES))
- fib6_clean_expires(iter);
+ fib6_clean_expires_locked(iter);
else
- fib6_set_expires(iter, rt->expires);
+ fib6_set_expires_locked(iter,
+ rt->expires);
if (rt->fib6_pmtu)
fib6_metric_set(iter, RTAX_MTU,
@@ -1479,6 +1485,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
if (rt->nh)
list_add(&rt->nh_list, &rt->nh->f6i_list);
__fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
+
+ if (fib6_has_expires(rt))
+ hlist_add_head(&rt->gc_link, &table->tb6_gc_hlist);
+
fib6_start_gc(info->nl_net, rt);
}
@@ -2285,9 +2295,8 @@ static void fib6_flush_trees(struct net *net)
* Garbage collection
*/
-static int fib6_age(struct fib6_info *rt, void *arg)
+static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args)
{
- struct fib6_gc_args *gc_args = arg;
unsigned long now = jiffies;
/*
@@ -2295,7 +2304,7 @@ static int fib6_age(struct fib6_info *rt, void *arg)
* Routes are expired even if they are in use.
*/
- if (rt->fib6_flags & RTF_EXPIRES && rt->expires) {
+ if (fib6_has_expires(rt) && rt->expires) {
if (time_after(now, rt->expires)) {
RT6_TRACE("expiring %p\n", rt);
return -1;
@@ -2312,6 +2321,40 @@ static int fib6_age(struct fib6_info *rt, void *arg)
return 0;
}
+static void fib6_gc_table(struct net *net,
+ struct fib6_table *tb6,
+ struct fib6_gc_args *gc_args)
+{
+ struct fib6_info *rt;
+ struct hlist_node *n;
+ struct nl_info info = {
+ .nl_net = net,
+ .skip_notify = false,
+ };
+
+ hlist_for_each_entry_safe(rt, n, &tb6->tb6_gc_hlist, gc_link)
+ if (fib6_age(rt, gc_args) == -1)
+ fib6_del(rt, &info);
+}
+
+static void fib6_gc_all(struct net *net, struct fib6_gc_args *gc_args)
+{
+ struct fib6_table *table;
+ struct hlist_head *head;
+ unsigned int h;
+
+ rcu_read_lock();
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+ head = &net->ipv6.fib_table_hash[h];
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+ spin_lock_bh(&table->tb6_lock);
+ fib6_gc_table(net, table, gc_args);
+ spin_unlock_bh(&table->tb6_lock);
+ }
+ }
+ rcu_read_unlock();
+}
+
void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
struct fib6_gc_args gc_args;
@@ -2327,7 +2370,7 @@ void fib6_run_gc(unsigned long expires, struct net *net, bool force)
net->ipv6.sysctl.ip6_rt_gc_interval;
gc_args.more = 0;
- fib6_clean_all(net, fib6_age, &gc_args);
+ fib6_gc_all(net, &gc_args);
now = jiffies;
net->ipv6.ip6_rt_last_gc = now;
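
The ip6_fib.c hunks above replace the full fib6_clean_all() walk in fib6_run_gc() with a per-table hlist that only tracks routes carrying an expiry, so GC cost scales with the number of expiring routes rather than the size of the trie. The new fib6_set_expires_locked()/fib6_clean_expires_locked() helpers are defined in a header outside this diff; a rough sketch of the invariant they are expected to keep under tb6_lock (an assumption for illustration, not the actual implementation):

/* called with the owning table's tb6_lock held */
static inline void fib6_track_expiry_sketch(struct fib6_info *f6i,
					    unsigned long expires)
{
	f6i->expires = expires;
	f6i->fib6_flags |= RTF_EXPIRES;

	/* keep gc_hlist membership in sync with RTF_EXPIRES */
	if (hlist_unhashed(&f6i->gc_link))
		hlist_add_head(&f6i->gc_link,
			       &f6i->fib6_table->tb6_gc_hlist);
}
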
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index bc96559bbf0f..f8a1f6bb3f87 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1591,7 +1591,7 @@ emsgsize:
}
}
} else if ((flags & MSG_SPLICE_PAGES) && length) {
- if (inet_sk(sk)->hdrincl)
+ if (inet_test_bit(HDRINCL, sk))
return -EPERM;
if (rt->dst.dev->features & NETIF_F_SG &&
getfrag == ip_generic_getfrag)
@@ -1995,7 +1995,8 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
u8 icmp6_type;
- if (sk->sk_socket->type == SOCK_RAW && !inet_sk(sk)->hdrincl)
+ if (sk->sk_socket->type == SOCK_RAW &&
+ !inet_test_bit(HDRINCL, sk))
icmp6_type = fl6->fl6_icmp_type;
else
icmp6_type = icmp6_hdr(skb)->icmp6_type;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ca377159967c..d19577a94bcc 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -102,7 +102,7 @@ int ip6_ra_control(struct sock *sk, int sel)
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
struct ipv6_txoptions *opt)
{
- if (inet_sk(sk)->is_icsk) {
+ if (inet_test_bit(IS_ICSK, sk)) {
if (opt &&
!((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) {
@@ -633,7 +633,7 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (optlen < sizeof(int))
goto e_inval;
/* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */
- inet_sk(sk)->transparent = valbool;
+ inet_assign_bit(TRANSPARENT, sk, valbool);
retv = 0;
break;
@@ -641,7 +641,7 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (optlen < sizeof(int))
goto e_inval;
/* we also don't have a separate freebind bit for IPV6 */
- inet_sk(sk)->freebind = valbool;
+ inet_assign_bit(FREEBIND, sk, valbool);
retv = 0;
break;
@@ -831,7 +831,7 @@ done:
goto e_inval;
retv = -EPROTO;
- if (inet_sk(sk)->is_icsk)
+ if (inet_test_bit(IS_ICSK, sk))
break;
retv = -EFAULT;
@@ -1330,11 +1330,11 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
}
case IPV6_TRANSPARENT:
- val = inet_sk(sk)->transparent;
+ val = inet_test_bit(TRANSPARENT, sk);
break;
case IPV6_FREEBIND:
- val = inet_sk(sk)->freebind;
+ val = inet_test_bit(FREEBIND, sk);
break;
case IPV6_RECVORIGDSTADDR:
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ea16734f5e1f..0eae7661a85c 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -291,7 +291,6 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
- struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
int err;
int harderr;
@@ -315,7 +314,7 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
}
if (np->recverr) {
u8 *payload = skb->data;
- if (!inet->hdrincl)
+ if (!inet_test_bit(HDRINCL, sk))
payload += offset;
ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
}
@@ -406,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
skb->len,
inet->inet_num, 0));
- if (inet->hdrincl) {
+ if (inet_test_bit(HDRINCL, sk)) {
if (skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb_reason(skb, SKB_DROP_REASON_SKB_CSUM);
@@ -762,12 +761,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- /* hdrincl should be READ_ONCE(inet->hdrincl)
- * but READ_ONCE() doesn't work with bit fields.
- * Doing this indirectly yields the same result.
- */
- hdrincl = inet->hdrincl;
- hdrincl = READ_ONCE(hdrincl);
+ hdrincl = inet_test_bit(HDRINCL, sk);
/*
* Get and verify the address.
@@ -1000,7 +994,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
case IPV6_HDRINCL:
if (sk->sk_type != SOCK_RAW)
return -EINVAL;
- inet_sk(sk)->hdrincl = !!val;
+ inet_assign_bit(HDRINCL, sk, val);
return 0;
case IPV6_CHECKSUM:
if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
@@ -1068,7 +1062,7 @@ static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
switch (optname) {
case IPV6_HDRINCL:
- val = inet_sk(sk)->hdrincl;
+ val = inet_test_bit(HDRINCL, sk);
break;
case IPV6_CHECKSUM:
/*
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 10751df16dab..db10c36f34bb 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3761,10 +3761,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
rt->dst_nocount = true;
if (cfg->fc_flags & RTF_EXPIRES)
- fib6_set_expires(rt, jiffies +
- clock_t_to_jiffies(cfg->fc_expires));
+ fib6_set_expires_locked(rt, jiffies +
+ clock_t_to_jiffies(cfg->fc_expires));
else
- fib6_clean_expires(rt);
+ fib6_clean_expires_locked(rt);
if (cfg->fc_protocol == RTPROT_UNSPEC)
cfg->fc_protocol = RTPROT_BOOT;
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index dd433cc265c8..24e2b4b494cb 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -109,15 +109,19 @@ struct bpf_lwt_prog {
#define next_csid_chk_lcnode_fn_bits(flen) \
next_csid_chk_lcblock_bits(flen)
+/* flag indicating that flavors are set up for a given End* behavior */
+#define SEG6_F_LOCAL_FLAVORS SEG6_F_ATTR(SEG6_LOCAL_FLAVORS)
+
#define SEG6_F_LOCAL_FLV_OP(flvname) BIT(SEG6_LOCAL_FLV_OP_##flvname)
+#define SEG6_F_LOCAL_FLV_NEXT_CSID SEG6_F_LOCAL_FLV_OP(NEXT_CSID)
#define SEG6_F_LOCAL_FLV_PSP SEG6_F_LOCAL_FLV_OP(PSP)
/* Supported RFC8986 Flavor operations are reported in this bitmask */
#define SEG6_LOCAL_FLV8986_SUPP_OPS SEG6_F_LOCAL_FLV_PSP
-/* Supported Flavor operations are reported in this bitmask */
-#define SEG6_LOCAL_FLV_SUPP_OPS (SEG6_F_LOCAL_FLV_OP(NEXT_CSID) | \
+#define SEG6_LOCAL_END_FLV_SUPP_OPS (SEG6_F_LOCAL_FLV_NEXT_CSID | \
SEG6_LOCAL_FLV8986_SUPP_OPS)
+#define SEG6_LOCAL_END_X_FLV_SUPP_OPS SEG6_F_LOCAL_FLV_NEXT_CSID
struct seg6_flavors_info {
/* Flavor operations */
@@ -411,9 +415,72 @@ static int end_next_csid_core(struct sk_buff *skb, struct seg6_local_lwt *slwt)
return input_action_end_finish(skb, slwt);
}
+static int input_action_end_x_finish(struct sk_buff *skb,
+ struct seg6_local_lwt *slwt)
+{
+ seg6_lookup_nexthop(skb, &slwt->nh6, 0);
+
+ return dst_input(skb);
+}
+
+static int input_action_end_x_core(struct sk_buff *skb,
+ struct seg6_local_lwt *slwt)
+{
+ struct ipv6_sr_hdr *srh;
+
+ srh = get_and_validate_srh(skb);
+ if (!srh)
+ goto drop;
+
+ advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
+
+ return input_action_end_x_finish(skb, slwt);
+
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int end_x_next_csid_core(struct sk_buff *skb,
+ struct seg6_local_lwt *slwt)
+{
+ const struct seg6_flavors_info *finfo = &slwt->flv_info;
+ struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
+
+ if (seg6_next_csid_is_arg_zero(daddr, finfo))
+ return input_action_end_x_core(skb, slwt);
+
+ /* update DA */
+ seg6_next_csid_advance_arg(daddr, finfo);
+
+ return input_action_end_x_finish(skb, slwt);
+}
+
static bool seg6_next_csid_enabled(__u32 fops)
{
- return fops & BIT(SEG6_LOCAL_FLV_OP_NEXT_CSID);
+ return fops & SEG6_F_LOCAL_FLV_NEXT_CSID;
+}
+
+/* Processing of SRv6 End, End.X, and End.T behaviors can be extended through
+ * the flavors framework. These behaviors must report the subset of (flavor)
+ * operations they currently implement. In this way, if a user specifies a
+ * flavor combination that is not supported by a given End* behavior, the
+ * kernel refuses to instantiate the tunnel and reports the error.
+ */
+static int seg6_flv_supp_ops_by_action(int action, __u32 *fops)
+{
+ switch (action) {
+ case SEG6_LOCAL_ACTION_END:
+ *fops = SEG6_LOCAL_END_FLV_SUPP_OPS;
+ break;
+ case SEG6_LOCAL_ACTION_END_X:
+ *fops = SEG6_LOCAL_END_X_FLV_SUPP_OPS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
/* We describe the packet state in relation to the absence/presence of the SRH
@@ -746,21 +813,14 @@ static int input_action_end(struct sk_buff *skb, struct seg6_local_lwt *slwt)
/* regular endpoint, and forward to specified nexthop */
static int input_action_end_x(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
- struct ipv6_sr_hdr *srh;
-
- srh = get_and_validate_srh(skb);
- if (!srh)
- goto drop;
-
- advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
-
- seg6_lookup_nexthop(skb, &slwt->nh6, 0);
+ const struct seg6_flavors_info *finfo = &slwt->flv_info;
+ __u32 fops = finfo->flv_ops;
- return dst_input(skb);
+ /* check for the presence of NEXT-C-SID since it applies first */
+ if (seg6_next_csid_enabled(fops))
+ return end_x_next_csid_core(skb, slwt);
-drop:
- kfree_skb(skb);
- return -EINVAL;
+ return input_action_end_x_core(skb, slwt);
}
static int input_action_end_t(struct sk_buff *skb, struct seg6_local_lwt *slwt)
@@ -1404,13 +1464,14 @@ static struct seg6_action_desc seg6_action_table[] = {
.action = SEG6_LOCAL_ACTION_END,
.attrs = 0,
.optattrs = SEG6_F_LOCAL_COUNTERS |
- SEG6_F_ATTR(SEG6_LOCAL_FLAVORS),
+ SEG6_F_LOCAL_FLAVORS,
.input = input_action_end,
},
{
.action = SEG6_LOCAL_ACTION_END_X,
.attrs = SEG6_F_ATTR(SEG6_LOCAL_NH6),
- .optattrs = SEG6_F_LOCAL_COUNTERS,
+ .optattrs = SEG6_F_LOCAL_COUNTERS |
+ SEG6_F_LOCAL_FLAVORS,
.input = input_action_end_x,
},
{
@@ -2070,7 +2131,8 @@ static int parse_nla_flavors(struct nlattr **attrs, struct seg6_local_lwt *slwt,
{
struct seg6_flavors_info *finfo = &slwt->flv_info;
struct nlattr *tb[SEG6_LOCAL_FLV_MAX + 1];
- unsigned long fops;
+ int action = slwt->action;
+ __u32 fops, supp_fops;
int rc;
rc = nla_parse_nested_deprecated(tb, SEG6_LOCAL_FLV_MAX,
@@ -2086,7 +2148,8 @@ static int parse_nla_flavors(struct nlattr **attrs, struct seg6_local_lwt *slwt,
return -EINVAL;
fops = nla_get_u32(tb[SEG6_LOCAL_FLV_OPERATION]);
- if (fops & ~SEG6_LOCAL_FLV_SUPP_OPS) {
+ rc = seg6_flv_supp_ops_by_action(action, &supp_fops);
+ if (rc < 0 || (fops & ~supp_fops)) {
NL_SET_ERR_MSG(extack, "Unsupported Flavor operation(s)");
return -EOPNOTSUPP;
}
@@ -2618,6 +2681,11 @@ int __init seg6_local_init(void)
*/
BUILD_BUG_ON(SEG6_LOCAL_MAX + 1 > BITS_PER_TYPE(unsigned long));
+ /* Check whether the number of defined flavors exceeds the maximum
+ * allowed value.
+ */
+ BUILD_BUG_ON(SEG6_LOCAL_FLV_OP_MAX + 1 > BITS_PER_TYPE(__u32));
+
/* If the default NEXT-C-SID Locator-Block/Node Function lengths (in
* bits) have been changed with invalid values, kernel build stops
* here.
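
The seg6_local.c changes let End.X carry flavor attributes (NEXT-C-SID only) while End keeps its wider mask, with seg6_flv_supp_ops_by_action() acting as the per-behavior gatekeeper described in the comment added above. A hedged illustration of that contract; the wrapper below is hypothetical shorthand for the check parse_nla_flavors() performs:

static bool seg6_flavors_allowed(int action, __u32 requested_fops)
{
	__u32 supported;

	/* behaviors without flavor support return -EOPNOTSUPP here */
	if (seg6_flv_supp_ops_by_action(action, &supported) < 0)
		return false;

	/* reject any requested flavor operation the behavior does not implement */
	return !(requested_fops & ~supported);
}
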
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1ea01b0d9be3..ebc6ae47cfea 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -420,7 +420,7 @@ try_again:
ip6_datagram_recv_common_ctl(sk, msg, skb);
if (is_udp4) {
- if (inet->cmsg_flags)
+ if (inet_cmsg_flags(inet))
ip_cmsg_recv_offset(msg, sk, skb,
sizeof(struct udphdr), off);
} else {
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index f9073bc7281f..9a2a9ed3ba47 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -552,7 +552,7 @@ static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
}
- if (inet->cmsg_flags)
+ if (inet_cmsg_flags(inet))
ip_cmsg_recv(msg, skb);
if (flags & MSG_TRUNC)
copied = skb->len;
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 5692daf57a4d..c75d9d88a053 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -9,6 +9,7 @@
#include <linux/inet.h>
#include <linux/kernel.h>
#include <net/tcp.h>
+#include <net/inet_common.h>
#include <net/netns/generic.h>
#include <net/mptcp.h>
#include <net/genetlink.h>
@@ -1005,8 +1006,7 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
bool is_ipv6 = sk->sk_family == AF_INET6;
int addrlen = sizeof(struct sockaddr_in);
struct sockaddr_storage addr;
- struct socket *ssock;
- struct sock *newsk;
+ struct sock *newsk, *ssk;
int backlog = 1024;
int err;
@@ -1032,28 +1032,32 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
&mptcp_keys[is_ipv6]);
lock_sock(newsk);
- ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
+ ssk = __mptcp_nmpc_sk(mptcp_sk(newsk));
release_sock(newsk);
- if (IS_ERR(ssock))
- return PTR_ERR(ssock);
+ if (IS_ERR(ssk))
+ return PTR_ERR(ssk);
mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
if (entry->addr.family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
#endif
- err = kernel_bind(ssock, (struct sockaddr *)&addr, addrlen);
+ if (ssk->sk_family == AF_INET)
+ err = inet_bind_sk(ssk, (struct sockaddr *)&addr, addrlen);
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (ssk->sk_family == AF_INET6)
+ err = inet6_bind_sk(ssk, (struct sockaddr *)&addr, addrlen);
+#endif
if (err)
return err;
inet_sk_state_store(newsk, TCP_LISTEN);
- err = kernel_listen(ssock, backlog);
- if (err)
- return err;
-
- mptcp_event_pm_listener(ssock->sk, MPTCP_EVENT_LISTENER_CREATED);
-
- return 0;
+ lock_sock(ssk);
+ err = __inet_listen_sk(ssk, backlog);
+ if (!err)
+ mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
+ release_sock(ssk);
+ return err;
}
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 48e649fe2360..6019a3cf1625 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -92,7 +92,6 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio;
WRITE_ONCE(msk->first, ssock->sk);
- WRITE_ONCE(msk->subflow, ssock);
subflow = mptcp_subflow_ctx(ssock->sk);
list_add(&subflow->node, &msk->conn_list);
sock_hold(ssock->sk);
@@ -102,6 +101,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
/* This is the first subflow, always with id 0 */
subflow->local_id_valid = 1;
mptcp_sock_graft(msk->first, sk->sk_socket);
+ iput(SOCK_INODE(ssock));
return 0;
}
@@ -109,7 +109,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
/* If the MPC handshake is not started, returns the first subflow,
* eventually allocating it.
*/
-struct socket *__mptcp_nmpc_socket(struct mptcp_sock *msk)
+struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
int ret;
@@ -117,10 +117,7 @@ struct socket *__mptcp_nmpc_socket(struct mptcp_sock *msk)
if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
return ERR_PTR(-EINVAL);
- if (!msk->subflow) {
- if (msk->first)
- return ERR_PTR(-EINVAL);
-
+ if (!msk->first) {
ret = __mptcp_socket_create(msk);
if (ret)
return ERR_PTR(ret);
@@ -128,7 +125,7 @@ struct socket *__mptcp_nmpc_socket(struct mptcp_sock *msk)
mptcp_sockopt_sync(msk, msk->first);
}
- return msk->subflow;
+ return msk->first;
}
static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
@@ -1643,7 +1640,6 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
{
unsigned int saved_flags = msg->msg_flags;
struct mptcp_sock *msk = mptcp_sk(sk);
- struct socket *ssock;
struct sock *ssk;
int ret;
@@ -1654,9 +1650,9 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
* fastopen attempt, no need to check for additional subflow status.
*/
if (msg->msg_flags & MSG_FASTOPEN) {
- ssock = __mptcp_nmpc_socket(msk);
- if (IS_ERR(ssock))
- return PTR_ERR(ssock);
+ ssk = __mptcp_nmpc_sk(msk);
+ if (IS_ERR(ssk))
+ return PTR_ERR(ssk);
}
if (!msk->first)
return -EINVAL;
@@ -1690,7 +1686,7 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
if (!mptcp_disconnect(sk, 0))
sk->sk_socket->state = SS_UNCONNECTED;
}
- inet_sk(sk)->defer_connect = 0;
+ inet_clear_bit(DEFER_CONNECT, sk);
return ret;
}
@@ -1708,7 +1704,8 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
lock_sock(sk);
- if (unlikely(inet_sk(sk)->defer_connect || msg->msg_flags & MSG_FASTOPEN)) {
+ if (unlikely(inet_test_bit(DEFER_CONNECT, sk) ||
+ msg->msg_flags & MSG_FASTOPEN)) {
int copied_syn = 0;
ret = mptcp_sendmsg_fastopen(sk, msg, len, &copied_syn);
@@ -2242,14 +2239,6 @@ static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
return min_stale_count > 1 ? backup : NULL;
}
-static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
-{
- if (msk->subflow) {
- iput(SOCK_INODE(msk->subflow));
- WRITE_ONCE(msk->subflow, NULL);
- }
-}
-
bool __mptcp_retransmit_pending_data(struct sock *sk)
{
struct mptcp_data_frag *cur, *rtx_head;
@@ -2328,7 +2317,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
goto out_release;
}
- dispose_it = !msk->subflow || ssk != msk->subflow->sk;
+ dispose_it = msk->free_first || ssk != msk->first;
if (dispose_it)
list_del(&subflow->node);
@@ -2349,7 +2338,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
* disconnect should never fail
*/
WARN_ON_ONCE(tcp_disconnect(ssk, 0));
- msk->subflow->state = SS_UNCONNECTED;
mptcp_subflow_ctx_reset(subflow);
release_sock(ssk);
@@ -2662,7 +2650,7 @@ unlock:
sock_put(sk);
}
-static int __mptcp_init_sock(struct sock *sk)
+static void __mptcp_init_sock(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -2689,8 +2677,6 @@ static int __mptcp_init_sock(struct sock *sk)
/* re-use the csk retrans timer for MPTCP-level retrans */
timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);
-
- return 0;
}
static void mptcp_ca_reset(struct sock *sk)
@@ -2708,11 +2694,8 @@ static void mptcp_ca_reset(struct sock *sk)
static int mptcp_init_sock(struct sock *sk)
{
struct net *net = sock_net(sk);
- int ret;
- ret = __mptcp_init_sock(sk);
- if (ret)
- return ret;
+ __mptcp_init_sock(sk);
if (!mptcp_is_enabled(net))
return -ENOPROTOOPT;
@@ -3110,7 +3093,6 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
msk = mptcp_sk(nsk);
msk->local_key = subflow_req->local_key;
msk->token = subflow_req->token;
- WRITE_ONCE(msk->subflow, NULL);
msk->in_accept_queue = 1;
WRITE_ONCE(msk->fully_established, false);
if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
@@ -3174,25 +3156,17 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
}
-static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
+static struct sock *mptcp_accept(struct sock *ssk, int flags, int *err,
bool kern)
{
- struct mptcp_sock *msk = mptcp_sk(sk);
- struct socket *listener;
struct sock *newsk;
- listener = READ_ONCE(msk->subflow);
- if (WARN_ON_ONCE(!listener)) {
- *err = -EINVAL;
- return NULL;
- }
-
- pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
- newsk = inet_csk_accept(listener->sk, flags, err, kern);
+ pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
+ newsk = inet_csk_accept(ssk, flags, err, kern);
if (!newsk)
return NULL;
- pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
+ pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
if (sk_is_mptcp(newsk)) {
struct mptcp_subflow_context *subflow;
struct sock *new_mptcp_sock;
@@ -3209,9 +3183,9 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
}
newsk = new_mptcp_sock;
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
} else {
- MPTCP_INC_STATS(sock_net(sk),
+ MPTCP_INC_STATS(sock_net(ssk),
MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
}
@@ -3252,10 +3226,8 @@ static void mptcp_destroy(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- /* clears msk->subflow, allowing the following to close
- * even the initial subflow
- */
- mptcp_dispose_initial_subflow(msk);
+ /* allow the following to close even the initial subflow */
+ msk->free_first = 1;
mptcp_destroy_common(msk, 0);
sk_sockets_allocated_dec(sk);
}
@@ -3405,14 +3377,12 @@ static void mptcp_unhash(struct sock *sk)
static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- struct socket *ssock;
- ssock = msk->subflow;
- pr_debug("msk=%p, subflow=%p", msk, ssock);
- if (WARN_ON_ONCE(!ssock))
+ pr_debug("msk=%p, ssk=%p", msk, msk->first);
+ if (WARN_ON_ONCE(!msk->first))
return -EINVAL;
- return inet_csk_get_port(ssock->sk, snum);
+ return inet_csk_get_port(msk->first, snum);
}
void mptcp_finish_connect(struct sock *ssk)
@@ -3587,25 +3557,24 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
- struct socket *ssock;
int err = -EINVAL;
+ struct sock *ssk;
- ssock = __mptcp_nmpc_socket(msk);
- if (IS_ERR(ssock))
- return PTR_ERR(ssock);
+ ssk = __mptcp_nmpc_sk(msk);
+ if (IS_ERR(ssk))
+ return PTR_ERR(ssk);
- mptcp_token_destroy(msk);
inet_sk_state_store(sk, TCP_SYN_SENT);
- subflow = mptcp_subflow_ctx(ssock->sk);
+ subflow = mptcp_subflow_ctx(ssk);
#ifdef CONFIG_TCP_MD5SIG
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
* TCP option space.
*/
- if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
+ if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
mptcp_subflow_early_fallback(msk, subflow);
#endif
- if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) {
- MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT);
+ if (subflow->request_mptcp && mptcp_token_new_connect(ssk)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
mptcp_subflow_early_fallback(msk, subflow);
}
if (likely(!__mptcp_check_fallback(msk)))
@@ -3614,25 +3583,42 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* if reaching here via the fastopen/sendmsg path, the caller already
* acquired the subflow socket lock, too.
*/
- if (msk->fastopening)
- err = __inet_stream_connect(ssock, uaddr, addr_len, O_NONBLOCK, 1);
- else
- err = inet_stream_connect(ssock, uaddr, addr_len, O_NONBLOCK);
- inet_sk(sk)->defer_connect = inet_sk(ssock->sk)->defer_connect;
+ if (!msk->fastopening)
+ lock_sock(ssk);
+
+ /* the following mirrors closely a very small chunk of code from
+ * __inet_stream_connect()
+ */
+ if (ssk->sk_state != TCP_CLOSE)
+ goto out;
+
+ if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) {
+ err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len);
+ if (err)
+ goto out;
+ }
+
+ err = ssk->sk_prot->connect(ssk, uaddr, addr_len);
+ if (err < 0)
+ goto out;
+
+ inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk));
+
+out:
+ if (!msk->fastopening)
+ release_sock(ssk);
/* on successful connect, the msk state will be moved to established by
* subflow_finish_connect()
*/
- if (unlikely(err && err != -EINPROGRESS)) {
- inet_sk_state_store(sk, inet_sk_state_load(ssock->sk));
+ if (unlikely(err)) {
+ /* avoid leaving a dangling token in an unconnected socket */
+ mptcp_token_destroy(msk);
+ inet_sk_state_store(sk, TCP_CLOSE);
return err;
}
- mptcp_copy_inaddrs(sk, ssock->sk);
-
- /* silence EINPROGRESS and let the caller inet_stream_connect
- * handle the connection in progress
- */
+ mptcp_copy_inaddrs(sk, ssk);
return 0;
}
@@ -3673,22 +3659,27 @@ static struct proto mptcp_prot = {
static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct mptcp_sock *msk = mptcp_sk(sock->sk);
- struct socket *ssock;
- int err;
+ struct sock *ssk, *sk = sock->sk;
+ int err = -EINVAL;
- lock_sock(sock->sk);
- ssock = __mptcp_nmpc_socket(msk);
- if (IS_ERR(ssock)) {
- err = PTR_ERR(ssock);
+ lock_sock(sk);
+ ssk = __mptcp_nmpc_sk(msk);
+ if (IS_ERR(ssk)) {
+ err = PTR_ERR(ssk);
goto unlock;
}
- err = READ_ONCE(ssock->ops)->bind(ssock, uaddr, addr_len);
+ if (sk->sk_family == AF_INET)
+ err = inet_bind_sk(ssk, uaddr, addr_len);
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ else if (sk->sk_family == AF_INET6)
+ err = inet6_bind_sk(ssk, uaddr, addr_len);
+#endif
if (!err)
- mptcp_copy_inaddrs(sock->sk, ssock->sk);
+ mptcp_copy_inaddrs(sk, ssk);
unlock:
- release_sock(sock->sk);
+ release_sock(sk);
return err;
}
@@ -3696,7 +3687,7 @@ static int mptcp_listen(struct socket *sock, int backlog)
{
struct mptcp_sock *msk = mptcp_sk(sock->sk);
struct sock *sk = sock->sk;
- struct socket *ssock;
+ struct sock *ssk;
int err;
pr_debug("msk=%p", msk);
@@ -3707,22 +3698,24 @@ static int mptcp_listen(struct socket *sock, int backlog)
if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
goto unlock;
- ssock = __mptcp_nmpc_socket(msk);
- if (IS_ERR(ssock)) {
- err = PTR_ERR(ssock);
+ ssk = __mptcp_nmpc_sk(msk);
+ if (IS_ERR(ssk)) {
+ err = PTR_ERR(ssk);
goto unlock;
}
- mptcp_token_destroy(msk);
inet_sk_state_store(sk, TCP_LISTEN);
sock_set_flag(sk, SOCK_RCU_FREE);
- err = READ_ONCE(ssock->ops)->listen(ssock, backlog);
- inet_sk_state_store(sk, inet_sk_state_load(ssock->sk));
+ lock_sock(ssk);
+ err = __inet_listen_sk(ssk, backlog);
+ release_sock(ssk);
+ inet_sk_state_store(sk, inet_sk_state_load(ssk));
+
if (!err) {
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- mptcp_copy_inaddrs(sk, ssock->sk);
- mptcp_event_pm_listener(ssock->sk, MPTCP_EVENT_LISTENER_CREATED);
+ mptcp_copy_inaddrs(sk, ssk);
+ mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
}
unlock:
@@ -3734,8 +3727,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
int flags, bool kern)
{
struct mptcp_sock *msk = mptcp_sk(sock->sk);
- struct socket *ssock;
- struct sock *newsk;
+ struct sock *ssk, *newsk;
int err;
pr_debug("msk=%p", msk);
@@ -3743,11 +3735,11 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
/* Buggy applications can call accept on socket states other than LISTEN
* but no need to allocate the first subflow just to error out.
*/
- ssock = READ_ONCE(msk->subflow);
- if (!ssock)
+ ssk = READ_ONCE(msk->first);
+ if (!ssk)
return -EINVAL;
- newsk = mptcp_accept(sock->sk, flags, &err, kern);
+ newsk = mptcp_accept(ssk, flags, &err, kern);
if (!newsk)
return err;
@@ -3774,11 +3766,10 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
/* Do late cleanup for the first subflow as necessary. Also
* deal with bad peers not doing a complete shutdown.
*/
- if (msk->first &&
- unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
+ if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
__mptcp_close_ssk(newsk, msk->first,
mptcp_subflow_ctx(msk->first), 0);
- if (unlikely(list_empty(&msk->conn_list)))
+ if (unlikely(list_is_singular(&msk->conn_list)))
inet_sk_state_store(newsk, TCP_CLOSE);
}
}
@@ -3817,12 +3808,12 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
state = inet_sk_state_load(sk);
pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
if (state == TCP_LISTEN) {
- struct socket *ssock = READ_ONCE(msk->subflow);
+ struct sock *ssk = READ_ONCE(msk->first);
- if (WARN_ON_ONCE(!ssock || !ssock->sk))
+ if (WARN_ON_ONCE(!ssk))
return 0;
- return inet_csk_listen_poll(ssock->sk);
+ return inet_csk_listen_poll(ssk);
}
shutdown = READ_ONCE(sk->sk_shutdown);
@@ -3837,7 +3828,8 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
mask |= EPOLLOUT | EPOLLWRNORM;
else
mask |= mptcp_check_writeable(msk);
- } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
+ } else if (state == TCP_SYN_SENT &&
+ inet_test_bit(DEFER_CONNECT, sk)) {
/* cf tcp_poll() note about TFO */
mask |= EPOLLOUT | EPOLLWRNORM;
}
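
__mptcp_nmpc_sk() keeps the kernel's ERR_PTR convention: one pointer return value that either carries the object or an encoded negative errno, which callers unpack with IS_ERR()/PTR_ERR(). A simplified, freestanding sketch of that convention (not the kernel's err.h; constants and helper names chosen for illustration):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Encode small negative errno values in the pointer itself, the way
 * ERR_PTR()/IS_ERR()/PTR_ERR() do in the kernel (simplified here).
 */
#define MAX_ERRNO 4095

static inline void *err_ptr(long err)
{
	return (void *)(intptr_t)err;
}

static inline int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static inline long ptr_err(const void *ptr)
{
	return (long)(intptr_t)ptr;
}

/* Return the "first subflow" or an encoded error, like __mptcp_nmpc_sk(). */
static void *get_first(int ready)
{
	if (!ready)
		return err_ptr(-EINVAL);
	return malloc(64);
}

int main(void)
{
	void *obj = get_first(0);

	if (is_err(obj))
		printf("error: %ld\n", ptr_err(obj));	/* prints -22 */

	obj = get_first(1);
	if (!is_err(obj)) {
		printf("got object %p\n", obj);
		free(obj);
	}
	return 0;
}
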
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 79fc5cdb67bc..38c7ea013361 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -299,7 +299,8 @@ struct mptcp_sock {
cork:1,
nodelay:1,
fastopening:1,
- in_accept_queue:1;
+ in_accept_queue:1,
+ free_first:1;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
@@ -308,12 +309,10 @@ struct mptcp_sock {
struct list_head rtx_queue;
struct mptcp_data_frag *first_pending;
struct list_head join_list;
- struct socket *subflow; /* outgoing connect/listener/!mp_capable
- * The mptcp ops can safely dereference, using suitable
- * ONCE annotation, the subflow outside the socket
- * lock as such sock is freed after close().
- */
- struct sock *first;
+ struct sock *first; /* The mptcp ops can safely dereference, using suitable
+ * ONCE annotation, the subflow outside the socket
+ * lock as such sock is freed after close().
+ */
struct mptcp_pm_data pm;
struct {
u32 space; /* bytes copied in last measurement window */
@@ -640,7 +639,7 @@ void __mptcp_subflow_send_ack(struct sock *ssk);
void mptcp_subflow_reset(struct sock *ssk);
void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
void mptcp_sock_graft(struct sock *sk, struct socket *parent);
-struct socket *__mptcp_nmpc_socket(struct mptcp_sock *msk);
+struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk);
bool __mptcp_close(struct sock *sk, long timeout);
void mptcp_cancel_work(struct sock *sk);
void __mptcp_unaccepted_force_close(struct sock *sk);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index a3f1fe810cc9..8260202c0066 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -292,7 +292,7 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = (struct sock *)msk;
- struct socket *ssock;
+ struct sock *ssk;
int ret;
switch (optname) {
@@ -301,22 +301,22 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
case SO_BINDTODEVICE:
case SO_BINDTOIFINDEX:
lock_sock(sk);
- ssock = __mptcp_nmpc_socket(msk);
- if (IS_ERR(ssock)) {
+ ssk = __mptcp_nmpc_sk(msk);
+ if (IS_ERR(ssk)) {
release_sock(sk);
- return PTR_ERR(ssock);
+ return PTR_ERR(ssk);
}
- ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen);
+ ret = sk_setsockopt(ssk, SOL_SOCKET, optname, optval, optlen);
if (ret == 0) {
if (optname == SO_REUSEPORT)
- sk->sk_reuseport = ssock->sk->sk_reuseport;
+ sk->sk_reuseport = ssk->sk_reuseport;
else if (optname == SO_REUSEADDR)
- sk->sk_reuse = ssock->sk->sk_reuse;
+ sk->sk_reuse = ssk->sk_reuse;
else if (optname == SO_BINDTODEVICE)
- sk->sk_bound_dev_if = ssock->sk->sk_bound_dev_if;
+ sk->sk_bound_dev_if = ssk->sk_bound_dev_if;
else if (optname == SO_BINDTOIFINDEX)
- sk->sk_bound_dev_if = ssock->sk->sk_bound_dev_if;
+ sk->sk_bound_dev_if = ssk->sk_bound_dev_if;
}
release_sock(sk);
return ret;
@@ -390,20 +390,20 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
{
struct sock *sk = (struct sock *)msk;
int ret = -EOPNOTSUPP;
- struct socket *ssock;
+ struct sock *ssk;
switch (optname) {
case IPV6_V6ONLY:
case IPV6_TRANSPARENT:
case IPV6_FREEBIND:
lock_sock(sk);
- ssock = __mptcp_nmpc_socket(msk);
- if (IS_ERR(ssock)) {
+ ssk = __mptcp_nmpc_sk(msk);
+ if (IS_ERR(ssk)) {
release_sock(sk);
- return PTR_ERR(ssock);
+ return PTR_ERR(ssk);
}
- ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
+ ret = tcp_setsockopt(ssk, SOL_IPV6, optname, optval, optlen);
if (ret != 0) {
release_sock(sk);
return ret;
@@ -413,13 +413,15 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
switch (optname) {
case IPV6_V6ONLY:
- sk->sk_ipv6only = ssock->sk->sk_ipv6only;
+ sk->sk_ipv6only = ssk->sk_ipv6only;
break;
case IPV6_TRANSPARENT:
- inet_sk(sk)->transparent = inet_sk(ssock->sk)->transparent;
+ inet_assign_bit(TRANSPARENT, sk,
+ inet_test_bit(TRANSPARENT, ssk));
break;
case IPV6_FREEBIND:
- inet_sk(sk)->freebind = inet_sk(ssock->sk)->freebind;
+ inet_assign_bit(FREEBIND, sk,
+ inet_test_bit(FREEBIND, ssk));
break;
}
@@ -684,8 +686,7 @@ static int mptcp_setsockopt_sol_ip_set_transparent(struct mptcp_sock *msk, int o
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = (struct sock *)msk;
- struct inet_sock *issk;
- struct socket *ssock;
+ struct sock *ssk;
int err;
err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen);
@@ -694,20 +695,19 @@ static int mptcp_setsockopt_sol_ip_set_transparent(struct mptcp_sock *msk, int o
lock_sock(sk);
- ssock = __mptcp_nmpc_socket(msk);
- if (IS_ERR(ssock)) {
+ ssk = __mptcp_nmpc_sk(msk);
+ if (IS_ERR(ssk)) {
release_sock(sk);
- return PTR_ERR(ssock);
+ return PTR_ERR(ssk);
}
- issk = inet_sk(ssock->sk);
-
switch (optname) {
case IP_FREEBIND:
- issk->freebind = inet_sk(sk)->freebind;
+ inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
break;
case IP_TRANSPARENT:
- issk->transparent = inet_sk(sk)->transparent;
+ inet_assign_bit(TRANSPARENT, ssk,
+ inet_test_bit(TRANSPARENT, sk));
break;
default:
release_sock(sk);
@@ -763,18 +763,18 @@ static int mptcp_setsockopt_first_sf_only(struct mptcp_sock *msk, int level, int
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = (struct sock *)msk;
- struct socket *sock;
+ struct sock *ssk;
int ret;
/* Limit to first subflow, before the connection establishment */
lock_sock(sk);
- sock = __mptcp_nmpc_socket(msk);
- if (IS_ERR(sock)) {
- ret = PTR_ERR(sock);
+ ssk = __mptcp_nmpc_sk(msk);
+ if (IS_ERR(ssk)) {
+ ret = PTR_ERR(ssk);
goto unlock;
}
- ret = tcp_setsockopt(sock->sk, level, optname, optval, optlen);
+ ret = tcp_setsockopt(ssk, level, optname, optval, optlen);
unlock:
release_sock(sk);
@@ -864,9 +864,8 @@ static int mptcp_getsockopt_first_sf_only(struct mptcp_sock *msk, int level, int
char __user *optval, int __user *optlen)
{
struct sock *sk = (struct sock *)msk;
- struct socket *ssock;
- int ret;
struct sock *ssk;
+ int ret;
lock_sock(sk);
ssk = msk->first;
@@ -875,13 +874,13 @@ static int mptcp_getsockopt_first_sf_only(struct mptcp_sock *msk, int level, int
goto out;
}
- ssock = __mptcp_nmpc_socket(msk);
- if (IS_ERR(ssock)) {
- ret = PTR_ERR(ssock);
+ ssk = __mptcp_nmpc_sk(msk);
+ if (IS_ERR(ssk)) {
+ ret = PTR_ERR(ssk);
goto out;
}
- ret = tcp_getsockopt(ssock->sk, level, optname, optval, optlen);
+ ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
out:
release_sock(sk);
@@ -1441,8 +1440,8 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
__tcp_sock_set_cork(ssk, !!msk->cork);
__tcp_sock_set_nodelay(ssk, !!msk->nodelay);
- inet_sk(ssk)->transparent = inet_sk(sk)->transparent;
- inet_sk(ssk)->freebind = inet_sk(sk)->freebind;
+ inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
+ inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
}
static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index d27f4eccce6d..a3a6753a1db7 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -563,7 +563,7 @@ int ncsi_send_netlink_timeout(struct ncsi_request *nr,
int ncsi_send_netlink_err(struct net_device *dev,
u32 snd_seq,
u32 snd_portid,
- struct nlmsghdr *nlhdr,
+ const struct nlmsghdr *nlhdr,
int err)
{
struct nlmsghdr *nlh;
diff --git a/net/ncsi/ncsi-netlink.h b/net/ncsi/ncsi-netlink.h
index 39a1a9d7bf77..747767ea0aae 100644
--- a/net/ncsi/ncsi-netlink.h
+++ b/net/ncsi/ncsi-netlink.h
@@ -19,7 +19,7 @@ int ncsi_send_netlink_timeout(struct ncsi_request *nr,
int ncsi_send_netlink_err(struct net_device *dev,
u32 snd_seq,
u32 snd_portid,
- struct nlmsghdr *nlhdr,
+ const struct nlmsghdr *nlhdr,
int err);
#endif /* __NCSI_NETLINK_H__ */
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index cb83ca506c5c..3230506ae3ff 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1346,7 +1346,7 @@ ip_vs_out_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *stat
if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
af == AF_INET)) {
- if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
+ if (sk->sk_family == PF_INET && inet_test_bit(NODEFRAG, sk))
return NF_ACCEPT;
}
@@ -1946,7 +1946,7 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
af == AF_INET)) {
- if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
+ if (sk->sk_family == PF_INET && inet_test_bit(NODEFRAG, sk))
return NF_ACCEPT;
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 264f2f87a437..da5af28ff57b 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1297,11 +1297,9 @@ static void set_sock_size(struct sock *sk, int mode, int val)
*/
static void set_mcast_loop(struct sock *sk, u_char loop)
{
- struct inet_sock *inet = inet_sk(sk);
-
/* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */
lock_sock(sk);
- inet->mc_loop = loop ? 1 : 0;
+ inet_assign_bit(MC_LOOP, sk, loop);
#ifdef CONFIG_IP_VS_IPV6
if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 96c605e45235..642b9d382fb4 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -84,7 +84,7 @@ struct listeners {
static inline int netlink_is_kernel(struct sock *sk)
{
- return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
+ return nlk_test_bit(KERNEL_SOCKET, sk);
}
struct netlink_table *nl_table __read_mostly;
@@ -349,9 +349,7 @@ static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
static void netlink_overrun(struct sock *sk)
{
- struct netlink_sock *nlk = nlk_sk(sk);
-
- if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
+ if (!nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
if (!test_and_set_bit(NETLINK_S_CONGESTED,
&nlk_sk(sk)->state)) {
sk->sk_err = ENOBUFS;
@@ -1407,9 +1405,7 @@ EXPORT_SYMBOL_GPL(netlink_has_listeners);
bool netlink_strict_get_check(struct sk_buff *skb)
{
- const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);
-
- return nlk->flags & NETLINK_F_STRICT_CHK;
+ return nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
}
EXPORT_SYMBOL_GPL(netlink_strict_get_check);
@@ -1455,7 +1451,7 @@ static void do_one_broadcast(struct sock *sk,
return;
if (!net_eq(sock_net(sk), p->net)) {
- if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
+ if (!nlk_test_bit(LISTEN_ALL_NSID, sk))
return;
if (!peernet_has_id(sock_net(sk), p->net))
@@ -1488,7 +1484,7 @@ static void do_one_broadcast(struct sock *sk,
netlink_overrun(sk);
/* Clone failed. Notify ALL listeners. */
p->failure = 1;
- if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
+ if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
p->delivery_failure = 1;
goto out;
}
@@ -1510,7 +1506,7 @@ static void do_one_broadcast(struct sock *sk,
val = netlink_broadcast_deliver(sk, p->skb2);
if (val < 0) {
netlink_overrun(sk);
- if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
+ if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
p->delivery_failure = 1;
} else {
p->congested |= val;
@@ -1604,7 +1600,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
!test_bit(p->group - 1, nlk->groups))
goto out;
- if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
+ if (p->code == ENOBUFS && nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
ret = 1;
goto out;
}
@@ -1668,7 +1664,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
unsigned int val = 0;
- int err;
+ int nr = -1;
if (level != SOL_NETLINK)
return -ENOPROTOOPT;
@@ -1679,14 +1675,12 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case NETLINK_PKTINFO:
- if (val)
- nlk->flags |= NETLINK_F_RECV_PKTINFO;
- else
- nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
- err = 0;
+ nr = NETLINK_F_RECV_PKTINFO;
break;
case NETLINK_ADD_MEMBERSHIP:
case NETLINK_DROP_MEMBERSHIP: {
+ int err;
+
if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
return -EPERM;
err = netlink_realloc_groups(sk);
@@ -1706,61 +1700,38 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
nlk->netlink_unbind(sock_net(sk), val);
- err = 0;
break;
}
case NETLINK_BROADCAST_ERROR:
- if (val)
- nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
- else
- nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
- err = 0;
+ nr = NETLINK_F_BROADCAST_SEND_ERROR;
break;
case NETLINK_NO_ENOBUFS:
+ assign_bit(NETLINK_F_RECV_NO_ENOBUFS, &nlk->flags, val);
if (val) {
- nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
clear_bit(NETLINK_S_CONGESTED, &nlk->state);
wake_up_interruptible(&nlk->wait);
- } else {
- nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
}
- err = 0;
break;
case NETLINK_LISTEN_ALL_NSID:
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
return -EPERM;
-
- if (val)
- nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
- else
- nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
- err = 0;
+ nr = NETLINK_F_LISTEN_ALL_NSID;
break;
case NETLINK_CAP_ACK:
- if (val)
- nlk->flags |= NETLINK_F_CAP_ACK;
- else
- nlk->flags &= ~NETLINK_F_CAP_ACK;
- err = 0;
+ nr = NETLINK_F_CAP_ACK;
break;
case NETLINK_EXT_ACK:
- if (val)
- nlk->flags |= NETLINK_F_EXT_ACK;
- else
- nlk->flags &= ~NETLINK_F_EXT_ACK;
- err = 0;
+ nr = NETLINK_F_EXT_ACK;
break;
case NETLINK_GET_STRICT_CHK:
- if (val)
- nlk->flags |= NETLINK_F_STRICT_CHK;
- else
- nlk->flags &= ~NETLINK_F_STRICT_CHK;
- err = 0;
+ nr = NETLINK_F_STRICT_CHK;
break;
default:
- err = -ENOPROTOOPT;
+ return -ENOPROTOOPT;
}
- return err;
+ if (nr >= 0)
+ assign_bit(nr, &nlk->flags, val);
+ return 0;
}
static int netlink_getsockopt(struct socket *sock, int level, int optname,
@@ -1827,7 +1798,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
return -EINVAL;
len = sizeof(int);
- val = nlk->flags & flag ? 1 : 0;
+ val = test_bit(flag, &nlk->flags);
if (put_user(len, optlen) ||
copy_to_user(optval, &val, len))
@@ -2004,9 +1975,9 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
msg->msg_namelen = sizeof(*addr);
}
- if (nlk->flags & NETLINK_F_RECV_PKTINFO)
+ if (nlk_test_bit(RECV_PKTINFO, sk))
netlink_cmsg_recv_pktinfo(msg, skb);
- if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
+ if (nlk_test_bit(LISTEN_ALL_NSID, sk))
netlink_cmsg_listen_all_nsid(sk, msg, skb);
memset(&scm, 0, sizeof(scm));
@@ -2083,7 +2054,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
goto out_sock_release;
nlk = nlk_sk(sk);
- nlk->flags |= NETLINK_F_KERNEL_SOCKET;
+ set_bit(NETLINK_F_KERNEL_SOCKET, &nlk->flags);
netlink_table_grab();
if (!nl_table[unit].registered) {
@@ -2218,7 +2189,7 @@ static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
nl_dump_check_consistent(cb, nlh);
memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno));
- if (extack->_msg && nlk->flags & NETLINK_F_EXT_ACK) {
+ if (extack->_msg && test_bit(NETLINK_F_EXT_ACK, &nlk->flags)) {
nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg))
nlmsg_end(skb, nlh);
@@ -2347,8 +2318,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control)
{
- struct netlink_sock *nlk, *nlk2;
struct netlink_callback *cb;
+ struct netlink_sock *nlk;
struct sock *sk;
int ret;
@@ -2383,8 +2354,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
cb->min_dump_alloc = control->min_dump_alloc;
cb->skb = skb;
- nlk2 = nlk_sk(NETLINK_CB(skb).sk);
- cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK);
+ cb->strict_check = nlk_test_bit(STRICT_CHK, NETLINK_CB(skb).sk);
if (control->start) {
cb->extack = control->extack;
@@ -2428,7 +2398,7 @@ netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
{
size_t tlvlen;
- if (!extack || !(nlk->flags & NETLINK_F_EXT_ACK))
+ if (!extack || !test_bit(NETLINK_F_EXT_ACK, &nlk->flags))
return 0;
tlvlen = 0;
@@ -2500,7 +2470,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
* requests to cap the error message, and get extra error data if
* requested.
*/
- if (err && !(nlk->flags & NETLINK_F_CAP_ACK))
+ if (err && !test_bit(NETLINK_F_CAP_ACK, &nlk->flags))
payload += nlmsg_len(nlh);
else
flags |= NLM_F_CAPPED;
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index fd424cd63f31..2145979b9986 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -8,14 +8,16 @@
#include <net/sock.h>
/* flags */
-#define NETLINK_F_KERNEL_SOCKET 0x1
-#define NETLINK_F_RECV_PKTINFO 0x2
-#define NETLINK_F_BROADCAST_SEND_ERROR 0x4
-#define NETLINK_F_RECV_NO_ENOBUFS 0x8
-#define NETLINK_F_LISTEN_ALL_NSID 0x10
-#define NETLINK_F_CAP_ACK 0x20
-#define NETLINK_F_EXT_ACK 0x40
-#define NETLINK_F_STRICT_CHK 0x80
+enum {
+ NETLINK_F_KERNEL_SOCKET,
+ NETLINK_F_RECV_PKTINFO,
+ NETLINK_F_BROADCAST_SEND_ERROR,
+ NETLINK_F_RECV_NO_ENOBUFS,
+ NETLINK_F_LISTEN_ALL_NSID,
+ NETLINK_F_CAP_ACK,
+ NETLINK_F_EXT_ACK,
+ NETLINK_F_STRICT_CHK,
+};
#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long))
@@ -23,10 +25,10 @@
struct netlink_sock {
/* struct sock has to be the first member of netlink_sock */
struct sock sk;
+ unsigned long flags;
u32 portid;
u32 dst_portid;
u32 dst_group;
- u32 flags;
u32 subscriptions;
u32 ngroups;
unsigned long *groups;
@@ -56,6 +58,8 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
return container_of(sk, struct netlink_sock, sk);
}
+#define nlk_test_bit(nr, sk) test_bit(NETLINK_F_##nr, &nlk_sk(sk)->flags)
+
struct netlink_table {
struct rhashtable hash;
struct hlist_head mc_list;
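
The header change above turns the NETLINK_F_* masks into plain bit numbers stored in an unsigned long, so the socket code can use test_bit()/set_bit()/assign_bit() plus the nlk_test_bit() wrapper. A non-atomic userspace sketch of the same pattern (the kernel helpers are additionally atomic; the names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Bit numbers instead of pre-shifted masks, as in the new enum above. */
enum {
	EX_F_KERNEL_SOCKET,
	EX_F_RECV_PKTINFO,
	EX_F_EXT_ACK,
};

struct ex_sock {
	unsigned long flags;
};

/* Non-atomic stand-ins for assign_bit()/test_bit(); the kernel versions
 * additionally guarantee atomicity on the flags word.
 */
static void ex_assign_bit(int nr, unsigned long *addr, bool value)
{
	if (value)
		*addr |= 1UL << nr;
	else
		*addr &= ~(1UL << nr);
}

static bool ex_test_bit(int nr, const unsigned long *addr)
{
	return *addr & (1UL << nr);
}

int main(void)
{
	struct ex_sock sk = { 0 };

	/* Mirrors the netlink_setsockopt() rework: map the option to a bit
	 * number once, then assign it in a single place.
	 */
	ex_assign_bit(EX_F_EXT_ACK, &sk.flags, true);
	printf("ext_ack=%d pktinfo=%d\n",
	       ex_test_bit(EX_F_EXT_ACK, &sk.flags),
	       ex_test_bit(EX_F_RECV_PKTINFO, &sk.flags));
	return 0;
}
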
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index e4f21b1067bc..9c4f231be275 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -27,15 +27,15 @@ static int sk_diag_put_flags(struct sock *sk, struct sk_buff *skb)
if (nlk->cb_running)
flags |= NDIAG_FLAG_CB_RUNNING;
- if (nlk->flags & NETLINK_F_RECV_PKTINFO)
+ if (nlk_test_bit(RECV_PKTINFO, sk))
flags |= NDIAG_FLAG_PKTINFO;
- if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
+ if (nlk_test_bit(BROADCAST_SEND_ERROR, sk))
flags |= NDIAG_FLAG_BROADCAST_ERROR;
- if (nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)
+ if (nlk_test_bit(RECV_NO_ENOBUFS, sk))
flags |= NDIAG_FLAG_NO_ENOBUFS;
- if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
+ if (nlk_test_bit(LISTEN_ALL_NSID, sk))
flags |= NDIAG_FLAG_LISTEN_ALL_NSID;
- if (nlk->flags & NETLINK_F_CAP_ACK)
+ if (nlk_test_bit(CAP_ACK, sk))
flags |= NDIAG_FLAG_CAP_ACK;
return nla_put_u32(skb, NETLINK_DIAG_FLAGS, flags);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 6bd2ce51271f..8315d31b53db 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -52,6 +52,18 @@ static void genl_unlock_all(void)
up_write(&cb_lock);
}
+static void genl_op_lock(const struct genl_family *family)
+{
+ if (!family->parallel_ops)
+ genl_lock();
+}
+
+static void genl_op_unlock(const struct genl_family *family)
+{
+ if (!family->parallel_ops)
+ genl_unlock();
+}
+
static DEFINE_IDR(genl_fam_idr);
/*
@@ -832,64 +844,63 @@ static int genl_start(struct netlink_callback *cb)
genl_family_rcv_msg_attrs_free(attrs);
return -ENOMEM;
}
- info->family = ctx->family;
info->op = *ops;
- info->attrs = attrs;
+ info->info.family = ctx->family;
+ info->info.snd_seq = cb->nlh->nlmsg_seq;
+ info->info.snd_portid = NETLINK_CB(cb->skb).portid;
+ info->info.nlhdr = cb->nlh;
+ info->info.genlhdr = nlmsg_data(cb->nlh);
+ info->info.attrs = attrs;
+ genl_info_net_set(&info->info, sock_net(cb->skb->sk));
+ info->info.extack = cb->extack;
+ memset(&info->info.user_ptr, 0, sizeof(info->info.user_ptr));
cb->data = info;
if (ops->start) {
- if (!ctx->family->parallel_ops)
- genl_lock();
+ genl_op_lock(ctx->family);
rc = ops->start(cb);
- if (!ctx->family->parallel_ops)
- genl_unlock();
+ genl_op_unlock(ctx->family);
}
if (rc) {
- genl_family_rcv_msg_attrs_free(info->attrs);
+ genl_family_rcv_msg_attrs_free(info->info.attrs);
genl_dumpit_info_free(info);
cb->data = NULL;
}
return rc;
}
-static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+static int genl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
- const struct genl_split_ops *ops = &genl_dumpit_info(cb)->op;
+ struct genl_dumpit_info *dump_info = cb->data;
+ const struct genl_split_ops *ops = &dump_info->op;
+ struct genl_info *info = &dump_info->info;
int rc;
- genl_lock();
+ info->extack = cb->extack;
+
+ genl_op_lock(info->family);
rc = ops->dumpit(skb, cb);
- genl_unlock();
+ genl_op_unlock(info->family);
return rc;
}
-static int genl_lock_done(struct netlink_callback *cb)
+static int genl_done(struct netlink_callback *cb)
{
- const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- const struct genl_split_ops *ops = &info->op;
+ struct genl_dumpit_info *dump_info = cb->data;
+ const struct genl_split_ops *ops = &dump_info->op;
+ struct genl_info *info = &dump_info->info;
int rc = 0;
+ info->extack = cb->extack;
+
if (ops->done) {
- genl_lock();
+ genl_op_lock(info->family);
rc = ops->done(cb);
- genl_unlock();
+ genl_op_unlock(info->family);
}
genl_family_rcv_msg_attrs_free(info->attrs);
- genl_dumpit_info_free(info);
- return rc;
-}
-
-static int genl_parallel_done(struct netlink_callback *cb)
-{
- const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- const struct genl_split_ops *ops = &info->op;
- int rc = 0;
-
- if (ops->done)
- rc = ops->done(cb);
- genl_family_rcv_msg_attrs_free(info->attrs);
- genl_dumpit_info_free(info);
+ genl_dumpit_info_free(dump_info);
return rc;
}
@@ -901,6 +912,14 @@ static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
int hdrlen, struct net *net)
{
struct genl_start_context ctx;
+ struct netlink_dump_control c = {
+ .module = family->module,
+ .data = &ctx,
+ .start = genl_start,
+ .dump = genl_dumpit,
+ .done = genl_done,
+ .extack = extack,
+ };
int err;
ctx.family = family;
@@ -909,31 +928,9 @@ static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
ctx.ops = ops;
ctx.hdrlen = hdrlen;
- if (!family->parallel_ops) {
- struct netlink_dump_control c = {
- .module = family->module,
- .data = &ctx,
- .start = genl_start,
- .dump = genl_lock_dumpit,
- .done = genl_lock_done,
- .extack = extack,
- };
-
- genl_unlock();
- err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
- genl_lock();
- } else {
- struct netlink_dump_control c = {
- .module = family->module,
- .data = &ctx,
- .start = genl_start,
- .dump = ops->dumpit,
- .done = genl_parallel_done,
- .extack = extack,
- };
-
- err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
- }
+ genl_op_unlock(family);
+ err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+ genl_op_lock(family);
return err;
}
@@ -957,9 +954,9 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
info.snd_seq = nlh->nlmsg_seq;
info.snd_portid = NETLINK_CB(skb).portid;
+ info.family = family;
info.nlhdr = nlh;
info.genlhdr = nlmsg_data(nlh);
- info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
info.attrs = attrbuf;
info.extack = extack;
genl_info_net_set(&info, net);
@@ -1065,13 +1062,9 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
if (family == NULL)
return -ENOENT;
- if (!family->parallel_ops)
- genl_lock();
-
+ genl_op_lock(family);
err = genl_family_rcv_msg(family, skb, nlh, extack);
-
- if (!family->parallel_ops)
- genl_unlock();
+ genl_op_unlock(family);
return err;
}
@@ -1396,7 +1389,7 @@ static int ctrl_dumppolicy_start(struct netlink_callback *cb)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
- struct nlattr **tb = info->attrs;
+ struct nlattr **tb = info->info.attrs;
const struct genl_family *rt;
struct genl_op_iter i;
int err;
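
genl_op_lock()/genl_op_unlock() above fold the repeated "take genl_lock only for non-parallel families" pattern into two helpers, which in turn lets the dump path share a single netlink_dump_control. A small pthread-based sketch of that conditional-locking helper shape (a userspace stand-in, not the genetlink code; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

struct ex_family {
	const char *name;
	bool parallel_ops;	/* families that handle their own locking */
};

/* Conditional lock helpers in the spirit of genl_op_lock()/genl_op_unlock():
 * serialize only the families that asked for it.
 */
static void ex_op_lock(const struct ex_family *family)
{
	if (!family->parallel_ops)
		pthread_mutex_lock(&big_lock);
}

static void ex_op_unlock(const struct ex_family *family)
{
	if (!family->parallel_ops)
		pthread_mutex_unlock(&big_lock);
}

static void ex_handle(const struct ex_family *family)
{
	ex_op_lock(family);
	printf("%s: handled %s the big lock\n", family->name,
	       family->parallel_ops ? "without" : "under");
	ex_op_unlock(family);
}

int main(void)
{
	struct ex_family serial = { "serial-family", false };
	struct ex_family parallel = { "parallel-family", true };

	ex_handle(&serial);
	ex_handle(&parallel);
	return 0;
}
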
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index e9ac6a6f934e..aa1dbf654c3e 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -110,10 +110,10 @@ static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb)
struct nfc_dev *dev;
u32 idx;
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ if (!info->info.attrs[NFC_ATTR_DEVICE_INDEX])
return ERR_PTR(-EINVAL);
- idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+ idx = nla_get_u32(info->info.attrs[NFC_ATTR_DEVICE_INDEX]);
dev = nfc_get_device(idx);
if (!dev)
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index cab1e02b63e0..fd66014d8a76 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -27,6 +27,7 @@
#include <net/sctp/checksum.h>
#include "datapath.h"
+#include "drop.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
@@ -781,7 +782,7 @@ static int ovs_vport_output(struct net *net, struct sock *sk,
struct vport *vport = data->vport;
if (skb_cow_head(skb, data->l2_len) < 0) {
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
return -ENOMEM;
}
@@ -852,6 +853,7 @@ static void ovs_fragment(struct net *net, struct vport *vport,
struct sk_buff *skb, u16 mru,
struct sw_flow_key *key)
{
+ enum ovs_drop_reason reason;
u16 orig_network_offset = 0;
if (eth_p_mpls(skb->protocol)) {
@@ -861,6 +863,7 @@ static void ovs_fragment(struct net *net, struct vport *vport,
if (skb_network_offset(skb) > MAX_L2_LEN) {
OVS_NLERR(1, "L2 header too long to fragment");
+ reason = OVS_DROP_FRAG_L2_TOO_LONG;
goto err;
}
@@ -901,12 +904,13 @@ static void ovs_fragment(struct net *net, struct vport *vport,
WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
ovs_vport_name(vport), ntohs(key->eth.type), mru,
vport->dev->mtu);
+ reason = OVS_DROP_FRAG_INVALID_PROTO;
goto err;
}
return;
err:
- kfree_skb(skb);
+ ovs_kfree_skb_reason(skb, reason);
}
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
@@ -933,10 +937,10 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
ovs_fragment(net, vport, skb, mru, key);
} else {
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
}
} else {
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
}
}
@@ -1010,7 +1014,7 @@ static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
return clone_execute(dp, skb, key, 0, nla_data(actions),
nla_len(actions), true, false);
- consume_skb(skb);
+ ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
return 0;
}
@@ -1036,7 +1040,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
if ((arg->probability != U32_MAX) &&
(!arg->probability || get_random_u32() > arg->probability)) {
if (last)
- consume_skb(skb);
+ ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
return 0;
}
@@ -1297,6 +1301,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
if (trace_ovs_do_execute_action_enabled())
trace_ovs_do_execute_action(dp, skb, key, a, rem);
+ /* Actions that rightfully have to consume the skb should do it
+ * and return directly.
+ */
switch (nla_type(a)) {
case OVS_ACTION_ATTR_OUTPUT: {
int port = nla_get_u32(a);
@@ -1332,6 +1339,10 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
output_userspace(dp, skb, key, a, attr,
len, OVS_CB(skb)->cutlen);
OVS_CB(skb)->cutlen = 0;
+ if (nla_is_last(a, rem)) {
+ consume_skb(skb);
+ return 0;
+ }
break;
case OVS_ACTION_ATTR_HASH:
@@ -1446,7 +1457,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
case OVS_ACTION_ATTR_METER:
if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
- consume_skb(skb);
+ ovs_kfree_skb_reason(skb, OVS_DROP_METER);
return 0;
}
break;
@@ -1477,15 +1488,24 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
return dec_ttl_exception_handler(dp, skb,
key, a);
break;
+
+ case OVS_ACTION_ATTR_DROP: {
+ enum ovs_drop_reason reason = nla_get_u32(a)
+ ? OVS_DROP_EXPLICIT_WITH_ERROR
+ : OVS_DROP_EXPLICIT;
+
+ ovs_kfree_skb_reason(skb, reason);
+ return 0;
+ }
}
if (unlikely(err)) {
- kfree_skb(skb);
+ ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
return err;
}
}
- consume_skb(skb);
+ ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
return 0;
}
@@ -1547,7 +1567,7 @@ static int clone_execute(struct datapath *dp, struct sk_buff *skb,
/* Out of per CPU action FIFO space. Drop the 'skb' and
* log an error.
*/
- kfree_skb(skb);
+ ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);
if (net_ratelimit()) {
if (actions) { /* Sample action */
@@ -1599,7 +1619,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
if (unlikely(level > OVS_RECURSION_LIMIT)) {
net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
ovs_dp_name(dp));
- kfree_skb(skb);
+ ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
err = -ENETDOWN;
goto out;
}
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index fa955e892210..0b9a785dea45 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -29,6 +29,7 @@
#include <net/netfilter/nf_conntrack_act_ct.h>
#include "datapath.h"
+#include "drop.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"
@@ -1035,7 +1036,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
skb_push_rcsum(skb, nh_ofs);
if (err)
- kfree_skb(skb);
+ ovs_kfree_skb_reason(skb, OVS_DROP_CONNTRACK);
return err;
}
@@ -1604,7 +1605,7 @@ static struct sk_buff *
ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
struct ovs_header **ovs_reply_header)
{
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct sk_buff *skb;
skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index a6d2a0b1aa21..0a974eeef76e 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -41,6 +41,7 @@
#include <net/pkt_cls.h>
#include "datapath.h"
+#include "drop.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
@@ -589,7 +590,7 @@ out:
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct net *net = sock_net(skb->sk);
struct nlattr **a = info->attrs;
struct sw_flow_actions *acts;
@@ -966,7 +967,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
struct nlattr **a = info->attrs;
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct sw_flow *flow = NULL, *new_flow;
struct sw_flow_mask mask;
struct sk_buff *reply;
@@ -1213,7 +1214,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
struct nlattr **a = info->attrs;
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct sw_flow_key key;
struct sw_flow *flow;
struct sk_buff *reply = NULL;
@@ -1314,7 +1315,7 @@ error:
static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct net *net = sock_net(skb->sk);
struct sw_flow_key key;
struct sk_buff *reply;
@@ -1373,7 +1374,7 @@ unlock:
static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct net *net = sock_net(skb->sk);
struct sw_flow_key key;
struct sk_buff *reply;
@@ -1641,7 +1642,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb,
{
struct datapath *dp;
- dp = lookup_datapath(sock_net(skb->sk), info->userhdr,
+ dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
info->attrs);
if (IS_ERR(dp))
return;
@@ -1934,7 +1935,8 @@ static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
ovs_lock();
- dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+ dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
+ info->attrs);
err = PTR_ERR(dp);
if (IS_ERR(dp))
goto err_unlock_free;
@@ -1967,7 +1969,8 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
ovs_lock();
- dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+ dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
+ info->attrs);
err = PTR_ERR(dp);
if (IS_ERR(dp))
goto err_unlock_free;
@@ -2002,7 +2005,8 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
ovs_lock();
- dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+ dp = lookup_datapath(sock_net(skb->sk), genl_info_userhdr(info),
+ info->attrs);
if (IS_ERR(dp)) {
err = PTR_ERR(dp);
goto err_unlock_free;
@@ -2245,7 +2249,7 @@ static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct vport_parms parms;
struct sk_buff *reply;
struct vport *vport;
@@ -2347,7 +2351,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
ovs_lock();
- vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
+ vport = lookup_vport(sock_net(skb->sk), genl_info_userhdr(info), a);
err = PTR_ERR(vport);
if (IS_ERR(vport))
goto exit_unlock_free;
@@ -2403,7 +2407,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
ovs_lock();
- vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
+ vport = lookup_vport(sock_net(skb->sk), genl_info_userhdr(info), a);
err = PTR_ERR(vport);
if (IS_ERR(vport))
goto exit_unlock_free;
@@ -2446,7 +2450,7 @@ exit_unlock_free:
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct sk_buff *reply;
struct vport *vport;
int err;
@@ -2702,6 +2706,17 @@ static struct pernet_operations ovs_net_ops = {
.size = sizeof(struct ovs_net),
};
+static const char * const ovs_drop_reasons[] = {
+#define S(x) (#x),
+ OVS_DROP_REASONS(S)
+#undef S
+};
+
+static struct drop_reason_list drop_reason_list_ovs = {
+ .reasons = ovs_drop_reasons,
+ .n_reasons = ARRAY_SIZE(ovs_drop_reasons),
+};
+
static int __init dp_init(void)
{
int err;
@@ -2743,6 +2758,9 @@ static int __init dp_init(void)
if (err < 0)
goto error_unreg_netdev;
+ drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_OPENVSWITCH,
+ &drop_reason_list_ovs);
+
return 0;
error_unreg_netdev:
@@ -2769,6 +2787,7 @@ static void dp_cleanup(void)
ovs_netdev_exit();
unregister_netdevice_notifier(&ovs_dp_device_notifier);
unregister_pernet_device(&ovs_net_ops);
+ drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_OPENVSWITCH);
rcu_barrier();
ovs_vport_exit();
ovs_flow_exit();
diff --git a/net/openvswitch/drop.h b/net/openvswitch/drop.h
new file mode 100644
index 000000000000..cedf9b7b5796
--- /dev/null
+++ b/net/openvswitch/drop.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * OpenvSwitch drop reason list.
+ */
+
+#ifndef OPENVSWITCH_DROP_H
+#define OPENVSWITCH_DROP_H
+#include <linux/skbuff.h>
+#include <net/dropreason.h>
+
+#define OVS_DROP_REASONS(R) \
+ R(OVS_DROP_LAST_ACTION) \
+ R(OVS_DROP_ACTION_ERROR) \
+ R(OVS_DROP_EXPLICIT) \
+ R(OVS_DROP_EXPLICIT_WITH_ERROR) \
+ R(OVS_DROP_METER) \
+ R(OVS_DROP_RECURSION_LIMIT) \
+ R(OVS_DROP_DEFERRED_LIMIT) \
+ R(OVS_DROP_FRAG_L2_TOO_LONG) \
+ R(OVS_DROP_FRAG_INVALID_PROTO) \
+ R(OVS_DROP_CONNTRACK) \
+ R(OVS_DROP_IP_TTL) \
+ /* deliberate comment for trailing \ */
+
+enum ovs_drop_reason {
+ __OVS_DROP_REASON = SKB_DROP_REASON_SUBSYS_OPENVSWITCH <<
+ SKB_DROP_REASON_SUBSYS_SHIFT,
+#define ENUM(x) x,
+ OVS_DROP_REASONS(ENUM)
+#undef ENUM
+
+ OVS_DROP_MAX,
+};
+
+static inline void
+ovs_kfree_skb_reason(struct sk_buff *skb, enum ovs_drop_reason reason)
+{
+ kfree_skb_reason(skb, (u32)reason);
+}
+
+#endif /* OPENVSWITCH_DROP_H */
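
drop.h defines the drop reasons once as an X-macro list and expands it twice: into the enum here and into the ovs_drop_reasons[] string table in datapath.c. A compact userspace illustration of the same X-macro technique, with invented reason names:

#include <stdio.h>

/* Single source of truth; expanded twice below, exactly like
 * OVS_DROP_REASONS() is expanded for the enum and for ovs_drop_reasons[].
 */
#define EX_DROP_REASONS(R)		\
	R(EX_DROP_LAST_ACTION)		\
	R(EX_DROP_ACTION_ERROR)		\
	R(EX_DROP_METER)		\
	/* deliberate comment for the trailing backslash */

enum ex_drop_reason {
#define ENUM(x) x,
	EX_DROP_REASONS(ENUM)
#undef ENUM
	EX_DROP_MAX,
};

static const char * const ex_drop_strings[] = {
#define S(x) #x,
	EX_DROP_REASONS(S)
#undef S
};

int main(void)
{
	for (int i = 0; i < EX_DROP_MAX; i++)
		printf("%d -> %s\n", i, ex_drop_strings[i]);
	return 0;
}
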
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 41116361433d..88965e2068ac 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -38,6 +38,7 @@
#include <net/tun_proto.h>
#include <net/erspan.h>
+#include "drop.h"
#include "flow_netlink.h"
struct ovs_len_tbl {
@@ -61,6 +62,7 @@ static bool actions_may_change_flow(const struct nlattr *actions)
case OVS_ACTION_ATTR_RECIRC:
case OVS_ACTION_ATTR_TRUNC:
case OVS_ACTION_ATTR_USERSPACE:
+ case OVS_ACTION_ATTR_DROP:
break;
case OVS_ACTION_ATTR_CT:
@@ -2394,7 +2396,7 @@ static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len)
/* Whenever new actions are added, the need to update this
* function should be considered.
*/
- BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 23);
+ BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 24);
if (!actions)
return;
@@ -3182,6 +3184,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
[OVS_ACTION_ATTR_CHECK_PKT_LEN] = (u32)-1,
[OVS_ACTION_ATTR_ADD_MPLS] = sizeof(struct ovs_action_add_mpls),
[OVS_ACTION_ATTR_DEC_TTL] = (u32)-1,
+ [OVS_ACTION_ATTR_DROP] = sizeof(u32),
};
const struct ovs_action_push_vlan *vlan;
int type = nla_type(a);
@@ -3453,6 +3456,11 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
skip_copy = true;
break;
+ case OVS_ACTION_ATTR_DROP:
+ if (!nla_is_last(a, rem))
+ return -EINVAL;
+ break;
+
default:
OVS_NLERR(log, "Unknown Action type %d", type);
return -EINVAL;
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index c4ebf810e4b1..cc08e0403909 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -211,7 +211,7 @@ ovs_meter_cmd_reply_start(struct genl_info *info, u8 cmd,
struct ovs_header **ovs_reply_header)
{
struct sk_buff *skb;
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
@@ -272,7 +272,7 @@ error:
static int ovs_meter_cmd_features(struct sk_buff *skb, struct genl_info *info)
{
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct ovs_header *ovs_reply_header;
struct nlattr *nla, *band_nla;
struct sk_buff *reply;
@@ -409,7 +409,7 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
struct dp_meter *meter, *old_meter;
struct sk_buff *reply;
struct ovs_header *ovs_reply_header;
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct dp_meter_table *meter_tbl;
struct datapath *dp;
int err;
@@ -482,7 +482,7 @@ exit_free_meter:
static int ovs_meter_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct ovs_header *ovs_reply_header;
struct nlattr **a = info->attrs;
struct dp_meter *meter;
@@ -535,7 +535,7 @@ exit_unlock:
static int ovs_meter_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
- struct ovs_header *ovs_header = info->userhdr;
+ struct ovs_header *ovs_header = genl_info_userhdr(info);
struct ovs_header *ovs_reply_header;
struct nlattr **a = info->attrs;
struct dp_meter *old_meter;
diff --git a/net/rds/rdma_transport.h b/net/rds/rdma_transport.h
index ca4c3a667091..d2fdb1529585 100644
--- a/net/rds/rdma_transport.h
+++ b/net/rds/rdma_transport.h
@@ -17,7 +17,6 @@
*/
#define RDS_RDMA_REJ_INCOMPAT 1
-int rds_rdma_conn_connect(struct rds_connection *conn);
int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
diff --git a/net/rds/rds.h b/net/rds/rds.h
index d35d1fc39807..dc360252c515 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -863,7 +863,6 @@ int rds_message_next_extension(struct rds_header *hdr,
unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
-void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
@@ -1013,7 +1012,5 @@ void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
-int rds_trans_init(void);
-void rds_trans_exit(void);
#endif
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index f8b5930d7b34..053aa7da87ef 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -56,7 +56,6 @@ void rds_tcp_restore_callbacks(struct socket *sock,
struct rds_tcp_connection *tc);
u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
-u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
extern struct rds_transport rds_tcp_transport;
void rds_tcp_accept_work(struct sock *sk);
int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2613c4d74b16..17fcaa9b0df9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -581,7 +581,7 @@ static void sctp_v4_err_handle(struct sctp_transport *t, struct sk_buff *skb,
default:
return;
}
- if (!sock_owned_by_user(sk) && inet_sk(sk)->recverr) {
+ if (!sock_owned_by_user(sk) && inet_test_bit(RECVERR, sk)) {
sk->sk_err = err;
sk_error_report(sk);
} else { /* Only an error on timeout */
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 33c0895e101c..2185f44198de 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -360,7 +360,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
ret = inet_addr_type_table(net, addr->v4.sin_addr.s_addr, tb_id);
if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
ret != RTN_LOCAL &&
- !sp->inet.freebind &&
+ !inet_test_bit(FREEBIND, sk) &&
!READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind))
return 0;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6e3d28aa587c..04b390892827 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -9482,7 +9482,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
newinet->inet_id = get_random_u16();
newinet->uc_ttl = inet->uc_ttl;
- newinet->mc_loop = 1;
+ inet_set_bit(MC_LOOP, newsk);
newinet->mc_ttl = 1;
newinet->mc_index = 0;
newinet->mc_list = NULL;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 9b47c8409231..5bc076f2fa74 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -208,7 +208,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
goto err_out;
}
- info.attrs = attrbuf;
+ info.info.attrs = attrbuf;
if (nlmsg_len(cb.nlh) > 0) {
err = nlmsg_parse_deprecated(cb.nlh, GENL_HDRLEN, attrbuf,
@@ -1294,7 +1294,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
struct tipc_nl_compat_msg msg;
struct nlmsghdr *req_nlh;
struct nlmsghdr *rep_nlh;
- struct tipc_genlmsghdr *req_userhdr = info->userhdr;
+ struct tipc_genlmsghdr *req_userhdr = genl_info_userhdr(info);
memset(&msg, 0, sizeof(msg));
diff --git a/net/tipc/node.c b/net/tipc/node.c
index a9c5b6594889..3105abe97bb9 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2662,7 +2662,7 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *node;
@@ -2870,7 +2870,7 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
int err;
if (!prev_node) {
- struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
if (!attrs[TIPC_NLA_MON])
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ef8e5139a873..bb1118d02f95 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -3791,7 +3791,7 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct tipc_sock *tsk;
if (!tsk_portid) {
- struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
if (!attrs[TIPC_NLA_SOCK])
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 926232557e77..f892b0903dba 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -465,7 +465,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
int i;
if (!bid && !skip_cnt) {
- struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
struct net *net = sock_net(skb->sk);
struct nlattr *battrs[TIPC_NLA_BEARER_MAX + 1];
char *bname;
diff --git a/tools/net/ynl/generated/devlink-user.c b/tools/net/ynl/generated/devlink-user.c
index 8492789433b9..3a8d8499fab6 100644
--- a/tools/net/ynl/generated/devlink-user.c
+++ b/tools/net/ynl/generated/devlink-user.c
@@ -15,7 +15,21 @@
/* Enums */
static const char * const devlink_op_strmap[] = {
[3] = "get",
+ [7] = "port-get",
+ [DEVLINK_CMD_SB_GET] = "sb-get",
+ [DEVLINK_CMD_SB_POOL_GET] = "sb-pool-get",
+ [DEVLINK_CMD_SB_PORT_POOL_GET] = "sb-port-pool-get",
+ [DEVLINK_CMD_SB_TC_POOL_BIND_GET] = "sb-tc-pool-bind-get",
+ [DEVLINK_CMD_PARAM_GET] = "param-get",
+ [DEVLINK_CMD_REGION_GET] = "region-get",
[DEVLINK_CMD_INFO_GET] = "info-get",
+ [DEVLINK_CMD_HEALTH_REPORTER_GET] = "health-reporter-get",
+ [DEVLINK_CMD_TRAP_GET] = "trap-get",
+ [DEVLINK_CMD_TRAP_GROUP_GET] = "trap-group-get",
+ [DEVLINK_CMD_TRAP_POLICER_GET] = "trap-policer-get",
+ [DEVLINK_CMD_RATE_GET] = "rate-get",
+ [DEVLINK_CMD_LINECARD_GET] = "linecard-get",
+ [DEVLINK_CMD_SELFTESTS_GET] = "selftests-get",
};
const char *devlink_op_str(int op)
@@ -25,6 +39,18 @@ const char *devlink_op_str(int op)
return devlink_op_strmap[op];
}
+static const char * const devlink_sb_pool_type_strmap[] = {
+ [0] = "ingress",
+ [1] = "egress",
+};
+
+const char *devlink_sb_pool_type_str(enum devlink_sb_pool_type value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_sb_pool_type_strmap))
+ return NULL;
+ return devlink_sb_pool_type_strmap[value];
+}
+
/* Policies */
struct ynl_policy_attr devlink_dl_info_version_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_INFO_VERSION_NAME] = { .name = "info-version-name", .type = YNL_PT_NUL_STR, },
@@ -88,6 +114,12 @@ struct ynl_policy_attr devlink_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .name = "bus-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_DEV_NAME] = { .name = "dev-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_PORT_INDEX] = { .name = "port-index", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_SB_INDEX] = { .name = "sb-index", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_SB_POOL_INDEX] = { .name = "sb-pool-index", .type = YNL_PT_U16, },
+ [DEVLINK_ATTR_SB_POOL_TYPE] = { .name = "sb-pool-type", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_SB_TC_INDEX] = { .name = "sb-tc-index", .type = YNL_PT_U16, },
+ [DEVLINK_ATTR_PARAM_NAME] = { .name = "param-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_REGION_NAME] = { .name = "region-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_INFO_DRIVER_NAME] = { .name = "info-driver-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_INFO_SERIAL_NUMBER] = { .name = "info-serial-number", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_INFO_VERSION_FIXED] = { .name = "info-version-fixed", .type = YNL_PT_NEST, .nest = &devlink_dl_info_version_nest, },
@@ -95,7 +127,11 @@ struct ynl_policy_attr devlink_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_INFO_VERSION_STORED] = { .name = "info-version-stored", .type = YNL_PT_NEST, .nest = &devlink_dl_info_version_nest, },
[DEVLINK_ATTR_INFO_VERSION_NAME] = { .name = "info-version-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_INFO_VERSION_VALUE] = { .name = "info-version-value", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .name = "health-reporter-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_TRAP_NAME] = { .name = "trap-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_TRAP_GROUP_NAME] = { .name = "trap-group-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_RELOAD_FAILED] = { .name = "reload-failed", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_TRAP_POLICER_ID] = { .name = "trap-policer-id", .type = YNL_PT_U32, },
[DEVLINK_ATTR_RELOAD_ACTION] = { .name = "reload-action", .type = YNL_PT_U8, },
[DEVLINK_ATTR_DEV_STATS] = { .name = "dev-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_dev_stats_nest, },
[DEVLINK_ATTR_RELOAD_STATS] = { .name = "reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, },
@@ -105,6 +141,8 @@ struct ynl_policy_attr devlink_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_REMOTE_RELOAD_STATS] = { .name = "remote-reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, },
[DEVLINK_ATTR_RELOAD_ACTION_INFO] = { .name = "reload-action-info", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_info_nest, },
[DEVLINK_ATTR_RELOAD_ACTION_STATS] = { .name = "reload-action-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_stats_nest, },
+ [DEVLINK_ATTR_RATE_NODE_NAME] = { .name = "rate-node-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_LINECARD_INDEX] = { .name = "linecard-index", .type = YNL_PT_U32, },
};
struct ynl_policy_nest devlink_nest = {
@@ -531,6 +569,1126 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_PORT_GET ============== */
+/* DEVLINK_CMD_PORT_GET - do */
+void devlink_port_get_req_free(struct devlink_port_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_port_get_rsp_free(struct devlink_port_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_port_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct devlink_port_get_rsp *dst;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_port_get_rsp *
+devlink_port_get(struct ynl_sock *ys, struct devlink_port_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_port_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PORT_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_port_get_rsp_parse;
+ yrs.rsp_cmd = 7;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_port_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_PORT_GET - dump */
+int devlink_port_get_rsp_dump_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_port_get_rsp_dump *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+void devlink_port_get_rsp_list_free(struct devlink_port_get_rsp_list *rsp)
+{
+ struct devlink_port_get_rsp_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp);
+ }
+}
+
+struct devlink_port_get_rsp_list *
+devlink_port_get_dump(struct ynl_sock *ys,
+ struct devlink_port_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_port_get_rsp_list);
+ yds.cb = devlink_port_get_rsp_dump_parse;
+ yds.rsp_cmd = 7;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_PORT_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_port_get_rsp_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_SB_GET ============== */
+/* DEVLINK_CMD_SB_GET - do */
+void devlink_sb_get_req_free(struct devlink_sb_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_sb_get_rsp_free(struct devlink_sb_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_sb_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct devlink_sb_get_rsp *dst;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_SB_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sb_index = 1;
+ dst->sb_index = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_sb_get_rsp *
+devlink_sb_get(struct ynl_sock *ys, struct devlink_sb_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_sb_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.sb_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_sb_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_SB_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_sb_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_SB_GET - dump */
+void devlink_sb_get_list_free(struct devlink_sb_get_list *rsp)
+{
+ struct devlink_sb_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp);
+ }
+}
+
+struct devlink_sb_get_list *
+devlink_sb_get_dump(struct ynl_sock *ys, struct devlink_sb_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_sb_get_list);
+ yds.cb = devlink_sb_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_SB_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_sb_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_SB_POOL_GET ============== */
+/* DEVLINK_CMD_SB_POOL_GET - do */
+void devlink_sb_pool_get_req_free(struct devlink_sb_pool_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_sb_pool_get_rsp_free(struct devlink_sb_pool_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_sb_pool_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_sb_pool_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_SB_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sb_index = 1;
+ dst->sb_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_SB_POOL_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sb_pool_index = 1;
+ dst->sb_pool_index = mnl_attr_get_u16(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_sb_pool_get_rsp *
+devlink_sb_pool_get(struct ynl_sock *ys, struct devlink_sb_pool_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_sb_pool_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_POOL_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.sb_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
+ if (req->_present.sb_pool_index)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_SB_POOL_INDEX, req->sb_pool_index);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_sb_pool_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_SB_POOL_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_sb_pool_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_SB_POOL_GET - dump */
+void devlink_sb_pool_get_list_free(struct devlink_sb_pool_get_list *rsp)
+{
+ struct devlink_sb_pool_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp);
+ }
+}
+
+struct devlink_sb_pool_get_list *
+devlink_sb_pool_get_dump(struct ynl_sock *ys,
+ struct devlink_sb_pool_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_sb_pool_get_list);
+ yds.cb = devlink_sb_pool_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_SB_POOL_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_POOL_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_sb_pool_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_SB_PORT_POOL_GET ============== */
+/* DEVLINK_CMD_SB_PORT_POOL_GET - do */
+void
+devlink_sb_port_pool_get_req_free(struct devlink_sb_port_pool_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void
+devlink_sb_port_pool_get_rsp_free(struct devlink_sb_port_pool_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_sb_port_pool_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_sb_port_pool_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_SB_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sb_index = 1;
+ dst->sb_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_SB_POOL_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sb_pool_index = 1;
+ dst->sb_pool_index = mnl_attr_get_u16(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_sb_port_pool_get_rsp *
+devlink_sb_port_pool_get(struct ynl_sock *ys,
+ struct devlink_sb_port_pool_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_sb_port_pool_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_PORT_POOL_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.sb_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
+ if (req->_present.sb_pool_index)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_SB_POOL_INDEX, req->sb_pool_index);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_sb_port_pool_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_SB_PORT_POOL_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_sb_port_pool_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_SB_PORT_POOL_GET - dump */
+void
+devlink_sb_port_pool_get_list_free(struct devlink_sb_port_pool_get_list *rsp)
+{
+ struct devlink_sb_port_pool_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp);
+ }
+}
+
+struct devlink_sb_port_pool_get_list *
+devlink_sb_port_pool_get_dump(struct ynl_sock *ys,
+ struct devlink_sb_port_pool_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_sb_port_pool_get_list);
+ yds.cb = devlink_sb_port_pool_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_SB_PORT_POOL_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_PORT_POOL_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_sb_port_pool_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_SB_TC_POOL_BIND_GET ============== */
+/* DEVLINK_CMD_SB_TC_POOL_BIND_GET - do */
+void
+devlink_sb_tc_pool_bind_get_req_free(struct devlink_sb_tc_pool_bind_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void
+devlink_sb_tc_pool_bind_get_rsp_free(struct devlink_sb_tc_pool_bind_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_sb_tc_pool_bind_get_rsp_parse(const struct nlmsghdr *nlh,
+ void *data)
+{
+ struct devlink_sb_tc_pool_bind_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_SB_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sb_index = 1;
+ dst->sb_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_SB_POOL_TYPE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sb_pool_type = 1;
+ dst->sb_pool_type = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_SB_TC_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.sb_tc_index = 1;
+ dst->sb_tc_index = mnl_attr_get_u16(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_sb_tc_pool_bind_get_rsp *
+devlink_sb_tc_pool_bind_get(struct ynl_sock *ys,
+ struct devlink_sb_tc_pool_bind_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_sb_tc_pool_bind_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_TC_POOL_BIND_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.sb_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
+ if (req->_present.sb_pool_type)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_SB_POOL_TYPE, req->sb_pool_type);
+ if (req->_present.sb_tc_index)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_SB_TC_INDEX, req->sb_tc_index);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_sb_tc_pool_bind_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_sb_tc_pool_bind_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_SB_TC_POOL_BIND_GET - dump */
+void
+devlink_sb_tc_pool_bind_get_list_free(struct devlink_sb_tc_pool_bind_get_list *rsp)
+{
+ struct devlink_sb_tc_pool_bind_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp);
+ }
+}
+
+struct devlink_sb_tc_pool_bind_get_list *
+devlink_sb_tc_pool_bind_get_dump(struct ynl_sock *ys,
+ struct devlink_sb_tc_pool_bind_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_sb_tc_pool_bind_get_list);
+ yds.cb = devlink_sb_tc_pool_bind_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_TC_POOL_BIND_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_sb_tc_pool_bind_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_PARAM_GET ============== */
+/* DEVLINK_CMD_PARAM_GET - do */
+void devlink_param_get_req_free(struct devlink_param_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->param_name);
+ free(req);
+}
+
+void devlink_param_get_rsp_free(struct devlink_param_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp->param_name);
+ free(rsp);
+}
+
+int devlink_param_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_param_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PARAM_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.param_name_len = len;
+ dst->param_name = malloc(len + 1);
+ memcpy(dst->param_name, mnl_attr_get_str(attr), len);
+ dst->param_name[len] = 0;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_param_get_rsp *
+devlink_param_get(struct ynl_sock *ys, struct devlink_param_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_param_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PARAM_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.param_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_PARAM_NAME, req->param_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_param_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_PARAM_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_param_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_PARAM_GET - dump */
+void devlink_param_get_list_free(struct devlink_param_get_list *rsp)
+{
+ struct devlink_param_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp->obj.param_name);
+ free(rsp);
+ }
+}
+
+struct devlink_param_get_list *
+devlink_param_get_dump(struct ynl_sock *ys,
+ struct devlink_param_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_param_get_list);
+ yds.cb = devlink_param_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_PARAM_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_PARAM_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_param_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_REGION_GET ============== */
+/* DEVLINK_CMD_REGION_GET - do */
+void devlink_region_get_req_free(struct devlink_region_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->region_name);
+ free(req);
+}
+
+void devlink_region_get_rsp_free(struct devlink_region_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp->region_name);
+ free(rsp);
+}
+
+int devlink_region_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_region_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_REGION_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.region_name_len = len;
+ dst->region_name = malloc(len + 1);
+ memcpy(dst->region_name, mnl_attr_get_str(attr), len);
+ dst->region_name[len] = 0;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_region_get_rsp *
+devlink_region_get(struct ynl_sock *ys, struct devlink_region_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_region_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_REGION_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.region_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_REGION_NAME, req->region_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_region_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_REGION_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_region_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_REGION_GET - dump */
+void devlink_region_get_list_free(struct devlink_region_get_list *rsp)
+{
+ struct devlink_region_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp->obj.region_name);
+ free(rsp);
+ }
+}
+
+struct devlink_region_get_list *
+devlink_region_get_dump(struct ynl_sock *ys,
+ struct devlink_region_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_region_get_list);
+ yds.cb = devlink_region_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_REGION_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_REGION_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_region_get_list_free(yds.first);
+ return NULL;
+}
+
/* ============== DEVLINK_CMD_INFO_GET ============== */
/* DEVLINK_CMD_INFO_GET - do */
void devlink_info_get_req_free(struct devlink_info_get_req *req)
@@ -769,6 +1927,1056 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_GET ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_GET - do */
+void
+devlink_health_reporter_get_req_free(struct devlink_health_reporter_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->health_reporter_name);
+ free(req);
+}
+
+void
+devlink_health_reporter_get_rsp_free(struct devlink_health_reporter_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp->health_reporter_name);
+ free(rsp);
+}
+
+int devlink_health_reporter_get_rsp_parse(const struct nlmsghdr *nlh,
+ void *data)
+{
+ struct devlink_health_reporter_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_HEALTH_REPORTER_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.health_reporter_name_len = len;
+ dst->health_reporter_name = malloc(len + 1);
+ memcpy(dst->health_reporter_name, mnl_attr_get_str(attr), len);
+ dst->health_reporter_name[len] = 0;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_health_reporter_get_rsp *
+devlink_health_reporter_get(struct ynl_sock *ys,
+ struct devlink_health_reporter_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_health_reporter_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_HEALTH_REPORTER_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.health_reporter_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_health_reporter_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_HEALTH_REPORTER_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_health_reporter_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_HEALTH_REPORTER_GET - dump */
+void
+devlink_health_reporter_get_list_free(struct devlink_health_reporter_get_list *rsp)
+{
+ struct devlink_health_reporter_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp->obj.health_reporter_name);
+ free(rsp);
+ }
+}
+
+struct devlink_health_reporter_get_list *
+devlink_health_reporter_get_dump(struct ynl_sock *ys,
+ struct devlink_health_reporter_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_health_reporter_get_list);
+ yds.cb = devlink_health_reporter_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_HEALTH_REPORTER_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_HEALTH_REPORTER_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_health_reporter_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_TRAP_GET ============== */
+/* DEVLINK_CMD_TRAP_GET - do */
+void devlink_trap_get_req_free(struct devlink_trap_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->trap_name);
+ free(req);
+}
+
+void devlink_trap_get_rsp_free(struct devlink_trap_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp->trap_name);
+ free(rsp);
+}
+
+int devlink_trap_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct devlink_trap_get_rsp *dst;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_TRAP_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.trap_name_len = len;
+ dst->trap_name = malloc(len + 1);
+ memcpy(dst->trap_name, mnl_attr_get_str(attr), len);
+ dst->trap_name[len] = 0;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_trap_get_rsp *
+devlink_trap_get(struct ynl_sock *ys, struct devlink_trap_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_trap_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_TRAP_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.trap_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_TRAP_NAME, req->trap_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_trap_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_TRAP_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_trap_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_TRAP_GET - dump */
+void devlink_trap_get_list_free(struct devlink_trap_get_list *rsp)
+{
+ struct devlink_trap_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp->obj.trap_name);
+ free(rsp);
+ }
+}
+
+struct devlink_trap_get_list *
+devlink_trap_get_dump(struct ynl_sock *ys,
+ struct devlink_trap_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_trap_get_list);
+ yds.cb = devlink_trap_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_TRAP_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_trap_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_TRAP_GROUP_GET ============== */
+/* DEVLINK_CMD_TRAP_GROUP_GET - do */
+void devlink_trap_group_get_req_free(struct devlink_trap_group_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->trap_group_name);
+ free(req);
+}
+
+void devlink_trap_group_get_rsp_free(struct devlink_trap_group_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp->trap_group_name);
+ free(rsp);
+}
+
+int devlink_trap_group_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_trap_group_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_TRAP_GROUP_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.trap_group_name_len = len;
+ dst->trap_group_name = malloc(len + 1);
+ memcpy(dst->trap_group_name, mnl_attr_get_str(attr), len);
+ dst->trap_group_name[len] = 0;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_trap_group_get_rsp *
+devlink_trap_group_get(struct ynl_sock *ys,
+ struct devlink_trap_group_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_trap_group_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_TRAP_GROUP_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.trap_group_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_TRAP_GROUP_NAME, req->trap_group_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_trap_group_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_TRAP_GROUP_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_trap_group_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_TRAP_GROUP_GET - dump */
+void devlink_trap_group_get_list_free(struct devlink_trap_group_get_list *rsp)
+{
+ struct devlink_trap_group_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp->obj.trap_group_name);
+ free(rsp);
+ }
+}
+
+struct devlink_trap_group_get_list *
+devlink_trap_group_get_dump(struct ynl_sock *ys,
+ struct devlink_trap_group_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_trap_group_get_list);
+ yds.cb = devlink_trap_group_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_TRAP_GROUP_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_GROUP_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_trap_group_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_TRAP_POLICER_GET ============== */
+/* DEVLINK_CMD_TRAP_POLICER_GET - do */
+void
+devlink_trap_policer_get_req_free(struct devlink_trap_policer_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void
+devlink_trap_policer_get_rsp_free(struct devlink_trap_policer_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_trap_policer_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_trap_policer_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_TRAP_POLICER_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.trap_policer_id = 1;
+ dst->trap_policer_id = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_trap_policer_get_rsp *
+devlink_trap_policer_get(struct ynl_sock *ys,
+ struct devlink_trap_policer_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_trap_policer_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_TRAP_POLICER_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.trap_policer_id)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_TRAP_POLICER_ID, req->trap_policer_id);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_trap_policer_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_TRAP_POLICER_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_trap_policer_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_TRAP_POLICER_GET - dump */
+void
+devlink_trap_policer_get_list_free(struct devlink_trap_policer_get_list *rsp)
+{
+ struct devlink_trap_policer_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp);
+ }
+}
+
+struct devlink_trap_policer_get_list *
+devlink_trap_policer_get_dump(struct ynl_sock *ys,
+ struct devlink_trap_policer_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_trap_policer_get_list);
+ yds.cb = devlink_trap_policer_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_TRAP_POLICER_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_POLICER_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_trap_policer_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_RATE_GET ============== */
+/* DEVLINK_CMD_RATE_GET - do */
+void devlink_rate_get_req_free(struct devlink_rate_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->rate_node_name);
+ free(req);
+}
+
+void devlink_rate_get_rsp_free(struct devlink_rate_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp->rate_node_name);
+ free(rsp);
+}
+
+int devlink_rate_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct devlink_rate_get_rsp *dst;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_RATE_NODE_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.rate_node_name_len = len;
+ dst->rate_node_name = malloc(len + 1);
+ memcpy(dst->rate_node_name, mnl_attr_get_str(attr), len);
+ dst->rate_node_name[len] = 0;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_rate_get_rsp *
+devlink_rate_get(struct ynl_sock *ys, struct devlink_rate_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_rate_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_RATE_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.rate_node_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_RATE_NODE_NAME, req->rate_node_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_rate_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_RATE_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_rate_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_RATE_GET - dump */
+void devlink_rate_get_list_free(struct devlink_rate_get_list *rsp)
+{
+ struct devlink_rate_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp->obj.rate_node_name);
+ free(rsp);
+ }
+}
+
+struct devlink_rate_get_list *
+devlink_rate_get_dump(struct ynl_sock *ys,
+ struct devlink_rate_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_rate_get_list);
+ yds.cb = devlink_rate_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_RATE_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_RATE_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_rate_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_LINECARD_GET ============== */
+/* DEVLINK_CMD_LINECARD_GET - do */
+void devlink_linecard_get_req_free(struct devlink_linecard_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_linecard_get_rsp_free(struct devlink_linecard_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_linecard_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_linecard_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_LINECARD_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.linecard_index = 1;
+ dst->linecard_index = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_linecard_get_rsp *
+devlink_linecard_get(struct ynl_sock *ys, struct devlink_linecard_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_linecard_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_LINECARD_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.linecard_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_LINECARD_INDEX, req->linecard_index);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_linecard_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_LINECARD_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_linecard_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_LINECARD_GET - dump */
+void devlink_linecard_get_list_free(struct devlink_linecard_get_list *rsp)
+{
+ struct devlink_linecard_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp);
+ }
+}
+
+struct devlink_linecard_get_list *
+devlink_linecard_get_dump(struct ynl_sock *ys,
+ struct devlink_linecard_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_linecard_get_list);
+ yds.cb = devlink_linecard_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_LINECARD_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_LINECARD_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_linecard_get_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_SELFTESTS_GET ============== */
+/* DEVLINK_CMD_SELFTESTS_GET - do */
+void devlink_selftests_get_req_free(struct devlink_selftests_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_selftests_get_rsp_free(struct devlink_selftests_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_selftests_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_selftests_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_selftests_get_rsp *
+devlink_selftests_get(struct ynl_sock *ys,
+ struct devlink_selftests_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_selftests_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SELFTESTS_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_selftests_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_SELFTESTS_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_selftests_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_SELFTESTS_GET - dump */
+void devlink_selftests_get_list_free(struct devlink_selftests_get_list *rsp)
+{
+ struct devlink_selftests_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp);
+ }
+}
+
+struct devlink_selftests_get_list *
+devlink_selftests_get_dump(struct ynl_sock *ys)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_selftests_get_list);
+ yds.cb = devlink_selftests_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_SELFTESTS_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SELFTESTS_GET, 1);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_selftests_get_list_free(yds.first);
+ return NULL;
+}
+
const struct ynl_family ynl_devlink_family = {
.name = "devlink",
};
diff --git a/tools/net/ynl/generated/devlink-user.h b/tools/net/ynl/generated/devlink-user.h
index af65e2f2f529..4b686d147613 100644
--- a/tools/net/ynl/generated/devlink-user.h
+++ b/tools/net/ynl/generated/devlink-user.h
@@ -17,6 +17,7 @@ extern const struct ynl_family ynl_devlink_family;
/* Enums */
const char *devlink_op_str(int op);
+const char *devlink_sb_pool_type_str(enum devlink_sb_pool_type value);
/* Common nested types */
struct devlink_dl_info_version {
@@ -140,6 +141,939 @@ void devlink_get_list_free(struct devlink_get_list *rsp);
struct devlink_get_list *devlink_get_dump(struct ynl_sock *ys);
+/* ============== DEVLINK_CMD_PORT_GET ============== */
+/* DEVLINK_CMD_PORT_GET - do */
+struct devlink_port_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+};
+
+static inline struct devlink_port_get_req *devlink_port_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_port_get_req));
+}
+void devlink_port_get_req_free(struct devlink_port_get_req *req);
+
+static inline void
+devlink_port_get_req_set_bus_name(struct devlink_port_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_port_get_req_set_dev_name(struct devlink_port_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_port_get_req_set_port_index(struct devlink_port_get_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+
+struct devlink_port_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+};
+
+void devlink_port_get_rsp_free(struct devlink_port_get_rsp *rsp);
+
+/*
+ * Get devlink port instances.
+ */
+struct devlink_port_get_rsp *
+devlink_port_get(struct ynl_sock *ys, struct devlink_port_get_req *req);
+
+/* DEVLINK_CMD_PORT_GET - dump */
+struct devlink_port_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_port_get_req_dump *
+devlink_port_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_port_get_req_dump));
+}
+void devlink_port_get_req_dump_free(struct devlink_port_get_req_dump *req);
+
+static inline void
+devlink_port_get_req_dump_set_bus_name(struct devlink_port_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_port_get_req_dump_set_dev_name(struct devlink_port_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_port_get_rsp_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+};
+
+struct devlink_port_get_rsp_list {
+ struct devlink_port_get_rsp_list *next;
+ struct devlink_port_get_rsp_dump obj __attribute__ ((aligned (8)));
+};
+
+void devlink_port_get_rsp_list_free(struct devlink_port_get_rsp_list *rsp);
+
+struct devlink_port_get_rsp_list *
+devlink_port_get_dump(struct ynl_sock *ys,
+ struct devlink_port_get_req_dump *req);
+
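+/* Usage sketch (editor's illustration, not part of the generated output):
+ * a minimal "do" request for a single port. It assumes a socket "ys" was
+ * opened with ynl_sock_create(&ynl_devlink_family, ...) and that the
+ * "pci"/"0000:01:00.0" handle and port index 0 are placeholders for a
+ * devlink instance actually present on the system.
+ *
+ *   struct devlink_port_get_req *req;
+ *   struct devlink_port_get_rsp *rsp;
+ *
+ *   req = devlink_port_get_req_alloc();
+ *   devlink_port_get_req_set_bus_name(req, "pci");
+ *   devlink_port_get_req_set_dev_name(req, "0000:01:00.0");
+ *   devlink_port_get_req_set_port_index(req, 0);
+ *
+ *   rsp = devlink_port_get(ys, req);
+ *   devlink_port_get_req_free(req);
+ *   if (rsp) {
+ *           printf("port %u on %s/%s\n", rsp->port_index,
+ *                  rsp->bus_name, rsp->dev_name);
+ *           devlink_port_get_rsp_free(rsp);
+ *   }
+ */
+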
+/* ============== DEVLINK_CMD_SB_GET ============== */
+/* DEVLINK_CMD_SB_GET - do */
+struct devlink_sb_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 sb_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 sb_index;
+};
+
+static inline struct devlink_sb_get_req *devlink_sb_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_get_req));
+}
+void devlink_sb_get_req_free(struct devlink_sb_get_req *req);
+
+static inline void
+devlink_sb_get_req_set_bus_name(struct devlink_sb_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_get_req_set_dev_name(struct devlink_sb_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_sb_get_req_set_sb_index(struct devlink_sb_get_req *req, __u32 sb_index)
+{
+ req->_present.sb_index = 1;
+ req->sb_index = sb_index;
+}
+
+struct devlink_sb_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 sb_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 sb_index;
+};
+
+void devlink_sb_get_rsp_free(struct devlink_sb_get_rsp *rsp);
+
+/*
+ * Get shared buffer instances.
+ */
+struct devlink_sb_get_rsp *
+devlink_sb_get(struct ynl_sock *ys, struct devlink_sb_get_req *req);
+
+/* DEVLINK_CMD_SB_GET - dump */
+struct devlink_sb_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_sb_get_req_dump *
+devlink_sb_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_get_req_dump));
+}
+void devlink_sb_get_req_dump_free(struct devlink_sb_get_req_dump *req);
+
+static inline void
+devlink_sb_get_req_dump_set_bus_name(struct devlink_sb_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_get_req_dump_set_dev_name(struct devlink_sb_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_sb_get_list {
+ struct devlink_sb_get_list *next;
+ struct devlink_sb_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void devlink_sb_get_list_free(struct devlink_sb_get_list *rsp);
+
+struct devlink_sb_get_list *
+devlink_sb_get_dump(struct ynl_sock *ys, struct devlink_sb_get_req_dump *req);
+
+/* ============== DEVLINK_CMD_SB_POOL_GET ============== */
+/* DEVLINK_CMD_SB_POOL_GET - do */
+struct devlink_sb_pool_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 sb_index:1;
+ __u32 sb_pool_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 sb_index;
+ __u16 sb_pool_index;
+};
+
+static inline struct devlink_sb_pool_get_req *
+devlink_sb_pool_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_pool_get_req));
+}
+void devlink_sb_pool_get_req_free(struct devlink_sb_pool_get_req *req);
+
+static inline void
+devlink_sb_pool_get_req_set_bus_name(struct devlink_sb_pool_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_pool_get_req_set_dev_name(struct devlink_sb_pool_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_sb_pool_get_req_set_sb_index(struct devlink_sb_pool_get_req *req,
+ __u32 sb_index)
+{
+ req->_present.sb_index = 1;
+ req->sb_index = sb_index;
+}
+static inline void
+devlink_sb_pool_get_req_set_sb_pool_index(struct devlink_sb_pool_get_req *req,
+ __u16 sb_pool_index)
+{
+ req->_present.sb_pool_index = 1;
+ req->sb_pool_index = sb_pool_index;
+}
+
+struct devlink_sb_pool_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 sb_index:1;
+ __u32 sb_pool_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 sb_index;
+ __u16 sb_pool_index;
+};
+
+void devlink_sb_pool_get_rsp_free(struct devlink_sb_pool_get_rsp *rsp);
+
+/*
+ * Get shared buffer pool instances.
+ */
+struct devlink_sb_pool_get_rsp *
+devlink_sb_pool_get(struct ynl_sock *ys, struct devlink_sb_pool_get_req *req);
+
+/* DEVLINK_CMD_SB_POOL_GET - dump */
+struct devlink_sb_pool_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_sb_pool_get_req_dump *
+devlink_sb_pool_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_pool_get_req_dump));
+}
+void
+devlink_sb_pool_get_req_dump_free(struct devlink_sb_pool_get_req_dump *req);
+
+static inline void
+devlink_sb_pool_get_req_dump_set_bus_name(struct devlink_sb_pool_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_pool_get_req_dump_set_dev_name(struct devlink_sb_pool_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_sb_pool_get_list {
+ struct devlink_sb_pool_get_list *next;
+ struct devlink_sb_pool_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void devlink_sb_pool_get_list_free(struct devlink_sb_pool_get_list *rsp);
+
+struct devlink_sb_pool_get_list *
+devlink_sb_pool_get_dump(struct ynl_sock *ys,
+ struct devlink_sb_pool_get_req_dump *req);
+
+/* ============== DEVLINK_CMD_SB_PORT_POOL_GET ============== */
+/* DEVLINK_CMD_SB_PORT_POOL_GET - do */
+struct devlink_sb_port_pool_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 sb_index:1;
+ __u32 sb_pool_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ __u32 sb_index;
+ __u16 sb_pool_index;
+};
+
+static inline struct devlink_sb_port_pool_get_req *
+devlink_sb_port_pool_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_port_pool_get_req));
+}
+void
+devlink_sb_port_pool_get_req_free(struct devlink_sb_port_pool_get_req *req);
+
+static inline void
+devlink_sb_port_pool_get_req_set_bus_name(struct devlink_sb_port_pool_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_port_pool_get_req_set_dev_name(struct devlink_sb_port_pool_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_sb_port_pool_get_req_set_port_index(struct devlink_sb_port_pool_get_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_sb_port_pool_get_req_set_sb_index(struct devlink_sb_port_pool_get_req *req,
+ __u32 sb_index)
+{
+ req->_present.sb_index = 1;
+ req->sb_index = sb_index;
+}
+static inline void
+devlink_sb_port_pool_get_req_set_sb_pool_index(struct devlink_sb_port_pool_get_req *req,
+ __u16 sb_pool_index)
+{
+ req->_present.sb_pool_index = 1;
+ req->sb_pool_index = sb_pool_index;
+}
+
+struct devlink_sb_port_pool_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 sb_index:1;
+ __u32 sb_pool_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ __u32 sb_index;
+ __u16 sb_pool_index;
+};
+
+void
+devlink_sb_port_pool_get_rsp_free(struct devlink_sb_port_pool_get_rsp *rsp);
+
+/*
+ * Get shared buffer port-pool combinations and threshold.
+ */
+struct devlink_sb_port_pool_get_rsp *
+devlink_sb_port_pool_get(struct ynl_sock *ys,
+ struct devlink_sb_port_pool_get_req *req);
+
+/* DEVLINK_CMD_SB_PORT_POOL_GET - dump */
+struct devlink_sb_port_pool_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_sb_port_pool_get_req_dump *
+devlink_sb_port_pool_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_port_pool_get_req_dump));
+}
+void
+devlink_sb_port_pool_get_req_dump_free(struct devlink_sb_port_pool_get_req_dump *req);
+
+static inline void
+devlink_sb_port_pool_get_req_dump_set_bus_name(struct devlink_sb_port_pool_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_port_pool_get_req_dump_set_dev_name(struct devlink_sb_port_pool_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_sb_port_pool_get_list {
+ struct devlink_sb_port_pool_get_list *next;
+ struct devlink_sb_port_pool_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void
+devlink_sb_port_pool_get_list_free(struct devlink_sb_port_pool_get_list *rsp);
+
+struct devlink_sb_port_pool_get_list *
+devlink_sb_port_pool_get_dump(struct ynl_sock *ys,
+ struct devlink_sb_port_pool_get_req_dump *req);
+
+/* ============== DEVLINK_CMD_SB_TC_POOL_BIND_GET ============== */
+/* DEVLINK_CMD_SB_TC_POOL_BIND_GET - do */
+struct devlink_sb_tc_pool_bind_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 sb_index:1;
+ __u32 sb_pool_type:1;
+ __u32 sb_tc_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ __u32 sb_index;
+ enum devlink_sb_pool_type sb_pool_type;
+ __u16 sb_tc_index;
+};
+
+static inline struct devlink_sb_tc_pool_bind_get_req *
+devlink_sb_tc_pool_bind_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_tc_pool_bind_get_req));
+}
+void
+devlink_sb_tc_pool_bind_get_req_free(struct devlink_sb_tc_pool_bind_get_req *req);
+
+static inline void
+devlink_sb_tc_pool_bind_get_req_set_bus_name(struct devlink_sb_tc_pool_bind_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_tc_pool_bind_get_req_set_dev_name(struct devlink_sb_tc_pool_bind_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_sb_tc_pool_bind_get_req_set_port_index(struct devlink_sb_tc_pool_bind_get_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_sb_tc_pool_bind_get_req_set_sb_index(struct devlink_sb_tc_pool_bind_get_req *req,
+ __u32 sb_index)
+{
+ req->_present.sb_index = 1;
+ req->sb_index = sb_index;
+}
+static inline void
+devlink_sb_tc_pool_bind_get_req_set_sb_pool_type(struct devlink_sb_tc_pool_bind_get_req *req,
+ enum devlink_sb_pool_type sb_pool_type)
+{
+ req->_present.sb_pool_type = 1;
+ req->sb_pool_type = sb_pool_type;
+}
+static inline void
+devlink_sb_tc_pool_bind_get_req_set_sb_tc_index(struct devlink_sb_tc_pool_bind_get_req *req,
+ __u16 sb_tc_index)
+{
+ req->_present.sb_tc_index = 1;
+ req->sb_tc_index = sb_tc_index;
+}
+
+struct devlink_sb_tc_pool_bind_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 sb_index:1;
+ __u32 sb_pool_type:1;
+ __u32 sb_tc_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ __u32 sb_index;
+ enum devlink_sb_pool_type sb_pool_type;
+ __u16 sb_tc_index;
+};
+
+void
+devlink_sb_tc_pool_bind_get_rsp_free(struct devlink_sb_tc_pool_bind_get_rsp *rsp);
+
+/*
+ * Get shared buffer port-TC to pool bindings and threshold.
+ */
+struct devlink_sb_tc_pool_bind_get_rsp *
+devlink_sb_tc_pool_bind_get(struct ynl_sock *ys,
+ struct devlink_sb_tc_pool_bind_get_req *req);
+
+/* DEVLINK_CMD_SB_TC_POOL_BIND_GET - dump */
+struct devlink_sb_tc_pool_bind_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_sb_tc_pool_bind_get_req_dump *
+devlink_sb_tc_pool_bind_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_tc_pool_bind_get_req_dump));
+}
+void
+devlink_sb_tc_pool_bind_get_req_dump_free(struct devlink_sb_tc_pool_bind_get_req_dump *req);
+
+static inline void
+devlink_sb_tc_pool_bind_get_req_dump_set_bus_name(struct devlink_sb_tc_pool_bind_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_tc_pool_bind_get_req_dump_set_dev_name(struct devlink_sb_tc_pool_bind_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_sb_tc_pool_bind_get_list {
+ struct devlink_sb_tc_pool_bind_get_list *next;
+ struct devlink_sb_tc_pool_bind_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void
+devlink_sb_tc_pool_bind_get_list_free(struct devlink_sb_tc_pool_bind_get_list *rsp);
+
+struct devlink_sb_tc_pool_bind_get_list *
+devlink_sb_tc_pool_bind_get_dump(struct ynl_sock *ys,
+ struct devlink_sb_tc_pool_bind_get_req_dump *req);
+
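+/* Usage sketch (editor's illustration, not part of the generated output):
+ * querying the pool binding of ingress TC 0 on shared buffer 0 of port 0,
+ * then printing the pool type via the devlink_sb_pool_type_str() helper
+ * declared above. The handle strings and indices are placeholders; "ys"
+ * is assumed to be an open socket for ynl_devlink_family.
+ *
+ *   struct devlink_sb_tc_pool_bind_get_req *req;
+ *   struct devlink_sb_tc_pool_bind_get_rsp *rsp;
+ *
+ *   req = devlink_sb_tc_pool_bind_get_req_alloc();
+ *   devlink_sb_tc_pool_bind_get_req_set_bus_name(req, "pci");
+ *   devlink_sb_tc_pool_bind_get_req_set_dev_name(req, "0000:01:00.0");
+ *   devlink_sb_tc_pool_bind_get_req_set_port_index(req, 0);
+ *   devlink_sb_tc_pool_bind_get_req_set_sb_index(req, 0);
+ *   devlink_sb_tc_pool_bind_get_req_set_sb_pool_type(req, DEVLINK_SB_POOL_TYPE_INGRESS);
+ *   devlink_sb_tc_pool_bind_get_req_set_sb_tc_index(req, 0);
+ *
+ *   rsp = devlink_sb_tc_pool_bind_get(ys, req);
+ *   devlink_sb_tc_pool_bind_get_req_free(req);
+ *   if (rsp) {
+ *           if (rsp->_present.sb_pool_type)
+ *                   printf("pool type: %s\n",
+ *                          devlink_sb_pool_type_str(rsp->sb_pool_type));
+ *           devlink_sb_tc_pool_bind_get_rsp_free(rsp);
+ *   }
+ */
+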
+/* ============== DEVLINK_CMD_PARAM_GET ============== */
+/* DEVLINK_CMD_PARAM_GET - do */
+struct devlink_param_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 param_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *param_name;
+};
+
+static inline struct devlink_param_get_req *devlink_param_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_param_get_req));
+}
+void devlink_param_get_req_free(struct devlink_param_get_req *req);
+
+static inline void
+devlink_param_get_req_set_bus_name(struct devlink_param_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_param_get_req_set_dev_name(struct devlink_param_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_param_get_req_set_param_name(struct devlink_param_get_req *req,
+ const char *param_name)
+{
+ free(req->param_name);
+ req->_present.param_name_len = strlen(param_name);
+ req->param_name = malloc(req->_present.param_name_len + 1);
+ memcpy(req->param_name, param_name, req->_present.param_name_len);
+ req->param_name[req->_present.param_name_len] = 0;
+}
+
+struct devlink_param_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 param_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *param_name;
+};
+
+void devlink_param_get_rsp_free(struct devlink_param_get_rsp *rsp);
+
+/*
+ * Get param instances.
+ */
+struct devlink_param_get_rsp *
+devlink_param_get(struct ynl_sock *ys, struct devlink_param_get_req *req);
+
+/* DEVLINK_CMD_PARAM_GET - dump */
+struct devlink_param_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_param_get_req_dump *
+devlink_param_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_param_get_req_dump));
+}
+void devlink_param_get_req_dump_free(struct devlink_param_get_req_dump *req);
+
+static inline void
+devlink_param_get_req_dump_set_bus_name(struct devlink_param_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_param_get_req_dump_set_dev_name(struct devlink_param_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_param_get_list {
+ struct devlink_param_get_list *next;
+ struct devlink_param_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void devlink_param_get_list_free(struct devlink_param_get_list *rsp);
+
+struct devlink_param_get_list *
+devlink_param_get_dump(struct ynl_sock *ys,
+ struct devlink_param_get_req_dump *req);
+
+/* ============== DEVLINK_CMD_REGION_GET ============== */
+/* DEVLINK_CMD_REGION_GET - do */
+struct devlink_region_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 region_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *region_name;
+};
+
+static inline struct devlink_region_get_req *devlink_region_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_region_get_req));
+}
+void devlink_region_get_req_free(struct devlink_region_get_req *req);
+
+static inline void
+devlink_region_get_req_set_bus_name(struct devlink_region_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_region_get_req_set_dev_name(struct devlink_region_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_region_get_req_set_port_index(struct devlink_region_get_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_region_get_req_set_region_name(struct devlink_region_get_req *req,
+ const char *region_name)
+{
+ free(req->region_name);
+ req->_present.region_name_len = strlen(region_name);
+ req->region_name = malloc(req->_present.region_name_len + 1);
+ memcpy(req->region_name, region_name, req->_present.region_name_len);
+ req->region_name[req->_present.region_name_len] = 0;
+}
+
+struct devlink_region_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 region_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *region_name;
+};
+
+void devlink_region_get_rsp_free(struct devlink_region_get_rsp *rsp);
+
+/*
+ * Get region instances.
+ */
+struct devlink_region_get_rsp *
+devlink_region_get(struct ynl_sock *ys, struct devlink_region_get_req *req);
+
+/* DEVLINK_CMD_REGION_GET - dump */
+struct devlink_region_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_region_get_req_dump *
+devlink_region_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_region_get_req_dump));
+}
+void devlink_region_get_req_dump_free(struct devlink_region_get_req_dump *req);
+
+static inline void
+devlink_region_get_req_dump_set_bus_name(struct devlink_region_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_region_get_req_dump_set_dev_name(struct devlink_region_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_region_get_list {
+ struct devlink_region_get_list *next;
+ struct devlink_region_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void devlink_region_get_list_free(struct devlink_region_get_list *rsp);
+
+struct devlink_region_get_list *
+devlink_region_get_dump(struct ynl_sock *ys,
+ struct devlink_region_get_req_dump *req);
+
/* ============== DEVLINK_CMD_INFO_GET ============== */
/* DEVLINK_CMD_INFO_GET - do */
struct devlink_info_get_req {
@@ -217,4 +1151,842 @@ void devlink_info_get_list_free(struct devlink_info_get_list *rsp);
struct devlink_info_get_list *devlink_info_get_dump(struct ynl_sock *ys);
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_GET ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_GET - do */
+struct devlink_health_reporter_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 health_reporter_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *health_reporter_name;
+};
+
+static inline struct devlink_health_reporter_get_req *
+devlink_health_reporter_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_health_reporter_get_req));
+}
+void
+devlink_health_reporter_get_req_free(struct devlink_health_reporter_get_req *req);
+
+static inline void
+devlink_health_reporter_get_req_set_bus_name(struct devlink_health_reporter_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_get_req_set_dev_name(struct devlink_health_reporter_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_get_req_set_port_index(struct devlink_health_reporter_get_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_health_reporter_get_req_set_health_reporter_name(struct devlink_health_reporter_get_req *req,
+ const char *health_reporter_name)
+{
+ free(req->health_reporter_name);
+ req->_present.health_reporter_name_len = strlen(health_reporter_name);
+ req->health_reporter_name = malloc(req->_present.health_reporter_name_len + 1);
+ memcpy(req->health_reporter_name, health_reporter_name, req->_present.health_reporter_name_len);
+ req->health_reporter_name[req->_present.health_reporter_name_len] = 0;
+}
+
+struct devlink_health_reporter_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 health_reporter_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *health_reporter_name;
+};
+
+void
+devlink_health_reporter_get_rsp_free(struct devlink_health_reporter_get_rsp *rsp);
+
+/*
+ * Get health reporter instances.
+ */
+struct devlink_health_reporter_get_rsp *
+devlink_health_reporter_get(struct ynl_sock *ys,
+ struct devlink_health_reporter_get_req *req);
+
+/* DEVLINK_CMD_HEALTH_REPORTER_GET - dump */
+struct devlink_health_reporter_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+};
+
+static inline struct devlink_health_reporter_get_req_dump *
+devlink_health_reporter_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_health_reporter_get_req_dump));
+}
+void
+devlink_health_reporter_get_req_dump_free(struct devlink_health_reporter_get_req_dump *req);
+
+static inline void
+devlink_health_reporter_get_req_dump_set_bus_name(struct devlink_health_reporter_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_get_req_dump_set_dev_name(struct devlink_health_reporter_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_get_req_dump_set_port_index(struct devlink_health_reporter_get_req_dump *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+
+struct devlink_health_reporter_get_list {
+ struct devlink_health_reporter_get_list *next;
+ struct devlink_health_reporter_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void
+devlink_health_reporter_get_list_free(struct devlink_health_reporter_get_list *rsp);
+
+struct devlink_health_reporter_get_list *
+devlink_health_reporter_get_dump(struct ynl_sock *ys,
+ struct devlink_health_reporter_get_req_dump *req);
+
+/* ============== DEVLINK_CMD_TRAP_GET ============== */
+/* DEVLINK_CMD_TRAP_GET - do */
+struct devlink_trap_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 trap_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *trap_name;
+};
+
+static inline struct devlink_trap_get_req *devlink_trap_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_trap_get_req));
+}
+void devlink_trap_get_req_free(struct devlink_trap_get_req *req);
+
+static inline void
+devlink_trap_get_req_set_bus_name(struct devlink_trap_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_trap_get_req_set_dev_name(struct devlink_trap_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_trap_get_req_set_trap_name(struct devlink_trap_get_req *req,
+ const char *trap_name)
+{
+ free(req->trap_name);
+ req->_present.trap_name_len = strlen(trap_name);
+ req->trap_name = malloc(req->_present.trap_name_len + 1);
+ memcpy(req->trap_name, trap_name, req->_present.trap_name_len);
+ req->trap_name[req->_present.trap_name_len] = 0;
+}
+
+struct devlink_trap_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 trap_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *trap_name;
+};
+
+void devlink_trap_get_rsp_free(struct devlink_trap_get_rsp *rsp);
+
+/*
+ * Get trap instances.
+ */
+struct devlink_trap_get_rsp *
+devlink_trap_get(struct ynl_sock *ys, struct devlink_trap_get_req *req);
+
+/* DEVLINK_CMD_TRAP_GET - dump */
+struct devlink_trap_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_trap_get_req_dump *
+devlink_trap_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_trap_get_req_dump));
+}
+void devlink_trap_get_req_dump_free(struct devlink_trap_get_req_dump *req);
+
+static inline void
+devlink_trap_get_req_dump_set_bus_name(struct devlink_trap_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_trap_get_req_dump_set_dev_name(struct devlink_trap_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_trap_get_list {
+ struct devlink_trap_get_list *next;
+ struct devlink_trap_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void devlink_trap_get_list_free(struct devlink_trap_get_list *rsp);
+
+struct devlink_trap_get_list *
+devlink_trap_get_dump(struct ynl_sock *ys,
+ struct devlink_trap_get_req_dump *req);
+
+/* ============== DEVLINK_CMD_TRAP_GROUP_GET ============== */
+/* DEVLINK_CMD_TRAP_GROUP_GET - do */
+struct devlink_trap_group_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 trap_group_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *trap_group_name;
+};
+
+static inline struct devlink_trap_group_get_req *
+devlink_trap_group_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_trap_group_get_req));
+}
+void devlink_trap_group_get_req_free(struct devlink_trap_group_get_req *req);
+
+static inline void
+devlink_trap_group_get_req_set_bus_name(struct devlink_trap_group_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_trap_group_get_req_set_dev_name(struct devlink_trap_group_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_trap_group_get_req_set_trap_group_name(struct devlink_trap_group_get_req *req,
+ const char *trap_group_name)
+{
+ free(req->trap_group_name);
+ req->_present.trap_group_name_len = strlen(trap_group_name);
+ req->trap_group_name = malloc(req->_present.trap_group_name_len + 1);
+ memcpy(req->trap_group_name, trap_group_name, req->_present.trap_group_name_len);
+ req->trap_group_name[req->_present.trap_group_name_len] = 0;
+}
+
+struct devlink_trap_group_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 trap_group_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *trap_group_name;
+};
+
+void devlink_trap_group_get_rsp_free(struct devlink_trap_group_get_rsp *rsp);
+
+/*
+ * Get trap group instances.
+ */
+struct devlink_trap_group_get_rsp *
+devlink_trap_group_get(struct ynl_sock *ys,
+ struct devlink_trap_group_get_req *req);
+
+/* DEVLINK_CMD_TRAP_GROUP_GET - dump */
+struct devlink_trap_group_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_trap_group_get_req_dump *
+devlink_trap_group_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_trap_group_get_req_dump));
+}
+void
+devlink_trap_group_get_req_dump_free(struct devlink_trap_group_get_req_dump *req);
+
+static inline void
+devlink_trap_group_get_req_dump_set_bus_name(struct devlink_trap_group_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_trap_group_get_req_dump_set_dev_name(struct devlink_trap_group_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_trap_group_get_list {
+ struct devlink_trap_group_get_list *next;
+ struct devlink_trap_group_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void devlink_trap_group_get_list_free(struct devlink_trap_group_get_list *rsp);
+
+struct devlink_trap_group_get_list *
+devlink_trap_group_get_dump(struct ynl_sock *ys,
+ struct devlink_trap_group_get_req_dump *req);
+
+/* ============== DEVLINK_CMD_TRAP_POLICER_GET ============== */
+/* DEVLINK_CMD_TRAP_POLICER_GET - do */
+struct devlink_trap_policer_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 trap_policer_id:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 trap_policer_id;
+};
+
+static inline struct devlink_trap_policer_get_req *
+devlink_trap_policer_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_trap_policer_get_req));
+}
+void
+devlink_trap_policer_get_req_free(struct devlink_trap_policer_get_req *req);
+
+static inline void
+devlink_trap_policer_get_req_set_bus_name(struct devlink_trap_policer_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_trap_policer_get_req_set_dev_name(struct devlink_trap_policer_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_trap_policer_get_req_set_trap_policer_id(struct devlink_trap_policer_get_req *req,
+ __u32 trap_policer_id)
+{
+ req->_present.trap_policer_id = 1;
+ req->trap_policer_id = trap_policer_id;
+}
+
+struct devlink_trap_policer_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 trap_policer_id:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 trap_policer_id;
+};
+
+void
+devlink_trap_policer_get_rsp_free(struct devlink_trap_policer_get_rsp *rsp);
+
+/*
+ * Get trap policer instances.
+ */
+struct devlink_trap_policer_get_rsp *
+devlink_trap_policer_get(struct ynl_sock *ys,
+ struct devlink_trap_policer_get_req *req);
+
+/* DEVLINK_CMD_TRAP_POLICER_GET - dump */
+struct devlink_trap_policer_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_trap_policer_get_req_dump *
+devlink_trap_policer_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_trap_policer_get_req_dump));
+}
+void
+devlink_trap_policer_get_req_dump_free(struct devlink_trap_policer_get_req_dump *req);
+
+static inline void
+devlink_trap_policer_get_req_dump_set_bus_name(struct devlink_trap_policer_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_trap_policer_get_req_dump_set_dev_name(struct devlink_trap_policer_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_trap_policer_get_list {
+ struct devlink_trap_policer_get_list *next;
+ struct devlink_trap_policer_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void
+devlink_trap_policer_get_list_free(struct devlink_trap_policer_get_list *rsp);
+
+struct devlink_trap_policer_get_list *
+devlink_trap_policer_get_dump(struct ynl_sock *ys,
+ struct devlink_trap_policer_get_req_dump *req);
+
+/* ============== DEVLINK_CMD_RATE_GET ============== */
+/* DEVLINK_CMD_RATE_GET - do */
+struct devlink_rate_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 rate_node_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *rate_node_name;
+};
+
+static inline struct devlink_rate_get_req *devlink_rate_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_rate_get_req));
+}
+void devlink_rate_get_req_free(struct devlink_rate_get_req *req);
+
+static inline void
+devlink_rate_get_req_set_bus_name(struct devlink_rate_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_rate_get_req_set_dev_name(struct devlink_rate_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_rate_get_req_set_port_index(struct devlink_rate_get_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_rate_get_req_set_rate_node_name(struct devlink_rate_get_req *req,
+ const char *rate_node_name)
+{
+ free(req->rate_node_name);
+ req->_present.rate_node_name_len = strlen(rate_node_name);
+ req->rate_node_name = malloc(req->_present.rate_node_name_len + 1);
+ memcpy(req->rate_node_name, rate_node_name, req->_present.rate_node_name_len);
+ req->rate_node_name[req->_present.rate_node_name_len] = 0;
+}
+
+struct devlink_rate_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 rate_node_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *rate_node_name;
+};
+
+void devlink_rate_get_rsp_free(struct devlink_rate_get_rsp *rsp);
+
+/*
+ * Get rate instances.
+ */
+struct devlink_rate_get_rsp *
+devlink_rate_get(struct ynl_sock *ys, struct devlink_rate_get_req *req);
+
+/* DEVLINK_CMD_RATE_GET - dump */
+struct devlink_rate_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_rate_get_req_dump *
+devlink_rate_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_rate_get_req_dump));
+}
+void devlink_rate_get_req_dump_free(struct devlink_rate_get_req_dump *req);
+
+static inline void
+devlink_rate_get_req_dump_set_bus_name(struct devlink_rate_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_rate_get_req_dump_set_dev_name(struct devlink_rate_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_rate_get_list {
+ struct devlink_rate_get_list *next;
+ struct devlink_rate_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void devlink_rate_get_list_free(struct devlink_rate_get_list *rsp);
+
+struct devlink_rate_get_list *
+devlink_rate_get_dump(struct ynl_sock *ys,
+ struct devlink_rate_get_req_dump *req);
+
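+/* Usage sketch (editor's illustration, not part of the generated output):
+ * dumping all rate objects of one devlink instance and walking the
+ * returned list. The YNL_LIST_END terminator mirrors the check used by
+ * devlink_rate_get_list_free(); depending on the ynl library version an
+ * iteration helper may also be available in ynl.h. "ys" is assumed to be
+ * an open socket for ynl_devlink_family; the handle strings are
+ * placeholders.
+ *
+ *   struct devlink_rate_get_req_dump *req;
+ *   struct devlink_rate_get_list *rates, *r;
+ *
+ *   req = devlink_rate_get_req_dump_alloc();
+ *   devlink_rate_get_req_dump_set_bus_name(req, "pci");
+ *   devlink_rate_get_req_dump_set_dev_name(req, "0000:01:00.0");
+ *
+ *   rates = devlink_rate_get_dump(ys, req);
+ *   devlink_rate_get_req_dump_free(req);
+ *   for (r = rates; rates && (void *)r != YNL_LIST_END; r = r->next) {
+ *           if (r->obj._present.rate_node_name_len)
+ *                   printf("node: %s\n", r->obj.rate_node_name);
+ *           else if (r->obj._present.port_index)
+ *                   printf("leaf: port %u\n", r->obj.port_index);
+ *   }
+ *   if (rates)
+ *           devlink_rate_get_list_free(rates);
+ */
+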
+/* ============== DEVLINK_CMD_LINECARD_GET ============== */
+/* DEVLINK_CMD_LINECARD_GET - do */
+struct devlink_linecard_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 linecard_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 linecard_index;
+};
+
+static inline struct devlink_linecard_get_req *
+devlink_linecard_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_linecard_get_req));
+}
+void devlink_linecard_get_req_free(struct devlink_linecard_get_req *req);
+
+static inline void
+devlink_linecard_get_req_set_bus_name(struct devlink_linecard_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_linecard_get_req_set_dev_name(struct devlink_linecard_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_linecard_get_req_set_linecard_index(struct devlink_linecard_get_req *req,
+ __u32 linecard_index)
+{
+ req->_present.linecard_index = 1;
+ req->linecard_index = linecard_index;
+}
+
+struct devlink_linecard_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 linecard_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 linecard_index;
+};
+
+void devlink_linecard_get_rsp_free(struct devlink_linecard_get_rsp *rsp);
+
+/*
+ * Get line card instances.
+ */
+struct devlink_linecard_get_rsp *
+devlink_linecard_get(struct ynl_sock *ys, struct devlink_linecard_get_req *req);
+
+/* DEVLINK_CMD_LINECARD_GET - dump */
+struct devlink_linecard_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_linecard_get_req_dump *
+devlink_linecard_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_linecard_get_req_dump));
+}
+void
+devlink_linecard_get_req_dump_free(struct devlink_linecard_get_req_dump *req);
+
+static inline void
+devlink_linecard_get_req_dump_set_bus_name(struct devlink_linecard_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_linecard_get_req_dump_set_dev_name(struct devlink_linecard_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_linecard_get_list {
+ struct devlink_linecard_get_list *next;
+ struct devlink_linecard_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void devlink_linecard_get_list_free(struct devlink_linecard_get_list *rsp);
+
+struct devlink_linecard_get_list *
+devlink_linecard_get_dump(struct ynl_sock *ys,
+ struct devlink_linecard_get_req_dump *req);
+
+/* ============== DEVLINK_CMD_SELFTESTS_GET ============== */
+/* DEVLINK_CMD_SELFTESTS_GET - do */
+struct devlink_selftests_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_selftests_get_req *
+devlink_selftests_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_selftests_get_req));
+}
+void devlink_selftests_get_req_free(struct devlink_selftests_get_req *req);
+
+static inline void
+devlink_selftests_get_req_set_bus_name(struct devlink_selftests_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_selftests_get_req_set_dev_name(struct devlink_selftests_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_selftests_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+void devlink_selftests_get_rsp_free(struct devlink_selftests_get_rsp *rsp);
+
+/*
+ * Get device selftest instances.
+ */
+struct devlink_selftests_get_rsp *
+devlink_selftests_get(struct ynl_sock *ys,
+ struct devlink_selftests_get_req *req);
+
+/* DEVLINK_CMD_SELFTESTS_GET - dump */
+struct devlink_selftests_get_list {
+ struct devlink_selftests_get_list *next;
+ struct devlink_selftests_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void devlink_selftests_get_list_free(struct devlink_selftests_get_list *rsp);
+
+struct devlink_selftests_get_list *
+devlink_selftests_get_dump(struct ynl_sock *ys);
+
#endif /* _LINUX_DEVLINK_GEN_H */
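The "do" commands above all follow the same alloc/set/execute/free pattern; below is a minimal sketch (again not from this patch) for DEVLINK_CMD_LINECARD_GET. The YNL socket helpers ynl_sock_create()/ynl_sock_destroy() and the ynl_devlink_family descriptor are assumed to come from the YNL C library and generated code.

#include <stdio.h>
#include <linux/types.h>

#include "ynl.h"          /* assumed: YNL C library header */
#include "devlink-user.h" /* assumed: name of the generated header shown above */

/* Query a single line card via the "do" request/response pair above. */
static int show_linecard(const char *bus, const char *dev, __u32 index)
{
	struct devlink_linecard_get_req *req;
	struct devlink_linecard_get_rsp *rsp;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret = -1;

	/* socket helpers and family descriptor are assumptions, see above */
	ys = ynl_sock_create(&ynl_devlink_family, &yerr);
	if (!ys)
		return -1;

	req = devlink_linecard_get_req_alloc();
	devlink_linecard_get_req_set_bus_name(req, bus);
	devlink_linecard_get_req_set_dev_name(req, dev);
	devlink_linecard_get_req_set_linecard_index(req, index);

	rsp = devlink_linecard_get(ys, req);
	devlink_linecard_get_req_free(req);
	if (!rsp)
		goto out;

	if (rsp->_present.linecard_index)
		printf("%s/%s: line card %u\n",
		       rsp->bus_name, rsp->dev_name, rsp->linecard_index);

	devlink_linecard_get_rsp_free(rsp);
	ret = 0;
out:
	ynl_sock_destroy(ys);
	return ret;
}

A caller would invoke it as, e.g., show_linecard("pci", "0000:08:00.0", 0); the names and index are placeholders.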
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
index 3ca28d4bcb18..6951bcc7efdc 100644
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -395,7 +395,10 @@ class YnlFamily(SpecFamily):
self.family.genl_family['mcast'][mcast_name])
def _add_attr(self, space, name, value):
- attr = self.attr_sets[space][name]
+ try:
+ attr = self.attr_sets[space][name]
+ except KeyError:
+ raise Exception(f"Space '{space}' has no attribute '{name}'")
nl_type = attr.value
if attr["type"] == 'nest':
nl_type |= Netlink.NLA_F_NESTED
@@ -450,7 +453,10 @@ class YnlFamily(SpecFamily):
attr_space = self.attr_sets[space]
rsp = dict()
for attr in attrs:
- attr_spec = attr_space.attrs_by_val[attr.type]
+ try:
+ attr_spec = attr_space.attrs_by_val[attr.type]
+ except KeyError:
+ raise Exception(f"Space '{space}' has no attribute with value '{attr.type}'")
if attr_spec["type"] == 'nest':
subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes'])
decoded = subdict
@@ -479,7 +485,10 @@ class YnlFamily(SpecFamily):
def _decode_extack_path(self, attrs, attr_set, offset, target):
for attr in attrs:
- attr_spec = attr_set.attrs_by_val[attr.type]
+ try:
+ attr_spec = attr_set.attrs_by_val[attr.type]
+ except KeyError:
+ raise Exception(f"Space '{attr_set.name}' has no attribute with value '{attr.type}'")
if offset > target:
break
if offset == target:
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
index 71c00bfafbc9..7b2d421f09cf 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
@@ -11,7 +11,6 @@ finish()
{
ip netns delete server || true
ip netns delete client || true
- ip link del link1_1 || true
}
trap finish EXIT
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 04341e1b38f0..8b017070960d 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -40,6 +40,7 @@ TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
TEST_PROGS += srv6_hencap_red_l3vpn_test.sh
TEST_PROGS += srv6_hl2encap_red_l2vpn_test.sh
TEST_PROGS += srv6_end_next_csid_l3vpn_test.sh
+TEST_PROGS += srv6_end_x_next_csid_l3vpn_test.sh
TEST_PROGS += srv6_end_flavors_test.sh
TEST_PROGS += vrf_strict_mode_test.sh
TEST_PROGS += arp_ndisc_evict_nocarrier.sh
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 35d89dfa6f11..c14ad6e75c1e 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -9,13 +9,16 @@ ret=0
ksft_skip=4
# all tests in this script. Can be overridden with -t option
-TESTS="unregister down carrier nexthop suppress ipv6_notify ipv4_notify ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr ipv4_mangle ipv6_mangle ipv4_bcast_neigh"
+TESTS="unregister down carrier nexthop suppress ipv6_notify ipv4_notify \
+ ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics \
+ ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr \
+ ipv4_mangle ipv6_mangle ipv4_bcast_neigh fib6_gc_test"
VERBOSE=0
PAUSE_ON_FAIL=no
PAUSE=no
-IP="ip -netns ns1"
-NS_EXEC="ip netns exec ns1"
+IP="$(which ip) -netns ns1"
+NS_EXEC="$(which ip) netns exec ns1"
which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
@@ -747,6 +750,68 @@ fib_notify_test()
cleanup &> /dev/null
}
+fib6_gc_test()
+{
+ setup
+
+ echo
+ echo "Fib6 garbage collection test"
+ set -e
+
+ EXPIRE=3
+
+ # Check expiration of routes every $EXPIRE seconds (GC)
+ $NS_EXEC sysctl -wq net.ipv6.route.gc_interval=$EXPIRE
+
+ $IP link add dummy_10 type dummy
+ $IP link set dev dummy_10 up
+ $IP -6 address add 2001:10::1/64 dev dummy_10
+
+ $NS_EXEC sysctl -wq net.ipv6.route.flush=1
+
+ # Temporary routes
+ for i in $(seq 1 1000); do
+ # Expire route after $EXPIRE seconds
+ $IP -6 route add 2001:20::$i \
+ via 2001:10::2 dev dummy_10 expires $EXPIRE
+ done
+ sleep $(($EXPIRE * 2))
+ N_EXP_SLEEP=$($IP -6 route list |grep expires|wc -l)
+ if [ $N_EXP_SLEEP -ne 0 ]; then
+ echo "FAIL: expected 0 routes with expires, got $N_EXP_SLEEP"
+ ret=1
+ else
+ ret=0
+ fi
+
+ # Permanent routes
+ for i in $(seq 1 5000); do
+ $IP -6 route add 2001:30::$i \
+ via 2001:10::2 dev dummy_10
+ done
+ # Temporary routes
+ for i in $(seq 1 1000); do
+ # Expire route after $EXPIRE seconds
+ $IP -6 route add 2001:20::$i \
+ via 2001:10::2 dev dummy_10 expires $EXPIRE
+ done
+ sleep $(($EXPIRE * 2))
+ N_EXP_SLEEP=$($IP -6 route list |grep expires|wc -l)
+ if [ $N_EXP_SLEEP -ne 0 ]; then
+ echo "FAIL: expected 0 routes with expires," \
+ "got $N_EXP_SLEEP (5000 permanent routes)"
+ ret=1
+ else
+ ret=0
+ fi
+
+ set +e
+
+ log_test $ret 0 "ipv6 route garbage collection"
+
+ cleanup &> /dev/null
+}
+
fib_suppress_test()
{
echo
@@ -2217,6 +2282,7 @@ do
ipv4_mangle) ipv4_mangle_test;;
ipv6_mangle) ipv6_mangle_test;;
ipv4_bcast_neigh) ipv4_bcast_neigh_test;;
+ fib6_gc_test|ipv6_gc) fib6_gc_test;;
help) echo "Test names: $TESTS"; exit 0;;
esac
diff --git a/tools/testing/selftests/net/forwarding/bridge_locked_port.sh b/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
index dc92d32464f6..9af9f6964808 100755
--- a/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_locked_port.sh
@@ -9,6 +9,7 @@ ALL_TESTS="
locked_port_mab_roam
locked_port_mab_config
locked_port_mab_flush
+ locked_port_mab_redirect
"
NUM_NETIFS=4
@@ -319,6 +320,41 @@ locked_port_mab_flush()
log_test "Locked port MAB FDB flush"
}
+# Check that traffic can be redirected from a locked bridge port and that it
+# does not create locked FDB entries.
+locked_port_mab_redirect()
+{
+ RET=0
+ check_port_mab_support || return 0
+
+ bridge link set dev $swp1 learning on locked on mab on
+ tc qdisc add dev $swp1 clsact
+ tc filter add dev $swp1 ingress protocol all pref 1 handle 101 flower \
+ action mirred egress redirect dev $swp2
+
+ ping_do $h1 192.0.2.2
+ check_err $? "Ping did not work with redirection"
+
+ bridge fdb get `mac_get $h1` br br0 vlan 1 2> /dev/null | \
+ grep "dev $swp1" | grep -q "locked"
+ check_fail $? "Locked entry created for redirected traffic"
+
+ tc filter del dev $swp1 ingress protocol all pref 1 handle 101 flower
+
+ ping_do $h1 192.0.2.2
+ check_fail $? "Ping worked without redirection"
+
+ bridge fdb get `mac_get $h1` br br0 vlan 1 2> /dev/null | \
+ grep "dev $swp1" | grep -q "locked"
+ check_err $? "Locked entry not created after deleting filter"
+
+ bridge fdb del `mac_get $h1` vlan 1 dev $swp1 master
+ tc qdisc del dev $swp1 clsact
+ bridge link set dev $swp1 learning off locked off mab off
+
+ log_test "Locked port MAB redirect"
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index dced4f612a78..9c2012d70b08 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -16,7 +16,8 @@ tests="
connect_v4 ip4-xon: Basic ipv4 ping between two NS
nat_connect_v4 ip4-nat-xon: Basic ipv4 tcp connection via NAT
netlink_checks ovsnl: validate netlink attrs and settings
- upcall_interfaces ovs: test the upcall interfaces"
+ upcall_interfaces ovs: test the upcall interfaces
+ drop_reason drop: test drop reasons are emitted"
info() {
[ $VERBOSE = 0 ] || echo $*
@@ -141,6 +142,25 @@ ovs_add_flow () {
return 0
}
+ovs_drop_record_and_run () {
+ local sbx=$1
+ shift
+
+ perf record -a -q -e skb:kfree_skb -o ${ovs_dir}/perf.data $* \
+ >> ${ovs_dir}/stdout 2>> ${ovs_dir}/stderr
+ return $?
+}
+
+ovs_drop_reason_count()
+{
+ local reason=$1
+
+ local perf_output=`perf script -i ${ovs_dir}/perf.data -F trace:event,trace`
+ local pattern="skb:kfree_skb:.*reason: $reason"
+
+ return `echo "$perf_output" | grep "$pattern" | wc -l`
+}
+
usage() {
echo
echo "$0 [OPTIONS] [TEST]..."
@@ -155,6 +175,76 @@ usage() {
exit 1
}
+# drop_reason test
+# - drop packets and verify the right drop reason is reported
+test_drop_reason() {
+ which perf >/dev/null 2>&1 || return $ksft_skip
+
+ sbx_add "test_drop_reason" || return $?
+
+ ovs_add_dp "test_drop_reason" dropreason || return 1
+
+ info "create namespaces"
+ for ns in client server; do
+ ovs_add_netns_and_veths "test_drop_reason" "dropreason" "$ns" \
+ "${ns:0:1}0" "${ns:0:1}1" || return 1
+ done
+
+ # Setup client namespace
+ ip netns exec client ip addr add 172.31.110.10/24 dev c1
+ ip netns exec client ip link set c1 up
+
+ # Setup server namespace
+ ip netns exec server ip addr add 172.31.110.20/24 dev s1
+ ip netns exec server ip link set s1 up
+
+ # Allow ARP
+ ovs_add_flow "test_drop_reason" dropreason \
+ 'in_port(1),eth(),eth_type(0x0806),arp()' '2' || return 1
+ ovs_add_flow "test_drop_reason" dropreason \
+ 'in_port(2),eth(),eth_type(0x0806),arp()' '1' || return 1
+
+ # Allow client ICMP traffic but drop return path
+ ovs_add_flow "test_drop_reason" dropreason \
+ "in_port(1),eth(),eth_type(0x0800),ipv4(src=172.31.110.10,proto=1),icmp()" '2'
+ ovs_add_flow "test_drop_reason" dropreason \
+ "in_port(2),eth(),eth_type(0x0800),ipv4(src=172.31.110.20,proto=1),icmp()" 'drop'
+
+ ovs_drop_record_and_run "test_drop_reason" ip netns exec client ping -c 2 172.31.110.20
+ ovs_drop_reason_count 0x30001 # OVS_DROP_FLOW_ACTION
+ if [[ "$?" -ne "2" ]]; then
+ info "Did not detect expected drops: $?"
+ return 1
+ fi
+
+ # Drop UDP 6000 traffic with an explicit action and an error code.
+ ovs_add_flow "test_drop_reason" dropreason \
+ "in_port(1),eth(),eth_type(0x0800),ipv4(src=172.31.110.10,proto=17),udp(dst=6000)" \
+ 'drop(42)'
+ # Drop UDP 7000 traffic with an explicit action with no error code.
+ ovs_add_flow "test_drop_reason" dropreason \
+ "in_port(1),eth(),eth_type(0x0800),ipv4(src=172.31.110.10,proto=17),udp(dst=7000)" \
+ 'drop(0)'
+
+ ovs_drop_record_and_run \
+ "test_drop_reason" ip netns exec client nc -i 1 -zuv 172.31.110.20 6000
+ ovs_drop_reason_count 0x30004 # OVS_DROP_EXPLICIT_ACTION_ERROR
+ if [[ "$?" -ne "1" ]]; then
+ info "Did not detect expected explicit error drops: $?"
+ return 1
+ fi
+
+ ovs_drop_record_and_run \
+ "test_drop_reason" ip netns exec client nc -i 1 -zuv 172.31.110.20 7000
+ ovs_drop_reason_count 0x30003 # OVS_DROP_EXPLICIT_ACTION
+ if [[ "$?" -ne "1" ]]; then
+ info "Did not detect expected explicit drops: $?"
+ return 1
+ fi
+
+ return 0
+}
+
# arp_ping test
# - client has 1500 byte MTU
# - server has 1500 byte MTU
@@ -393,6 +483,16 @@ test_netlink_checks () {
wc -l) == 2 ] || \
return 1
+ ERR_MSG="Flow actions may not be safe on all matching packets"
+ PRE_TEST=$(dmesg | grep -c "${ERR_MSG}")
+ ovs_add_flow "test_netlink_checks" nv0 \
+ 'in_port(1),eth(),eth_type(0x0806),arp()' 'drop(0),2' \
+ &> /dev/null && return 1
+ POST_TEST=$(dmesg | grep -c "${ERR_MSG}")
+ if [ "$PRE_TEST" == "$POST_TEST" ]; then
+ info "failed - error not generated"
+ return 1
+ fi
return 0
}
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index fbdac15e3134..912dc8c49085 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -301,6 +301,7 @@ class ovsactions(nla):
("OVS_ACTION_ATTR_CHECK_PKT_LEN", "none"),
("OVS_ACTION_ATTR_ADD_MPLS", "none"),
("OVS_ACTION_ATTR_DEC_TTL", "none"),
+ ("OVS_ACTION_ATTR_DROP", "uint32"),
)
class ctact(nla):
@@ -447,6 +448,8 @@ class ovsactions(nla):
print_str += "recirc(0x%x)" % int(self.get_attr(field[0]))
elif field[0] == "OVS_ACTION_ATTR_TRUNC":
print_str += "trunc(%d)" % int(self.get_attr(field[0]))
+ elif field[0] == "OVS_ACTION_ATTR_DROP":
+ print_str += "drop(%d)" % int(self.get_attr(field[0]))
elif field[1] == "flag":
if field[0] == "OVS_ACTION_ATTR_CT_CLEAR":
print_str += "ct_clear"
@@ -468,10 +471,21 @@ class ovsactions(nla):
while len(actstr) != 0:
parsed = False
if actstr.startswith("drop"):
- # for now, drops have no explicit action, so we
- # don't need to set any attributes. The final
- # act of the processing chain will just drop the packet
- return
+ # If no reason is provided, the implicit drop is used (i.e. no
+ # action). If some reason is given, an explicit action is used.
+ actstr, reason = parse_extract_field(
+ actstr,
+ "drop(",
+ "([0-9]+)",
+ lambda x: int(x, 0),
+ False,
+ None,
+ )
+ if reason is not None:
+ self["attrs"].append(["OVS_ACTION_ATTR_DROP", reason])
+ parsed = True
+ else:
+ return
elif parse_starts_block(actstr, "^(\d+)", False, True):
actstr, output = parse_extract_field(
diff --git a/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh
new file mode 100755
index 000000000000..c79cb8ede17f
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh
@@ -0,0 +1,1213 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <[email protected]>
+# author: Paolo Lungaroni <[email protected]>
+#
+# This script is designed for testing the support of NEXT-C-SID flavor for SRv6
+# End.X behavior.
+# A basic knowledge of SRv6 architecture [1] and of the compressed SID approach
+# [2] is assumed for the reader.
+#
+# The network topology used in the selftest is depicted hereafter, composed of
+# two hosts and four routers. Hosts hs-1 and hs-2 are connected through an
+# IPv4/IPv6 L3 VPN service, offered by routers rt-1, rt-2, rt-3 and rt-4 using
+# the NEXT-C-SID flavor. The key components for such VPNs are:
+#
+# i) The SRv6 H.Encaps/H.Encaps.Red behaviors [1] apply SRv6 Policies on
+# traffic received by connected hosts, initiating the VPN tunnel;
+#
+# ii) The SRv6 End.X behavior [1] (Endpoint with L3 cross connect) is a
+# variant of SRv6 End behavior. It advances the active SID in the SID
+# List carried by the SRH and forwards the packet to an L3 adjacency;
+#
+# iii) The NEXT-C-SID mechanism [2] offers the possibility of encoding several
+# SRv6 segments within a single 128-bit SID address, referred to as a
+# Compressed SID (C-SID) container. In this way, the length of the SID
+# List can be drastically reduced.
+# The NEXT-C-SID is provided as a "flavor" of the SRv6 End.X behavior
+# which advances the current C-SID (i.e. the Locator-Node Function defined
+# in [2]) with the next one carried in the Argument, if available.
+# When no more C-SIDs are available in the Argument, the SRv6 End.X
+# behavior will apply the End.X function selecting the next SID in the SID
+# List;
+#
+# iv) The SRv6 End.DT46 behavior [1] is used for removing the SRv6 Policy and,
+# thus, it terminates the VPN tunnel. Such a behavior is capable of
+# handling, at the same time, both tunneled IPv4 and IPv6 traffic.
+#
+# [1] https://datatracker.ietf.org/doc/html/rfc8986
+# [2] https://datatracker.ietf.org/doc/html/draft-ietf-spring-srv6-srh-compression
+#
+#
+# cafe::1 cafe::2
+# 10.0.0.1 10.0.0.2
+# +--------+ +--------+
+# | | | |
+# | hs-1 | | hs-2 |
+# | | | |
+# +---+----+ +----+---+
+# cafe::/64 | | cafe::/64
+# 10.0.0.0/24 | | 10.0.0.0/24
+# +---+----+ +----+---+
+# | | fcf0:0:1:2::/64 | |
+# | rt-1 +-------------------+ rt-2 |
+# | | | |
+# +---+----+ +----+---+
+# | . . |
+# | fcf0:0:1:3::/64 . |
+# | . . |
+# | . . |
+# fcf0:0:1:4::/64 | . | fcf0:0:2:3::/64
+# | . . |
+# | . . |
+# | fcf0:0:2:4::/64 . |
+# | . . |
+# +---+----+ +----+---+
+# | | | |
+# | rt-4 +-------------------+ rt-3 |
+# | | fcf0:0:3:4::/64 | |
+# +---+----+ +----+---+
+#
+# Every fcf0:0:x:y::/64 network interconnects the SRv6 routers rt-x with rt-y in
+# the selftest network.
+#
+# Local SID/C-SID table
+# =====================
+#
+# Each SRv6 router is configured with a Local SID/C-SID table in which
+# SIDs/C-SIDs are stored. Considering an SRv6 router rt-x, SIDs/C-SIDs are
+# configured in the Local SID/C-SIDs table as follows:
+#
+# Local SID/C-SID table for SRv6 router rt-x
+# +-----------------------------------------------------------+
+# |fcff:x::d46 is associated with the non-compressed SRv6 |
+# | End.DT46 behavior |
+# +-----------------------------------------------------------+
+# |fcbb:0:0x00::/48 is associated with the NEXT-C-SID flavor |
+# | of SRv6 End.X behavior |
+# +-----------------------------------------------------------+
+# |fcbb:0:0x00:d46::/64 is associated with the SRv6 End.DT46 |
+# | behavior when NEXT-C-SID compression is turned on |
+# +-----------------------------------------------------------+
+#
+# The fcff::/16 prefix is reserved for implementing SRv6 services with regular
+# (non compressed) SIDs. Reachability of SIDs is ensured by proper configuration
+# of the IPv6 routing tables in the routers.
+# Similarly, the fcbb:0::/32 prefix is reserved for implementing SRv6 VPN
+# services leveraging the NEXT-C-SID compression mechanism. Indeed, the
+# fcbb:0::/32 is used for encoding the Locator-Block while the Locator-Node
+# Function is encoded with 16 bits.
+#
+# Incoming traffic classification and application of SRv6 Policies
+# ================================================================
+#
+# An SRv6 ingress router applies different SRv6 Policies to the traffic received
+# from a connected host, considering the IPv4 or IPv6 destination address.
+# SRv6 policy enforcement consists of encapsulating the received traffic into a
+# new IPv6 packet with a given SID List contained in the SRH.
+# When the SID List contains only one SID, the SRH could be omitted completely
+# and that SID is stored directly in the IPv6 Destination Address (DA) (this is
+# called "reduced" encapsulation).
+#
+# Test cases for NEXT-C-SID
+# =========================
+#
+# We consider two test cases for NEXT-C-SID: i) single SID and ii) double SID.
+#
+# In the single SID test case we have a number of segments that are all
+# contained in a single Compressed SID (C-SID) container. Therefore the
+# resulting SID List has only one SID. Using the reduced encapsulation format
+# this will result in a packet with no SRH.
+#
+# In the double SID test case we have one segment carried in a Compressed SID
+# (C-SID) container, followed by a regular (non compressed) SID. The resulting
+# SID List has two segments and it is possible to test the advance to the next
+# SID when all the C-SIDs in a C-SID container have been processed. Using the
+# reduced encapsulation format this will result in a packet with an SRH
+# containing 1 segment.
+#
+# For the single SID test case, we use the IPv6 addresses of hs-1 and hs-2, for
+# the double SID test case, we use their IPv4 addresses. This is only done to
+# simplify the test setup and avoid adding other hosts or multiple addresses on
+# the same interface of a host.
+#
+# Traffic from hs-1 to hs-2
+# -------------------------
+#
+# Packets generated from hs-1 and directed towards hs-2 are handled by rt-1
+# which applies the SRv6 Policies as follows:
+#
+# i) IPv6 DA=cafe::2, H.Encaps.Red with SID List=fcbb:0:0300:0200:d46::
+# ii) IPv4 DA=10.0.0.2, H.Encaps.Red with SID List=fcbb:0:0300::,fcff:2::d46
+#
+# ### i) single SID
+#
+# The router rt-1 is configured to enforce the given Policy through the SRv6
+# H.Encaps.Red behavior, which omits the SRH entirely since it
+# pushes the single SID directly in the IPv6 DA. Such a SID encodes a whole
+# C-SID container carrying several C-SIDs (e.g. 0300, 0200, etc).
+#
+# As the packet reaches the router rt-3, the enabled NEXT-C-SID SRv6 End.X
+# behavior (associated with fcbb:0:0300::/48) is triggered. This behavior
+# analyzes the IPv6 DA and checks whether the Argument of the C-SID container
+# is zero or not. In this case, the Argument is *NOT* zero and the IPv6 DA is
+# updated as follows:
+#
+# +-----------------------------------------------------------------+
+# | Before applying the rt-3 enabled NEXT-C-SID SRv6 End.X behavior |
+# +-----------------------------------------------------------------+
+# | +---------- Argument |
+# | vvvvvvvvvv |
+# | IPv6 DA fcbb:0:0300:0200:d46:: |
+# | ^^^^ <-- shifting |
+# | | |
+# | Locator-Node Function |
+# +-----------------------------------------------------------------+
+# | After applying the rt-3 enabled NEXT-C-SID SRv6 End.X behavior |
+# +-----------------------------------------------------------------+
+# | +---------- Argument |
+# | vvvvvv |
+# | IPv6 DA fcbb:0:0200:d46:: |
+# | ^^^^ |
+# | | |
+# | Locator-Node Function |
+# +-----------------------------------------------------------------+
+#
+# After having applied the enabled NEXT-C-SID SRv6 End.X behavior, the packet
+# is sent to rt-4 node using the L3 adjacency address fcf0:0:3:4::4.
+#
+# The node rt-4 performs a plain IPv6 forward to the rt-2 router according to
+# its Local SID table and using the IPv6 DA fcbb:0:0200:d46:: .
+#
+# The router rt-2 is configured for decapsulating the inner IPv6 packet and,
+# for this reason, it applies the SRv6 End.DT46 behavior on the received
+# packet. It is worth noting that the SRv6 End.DT46 behavior does not require
+# the presence of the SRH: it is fully capable of operating properly on
+# IPv4/IPv6-in-IPv6 encapsulations.
+# At the end of the decap operation, the packet is sent to the host hs-2.
+#
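A standalone sketch (not part of the selftest) of the C-SID shift described in the diagram above, assuming the byte-aligned layout used throughout this test (32-bit Locator-Block, 16-bit Locator-Node Function):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define LCBLOCK_BYTES	4	/* 32-bit Locator-Block (lblen) */
#define LCNODE_BYTES	2	/* 16-bit Locator-Node Function (nflen) */

/* Shift the next C-SID out of the Argument into the Locator-Node Function
 * position. Returns 1 if a shift happened, 0 if the Argument was all-zero
 * (the behavior then falls back to the next SID in the SRH, if any).
 */
static int next_csid_shift(unsigned char da[16])
{
	const int arg_off = LCBLOCK_BYTES + LCNODE_BYTES;
	static const unsigned char zero[16];

	if (!memcmp(da + arg_off, zero, 16 - arg_off))
		return 0;

	/* pull the Argument up over the old Locator-Node Function ... */
	memmove(da + LCBLOCK_BYTES, da + arg_off, 16 - arg_off);
	/* ... and zero-fill the freed tail of the Argument */
	memset(da + 16 - LCNODE_BYTES, 0, LCNODE_BYTES);
	return 1;
}

int main(void)
{
	unsigned char da[16];
	char buf[INET6_ADDRSTRLEN];

	/* the single SID case from the walk-through above */
	inet_pton(AF_INET6, "fcbb:0:0300:0200:d46::", da);

	if (next_csid_shift(da)) {
		inet_ntop(AF_INET6, da, buf, sizeof(buf));
		/* prints fcbb:0:200:d46::, i.e. fcbb:0:0200:d46:: */
		printf("DA after rt-3: %s\n", buf);
	}
	return 0;
}

Compiled and run, it prints fcbb:0:200:d46:: (the compressed form of fcbb:0:0200:d46::), matching the "after" state shown in the diagram.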
+# ### ii) double SID
+#
+# The router rt-1 is configured to enforce the given Policy through the SRv6
+# H.Encaps.Red. As a result, the first SID fcbb:0:0300:: is stored into the
+# IPv6 DA, while the SRH pushed into the packet is made of only one SID, i.e.
+# fcff:2::d46. Hence, the packet sent by hs-1 to hs-2 is encapsulated in an
+# outer IPv6 header plus the SRH.
+#
+# As the packet reaches the node rt-3, the router applies the enabled NEXT-C-SID
+# SRv6 End.X behavior.
+#
+# +-----------------------------------------------------------------+
+# | Before applying the rt-3 enabled NEXT-C-SID SRv6 End.X behavior |
+# +-----------------------------------------------------------------+
+# | +---------- Argument |
+# | vvvv (Argument is all filled with zeros) |
+# | IPv6 DA fcbb:0:0300:: |
+# | ^^^^ |
+# | | |
+# | Locator-Node Function |
+# +-----------------------------------------------------------------+
+# | After applying the rt-3 enabled NEXT-C-SID SRv6 End.X behavior |
+# +-----------------------------------------------------------------+
+# | |
+# | IPv6 DA fcff:2::d46 |
+# | ^^^^^^^^^^^ |
+# | | |
+# | SID copied from the SID List contained in the SRH |
+# +-----------------------------------------------------------------+
+#
+# Since the Argument of the C-SID container is zero, the behavior can not
+# update the Locator-Node function with the next C-SID carried in the Argument
+# itself. Thus, the enabled NEXT-C-SID SRv6 End.X behavior operates as the
+# traditional End.X behavior: it updates the IPv6 DA by copying the next
+# available SID in the SID List carried by the SRH. Next, the packet is
+# forwarded to the rt-4 node using the L3 adjacency fcf0:0:3:4::4 previously
+# configured for this behavior.
+#
+# The node rt-4 performs a plain IPv6 forward to the rt-2 router according to
+# its Local SID table and using the IPv6 DA fcff:2::d46.
+#
+# Once the packet is received by rt-2, the router decapsulates the inner IPv4
+# packet using the SRv6 End.DT46 behavior (associated with the SID fcff:2::d46)
+# and sends it to the host hs-2.
+#
+# Traffic from hs-2 to hs-1
+# -------------------------
+#
+# Packets generated from hs-2 and directed towards hs-1 are handled by rt-2
+# which applies the SRv6 Policies as follows:
+#
+# i) IPv6 DA=cafe::1, SID List=fcbb:0:0400:0100:d46::
+# ii) IPv4 DA=10.0.0.1, SID List=fcbb:0:0300::,fcff:1::d46
+#
+# ### i) single SID
+#
+# The node hs-2 sends an IPv6 packet directed to node hs-1. The router rt-2 is
+# directly connected to hs-2 and receives the packet. Rt-2 applies the
+# H.Encap.Red behavior with policy i) described above. Since there is only one
+# SID, the SRH header is omitted and the policy is inserted directly into the DA
+# of IPv6 packet.
+#
+# The packet reaches the router rt-4 and the enabled NEXT-C-SID SRv6 End.X
+# behavior (associated with fcbb:0:0400::/48) is triggered. This behavior
+# analyzes the IPv6 DA and checks whether the Argument of the C-SID container
+# is zero or not. The Argument is *NOT* zero and the C-SID in the IPv6 DA is
+# advanced. At this point, the current IPv6 DA is fcbb:0:0100:d46:: .
+# The enabled NEXT-C-SID SRv6 End.X behavior is configured with the L3 adjacency
+# fcf0:0:1:4::1, used to route traffic to the rt-1 node.
+#
+# The router rt-1 is configured for decapsulating the inner packet. It applies
+# the SRv6 End.DT46 behavior on the received packet. Decapsulation does not
+# require the presence of the SRH. At the end of the decap operation, the packet
+# is sent to the host hs-1.
+#
+# ### ii) double SID
+#
+# The router rt-2 is configured to enforce the given Policy through the SRv6
+# H.Encaps.Red. As a result, the first SID fcbb:0:0300:: is stored into the
+# IPv6 DA, while the SRH pushed into the packet is made of only one SID, i.e.
+# fcff:1::d46. Hence, the packet sent by hs-2 to hs-1 is encapsulated in an
+# outer IPv6 header plus the SRH.
+#
+# As the packet reaches the node rt-3, the enabled NEXT-C-SID SRv6 End.X
+# behavior bound to the SID fcbb:0:0300::/48 is triggered.
+# Since the Argument of the C-SID container is zero, the behavior can not
+# update the Locator-Node function with the next C-SID carried in the Argument
+# itself. Thus, the enabled NEXT-C-SID SRv6 End.X behavior operates as the
+# traditional End.X behavior: it updates the IPv6 DA by copying the next
+# available SID in the SID List carried by the SRH. After that, the packet is
+# forwarded to the rt-4 node using the L3 adjacency (fcf0:0:3:4::4) previously
+# configured for this behavior.
+#
+# The node rt-4 performs a plain IPv6 forward to the rt-1 router according to
+# its Local SID table, considering the IPv6 DA fcff:1::d46.
+#
+# Once the packet is received by rt-1, the router decapsulates the inner IPv4
+# packet using the SRv6 End.DT46 behavior (associated with the SID fcff:1::d46)
+# and sends it to the host hs-1.
+
+# Kselftest framework requirement - SKIP code is 4.
+readonly ksft_skip=4
+
+readonly RDMSUFF="$(mktemp -u XXXXXXXX)"
+readonly DUMMY_DEVNAME="dum0"
+readonly VRF_TID=100
+readonly VRF_DEVNAME="vrf-${VRF_TID}"
+readonly RT2HS_DEVNAME="veth-t${VRF_TID}"
+readonly LOCALSID_TABLE_ID=90
+readonly IPv6_RT_NETWORK=fcf0:0
+readonly IPv6_HS_NETWORK=cafe
+readonly IPv4_HS_NETWORK=10.0.0
+readonly VPN_LOCATOR_SERVICE=fcff
+readonly DT46_FUNC=0d46
+readonly HEADEND_ENCAP="encap.red"
+
+# do not add ':' as separator
+readonly LCBLOCK_ADDR=fcbb0000
+readonly LCBLOCK_BLEN=32
+# do not add ':' as separator
+readonly LCNODEFUNC_FMT="0%d00"
+readonly LCNODEFUNC_BLEN=16
+
+readonly LCBLOCK_NODEFUNC_BLEN=$((LCBLOCK_BLEN + LCNODEFUNC_BLEN))
+
+readonly CSID_CNTR_PREFIX="dead:beaf::/32"
+# ID of the router used for testing the C-SID container cfgs
+readonly CSID_CNTR_RT_ID_TEST=1
+# Routing table used for testing the C-SID container cfgs
+readonly CSID_CNTR_RT_TABLE=91
+
+# C-SID container configurations to be tested
+#
+# An entry of the array is defined as "a,b,c" where:
+# - 'a' and 'b' elements represent respectively the Locator-Block length
+# (lblen) in bits and the Locator-Node Function length (nflen) in bits.
+# 'a' and 'b' can be set to default values using the placeholder "d" which
+# indicates the default kernel values (32 for lblen and 16 for nflen);
+# otherwise, any numeric value is accepted;
+# - 'c' indicates whether the C-SID configuration provided by the values 'a'
+# and 'b' should be considered valid ("y") or invalid ("n").
+declare -ra CSID_CONTAINER_CFGS=(
+ "d,d,y"
+ "d,16,y"
+ "16,d,y"
+ "16,32,y"
+ "32,16,y"
+ "48,8,y"
+ "8,48,y"
+ "d,0,n"
+ "0,d,n"
+ "32,0,n"
+ "0,32,n"
+ "17,d,n"
+ "d,17,n"
+ "120,16,n"
+ "16,120,n"
+ "0,128,n"
+ "128,0,n"
+ "130,0,n"
+ "0,130,n"
+ "0,0,n"
+)
+
+PING_TIMEOUT_SEC=4
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+# IDs of routers and hosts are initialized during the setup of the testing
+# network
+ROUTERS=''
+HOSTS=''
+
+SETUP_ERR=1
+
+ret=${ksft_skip}
+nsuccess=0
+nfail=0
+
+log_test()
+{
+ local rc="$1"
+ local expected="$2"
+ local msg="$3"
+
+ if [ "${rc}" -eq "${expected}" ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ printf "\nTests passed: %3d\n" "${nsuccess}"
+ printf "Tests failed: %3d\n" "${nfail}"
+
+ # when a test fails, the value of 'ret' is set to 1 (error code).
+ # Conversely, when all tests are passed successfully, the 'ret' value
+ # is set to 0 (success code).
+ if [ "${ret}" -ne 1 ]; then
+ ret=0
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+test_command_or_ksft_skip()
+{
+ local cmd="$1"
+
+ if [ ! -x "$(command -v "${cmd}")" ]; then
+ echo "SKIP: Could not run test without \"${cmd}\" tool";
+ exit "${ksft_skip}"
+ fi
+}
+
+get_nodename()
+{
+ local name="$1"
+
+ echo "${name}-${RDMSUFF}"
+}
+
+get_rtname()
+{
+ local rtid="$1"
+
+ get_nodename "rt-${rtid}"
+}
+
+get_hsname()
+{
+ local hsid="$1"
+
+ get_nodename "hs-${hsid}"
+}
+
+__create_namespace()
+{
+ local name="$1"
+
+ ip netns add "${name}"
+}
+
+create_router()
+{
+ local rtid="$1"
+ local nsname
+
+ nsname="$(get_rtname "${rtid}")"
+
+ __create_namespace "${nsname}"
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.forwarding=1
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.default.rp_filter=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.ip_forward=1
+}
+
+create_host()
+{
+ local hsid="$1"
+ local nsname
+
+ nsname="$(get_hsname "${hsid}")"
+
+ __create_namespace "${nsname}"
+}
+
+cleanup()
+{
+ local nsname
+ local i
+
+ # destroy routers
+ for i in ${ROUTERS}; do
+ nsname="$(get_rtname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # destroy hosts
+ for i in ${HOSTS}; do
+ nsname="$(get_hsname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # check whether the setup phase was completed successfully or not. In
+ # case of an error during the setup phase of the testing environment,
+ # the selftest is considered as "skipped".
+ if [ "${SETUP_ERR}" -ne 0 ]; then
+ echo "SKIP: Setting up the testing environment failed"
+ exit "${ksft_skip}"
+ fi
+
+ exit "${ret}"
+}
+
+add_link_rt_pairs()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local neigh
+ local nsname
+ local neigh_nsname
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ neigh_nsname="$(get_rtname "${neigh}")"
+
+ ip link add "veth-rt-${rt}-${neigh}" netns "${nsname}" \
+ type veth peer name "veth-rt-${neigh}-${rt}" \
+ netns "${neigh_nsname}"
+ done
+}
+
+get_network_prefix()
+{
+ local rt="$1"
+ local neigh="$2"
+ local p="${rt}"
+ local q="${neigh}"
+
+ if [ "${p}" -gt "${q}" ]; then
+ p="${q}"; q="${rt}"
+ fi
+
+ echo "${IPv6_RT_NETWORK}:${p}:${q}"
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local nsname
+ local net_prefix
+ local devname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ ip -netns "${nsname}" addr \
+ add "${net_prefix}::${rt}/64" dev "${devname}" nodad
+
+ ip -netns "${nsname}" link set "${devname}" up
+ done
+
+ ip -netns "${nsname}" link add "${DUMMY_DEVNAME}" type dummy
+
+ ip -netns "${nsname}" link set "${DUMMY_DEVNAME}" up
+ ip -netns "${nsname}" link set lo up
+}
+
+# build an ipv6 prefix/address based on the input string
+# Note that the input string does not contain ':' and '::' which are considered
+# to be implicit.
+# e.g.:
+# - input: fbcc000004000300
+# - output: fbcc:0000:0400:0300:0000:0000:0000:0000
+# ^^^^^^^^^^^^^^^^^^^
+# fill the address with 0s
+build_ipv6_addr()
+{
+ local addr="$1"
+ local out=""
+ local strlen="${#addr}"
+ local padn
+ local i
+
+ # add ":" every 4 digits (16 bits)
+ for (( i = 0; i < strlen; i++ )); do
+ if (( i > 0 && i < 32 && (i % 4) == 0 )); then
+ out="${out}:"
+ fi
+
+ out="${out}${addr:$i:1}"
+ done
+
+ # fill the remaining bits of the address with 0s
+ padn=$((32 - strlen))
+ for (( i = padn; i > 0; i-- )); do
+ if (( i > 0 && i < 32 && (i % 4) == 0 )); then
+ out="${out}:"
+ fi
+
+ out="${out}0"
+ done
+
+ printf "${out}"
+}
+
+build_csid()
+{
+ local nodeid="$1"
+
+ printf "${LCNODEFUNC_FMT}" "${nodeid}"
+}
+
+build_lcnode_func_prefix()
+{
+ local nodeid="$1"
+ local lcnodefunc
+ local prefix
+ local out
+
+ lcnodefunc="$(build_csid "${nodeid}")"
+ prefix="$(build_ipv6_addr "${LCBLOCK_ADDR}${lcnodefunc}")"
+
+ out="${prefix}/${LCBLOCK_NODEFUNC_BLEN}"
+
+ echo "${out}"
+}
+
+set_end_x_nextcsid()
+{
+ local rt="$1"
+ local adj="$2"
+
+ nsname="$(get_rtname "${rt}")"
+ net_prefix="$(get_network_prefix "${rt}" "${adj}")"
+ lcnode_func_prefix="$(build_lcnode_func_prefix "${rt}")"
+
+ # enabled NEXT-C-SID SRv6 End.X behavior (note that "dev" is the dummy
+ # dum0 device chosen for the sake of simplicity).
+ ip -netns "${nsname}" -6 route \
+ replace "${lcnode_func_prefix}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End.X nh6 "${net_prefix}::${adj}" \
+ flavors next-csid lblen "${LCBLOCK_BLEN}" \
+ nflen "${LCNODEFUNC_BLEN}" dev "${DUMMY_DEVNAME}"
+}
+
+set_underlay_sids_reachability()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ # set underlay network routes for SIDs reachability
+ ip -netns "${nsname}" -6 route \
+ replace "${VPN_LOCATOR_SERVICE}:${neigh}::/32" \
+ table "${LOCALSID_TABLE_ID}" \
+ via "${net_prefix}::${neigh}" dev "${devname}"
+
+ # set the underlay network for C-SIDs reachability
+ lcnode_func_prefix="$(build_lcnode_func_prefix "${neigh}")"
+
+ ip -netns "${nsname}" -6 route \
+ replace "${lcnode_func_prefix}" \
+ table "${LOCALSID_TABLE_ID}" \
+ via "${net_prefix}::${neigh}" dev "${devname}"
+ done
+}
+
+# Setup local SIDs for an SRv6 router
+setup_rt_local_sids()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local net_prefix
+ local devname
+ local nsname
+ local neigh
+ local lcnode_func_prefix
+ local lcblock_prefix
+
+ nsname="$(get_rtname "${rt}")"
+
+ set_underlay_sids_reachability "${rt}" "${rt_neighs}"
+
+ # all SIDs for VPNs start with a common locator. Routes and SRv6
+ # Endpoint behavior instances are grouped together in the 'localsid'
+ # table.
+ ip -netns "${nsname}" -6 rule \
+ add to "${VPN_LOCATOR_SERVICE}::/16" \
+ lookup "${LOCALSID_TABLE_ID}" prio 999
+
+ # common locator block for the NEXT-C-SID compression mechanism.
+ lcblock_prefix="$(build_ipv6_addr "${LCBLOCK_ADDR}")"
+ ip -netns "${nsname}" -6 rule \
+ add to "${lcblock_prefix}/${LCBLOCK_BLEN}" \
+ lookup "${LOCALSID_TABLE_ID}" prio 999
+}
+
+# build and install the SRv6 policy into the ingress SRv6 router as well as the
+# decap SID in the egress one.
+# args:
+# $1 - src host (evaluate automatically the ingress router)
+# $2 - dst host (evaluate automatically the egress router)
+# $3 - SRv6 routers configured for steering traffic (End.X behaviors)
+# $4 - single SID or double SID
+# $5 - traffic type (IPv6 or IPv4)
+__setup_l3vpn()
+{
+ local src="$1"
+ local dst="$2"
+ local end_rts="$3"
+ local mode="$4"
+ local traffic="$5"
+ local nsname
+ local policy
+ local container
+ local decapsid
+ local lcnfunc
+ local dt
+ local n
+ local rtsrc_nsname
+ local rtdst_nsname
+
+ rtsrc_nsname="$(get_rtname "${src}")"
+ rtdst_nsname="$(get_rtname "${dst}")"
+
+ container="${LCBLOCK_ADDR}"
+
+ # build first SID (C-SID container)
+ for n in ${end_rts}; do
+ lcnfunc="$(build_csid "${n}")"
+
+ container="${container}${lcnfunc}"
+ done
+
+ if [ "${mode}" -eq 1 ]; then
+ # single SID policy
+ dt="$(build_csid "${dst}")${DT46_FUNC}"
+ container="${container}${dt}"
+ # build the full ipv6 address for the container
+ policy="$(build_ipv6_addr "${container}")"
+
+ # build the decap SID used in the decap node
+ container="${LCBLOCK_ADDR}${dt}"
+ decapsid="$(build_ipv6_addr "${container}")"
+ else
+ # double SID policy
+ decapsid="${VPN_LOCATOR_SERVICE}:${dst}::${DT46_FUNC}"
+
+ policy="$(build_ipv6_addr "${container}"),${decapsid}"
+ fi
+
+ # apply encap policy
+ if [ "${traffic}" -eq 6 ]; then
+ ip -netns "${rtsrc_nsname}" -6 route \
+ add "${IPv6_HS_NETWORK}::${dst}" vrf "${VRF_DEVNAME}" \
+ encap seg6 mode "${HEADEND_ENCAP}" segs "${policy}" \
+ dev "${VRF_DEVNAME}"
+
+ ip -netns "${rtsrc_nsname}" -6 neigh \
+ add proxy "${IPv6_HS_NETWORK}::${dst}" \
+ dev "${RT2HS_DEVNAME}"
+ else
+ # "dev" must be different from the one where the packet is
+ # received, otherwise the proxy arp does not work.
+ ip -netns "${rtsrc_nsname}" -4 route \
+ add "${IPv4_HS_NETWORK}.${dst}" vrf "${VRF_DEVNAME}" \
+ encap seg6 mode "${HEADEND_ENCAP}" segs "${policy}" \
+ dev "${VRF_DEVNAME}"
+ fi
+
+ # apply decap
+ # Local End.DT46 behavior (decap)
+ ip -netns "${rtdst_nsname}" -6 route \
+ add "${decapsid}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End.DT46 vrftable "${VRF_TID}" \
+ dev "${VRF_DEVNAME}"
+}
+
+# see __setup_l3vpn()
+setup_ipv4_vpn_2sids()
+{
+ __setup_l3vpn "$1" "$2" "$3" 2 4
+}
+
+# see __setup_l3vpn()
+setup_ipv6_vpn_1sid()
+{
+ __setup_l3vpn "$1" "$2" "$3" 1 6
+}
+
+setup_hs()
+{
+ local hs="$1"
+ local rt="$2"
+ local hsname
+ local rtname
+
+ hsname="$(get_hsname "${hs}")"
+ rtname="$(get_rtname "${rt}")"
+
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip -netns "${hsname}" link add veth0 type veth \
+ peer name "${RT2HS_DEVNAME}" netns "${rtname}"
+
+ ip -netns "${hsname}" addr \
+ add "${IPv6_HS_NETWORK}::${hs}/64" dev veth0 nodad
+ ip -netns "${hsname}" addr add "${IPv4_HS_NETWORK}.${hs}/24" dev veth0
+
+ ip -netns "${hsname}" link set veth0 up
+ ip -netns "${hsname}" link set lo up
+
+ # configure the VRF on the router which is directly connected to the
+ # source host.
+ ip -netns "${rtname}" link \
+ add "${VRF_DEVNAME}" type vrf table "${VRF_TID}"
+ ip -netns "${rtname}" link set "${VRF_DEVNAME}" up
+
+ # enslave the veth interface connecting the router with the host to the
+ # VRF in the access router
+ ip -netns "${rtname}" link \
+ set "${RT2HS_DEVNAME}" master "${VRF_DEVNAME}"
+
+ # set default routes to unreachable for both ipv6 and ipv4
+ ip -netns "${rtname}" -6 route \
+ add unreachable default metric 4278198272 \
+ vrf "${VRF_DEVNAME}"
+ ip -netns "${rtname}" -4 route \
+ add unreachable default metric 4278198272 \
+ vrf "${VRF_DEVNAME}"
+
+ ip -netns "${rtname}" addr \
+ add "${IPv6_HS_NETWORK}::254/64" dev "${RT2HS_DEVNAME}" nodad
+ ip -netns "${rtname}" addr \
+ add "${IPv4_HS_NETWORK}.254/24" dev "${RT2HS_DEVNAME}"
+
+ ip -netns "${rtname}" link set "${RT2HS_DEVNAME}" up
+
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv6.conf."${RT2HS_DEVNAME}".proxy_ndp=1
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".proxy_arp=1
+
+ # disable the rp_filter otherwise the kernel gets confused about how
+ # to route decap ipv4 packets.
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".rp_filter=0
+
+ ip netns exec "${rtname}" sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
+}
+
+setup()
+{
+ local i
+
+ # create routers
+ ROUTERS="1 2 3 4"; readonly ROUTERS
+ for i in ${ROUTERS}; do
+ create_router "${i}"
+ done
+
+ # create hosts
+ HOSTS="1 2"; readonly HOSTS
+ for i in ${HOSTS}; do
+ create_host "${i}"
+ done
+
+ # set up the links for connecting routers
+ add_link_rt_pairs 1 "2 3 4"
+ add_link_rt_pairs 2 "3 4"
+ add_link_rt_pairs 3 "4"
+
+ # set up the basic connectivity of routers and routes required for
+ # reachability of SIDs.
+ setup_rt_networking 1 "2 3 4"
+ setup_rt_networking 2 "1 3 4"
+ setup_rt_networking 3 "1 2 4"
+ setup_rt_networking 4 "1 2 3"
+
+ # set up the hosts connected to routers
+ setup_hs 1 1
+ setup_hs 2 2
+
+ # set up default SRv6 Endpoints (i.e. SRv6 End and SRv6 End.DT46)
+ setup_rt_local_sids 1 "2 3 4"
+ setup_rt_local_sids 2 "1 3 4"
+ setup_rt_local_sids 3 "1 2 4"
+ setup_rt_local_sids 4 "1 2 3"
+
+ # set up SRv6 Policies
+
+ # create an IPv6 VPN between hosts hs-1 and hs-2.
+ #
+ # Direction hs-1 -> hs-2
+ # - rt-1 encap (H.Encaps.Red)
+ # - rt-3 SRv6 End.X behavior adj rt-4 (NEXT-C-SID flavor)
+ # - rt-4 Plain IPv6 Forwarding to rt-2
+ # - rt-2 SRv6 End.DT46 behavior
+ setup_ipv6_vpn_1sid 1 2 "3"
+
+ # Direction hs-2 -> hs-1
+ # - rt-2 encap (H.Encaps.Red)
+ # - rt-4 SRv6 End.X behavior adj rt-1 (NEXT-C-SID flavor)
+ # - rt-1 SRv6 End.DT46 behavior
+ setup_ipv6_vpn_1sid 2 1 "4"
+
+ # create an IPv4 VPN between hosts hs-1 and hs-2
+ #
+ # Direction hs-1 -> hs-2
+ # - rt-1 encap (H.Encaps.Red)
+ # - rt-3 SRv6 End.X behavior adj rt-4 (NEXT-C-SID flavor)
+ # - rt-4 Plain IPv6 Forwarding to rt-2
+ # - rt-2 SRv6 End.DT46 behavior
+ setup_ipv4_vpn_2sids 1 2 "3"
+
+ # Direction hs-2 -> hs-1
+ # - rt-2 encap (H.Encaps.Red)
+ # - rt-3 SRv6 End.X behavior adj rt-4 (NEXT-C-SID flavor)
+ # - rt-4 Plain IPv6 Forwarding to rt-1
+ # - rt-1 SRv6 End.DT46 behavior
+ setup_ipv4_vpn_2sids 2 1 "3"
+
+ # Setup the adjacencies in the SRv6 aware routers
+ # - rt-3 SRv6 End.X adjacency with rt-4
+ # - rt-4 SRv6 End.X adjacency with rt-1
+ set_end_x_nextcsid 3 4
+ set_end_x_nextcsid 4 1
+
+ # testing environment was set up successfully
+ SETUP_ERR=0
+}
+
+check_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+ local prefix
+ local rtsrc_nsname
+
+ rtsrc_nsname="$(get_rtname "${rtsrc}")"
+
+ prefix="$(get_network_prefix "${rtsrc}" "${rtdst}")"
+
+ ip netns exec "${rtsrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${prefix}::${rtdst}" >/dev/null 2>&1
+}
+
+check_and_log_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+
+ check_rt_connectivity "${rtsrc}" "${rtdst}"
+ log_test $? 0 "Routers connectivity: rt-${rtsrc} -> rt-${rtdst}"
+}
+
+check_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv6_HS_NETWORK}::${hsdst}" >/dev/null 2>&1
+}
+
+check_hs_ipv4_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv4_HS_NETWORK}.${hsdst}" >/dev/null 2>&1
+}
+
+check_and_log_hs2gw_connectivity()
+{
+ local hssrc="$1"
+
+ check_hs_ipv6_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> gw"
+
+ check_hs_ipv4_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> gw"
+}
+
+check_and_log_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_ipv4_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+router_tests()
+{
+ local i
+ local j
+
+ log_section "IPv6 routers connectivity test"
+
+ for i in ${ROUTERS}; do
+ for j in ${ROUTERS}; do
+ if [ "${i}" -eq "${j}" ]; then
+ continue
+ fi
+
+ check_and_log_rt_connectivity "${i}" "${j}"
+ done
+ done
+}
+
+host2gateway_tests()
+{
+ local hs
+
+ log_section "IPv4/IPv6 connectivity test among hosts and gateways"
+
+ for hs in ${HOSTS}; do
+ check_and_log_hs2gw_connectivity "${hs}"
+ done
+}
+
+host_vpn_tests()
+{
+ log_section "SRv6 VPN connectivity test hosts (h1 <-> h2, IPv6)"
+
+ check_and_log_hs_ipv6_connectivity 1 2
+ check_and_log_hs_ipv6_connectivity 2 1
+
+ log_section "SRv6 VPN connectivity test hosts (h1 <-> h2, IPv4)"
+
+ check_and_log_hs_ipv4_connectivity 1 2
+ check_and_log_hs_ipv4_connectivity 2 1
+}
+
+__nextcsid_end_x_behavior_test()
+{
+ local nsname="$1"
+ local cmd="$2"
+ local blen="$3"
+ local flen="$4"
+ local layout=""
+
+ if [ "${blen}" != "d" ]; then
+ layout="${layout} lblen ${blen}"
+ fi
+
+ if [ "${flen}" != "d" ]; then
+ layout="${layout} nflen ${flen}"
+ fi
+
+ ip -netns "${nsname}" -6 route \
+ "${cmd}" "${CSID_CNTR_PREFIX}" \
+ table "${CSID_CNTR_RT_TABLE}" \
+ encap seg6local action End.X nh6 :: \
+ flavors next-csid ${layout} \
+ dev "${DUMMY_DEVNAME}" &>/dev/null
+
+ return "$?"
+}
+
+rt_x_nextcsid_end_x_behavior_test()
+{
+ local rt="$1"
+ local blen="$2"
+ local flen="$3"
+ local nsname
+ local ret
+
+ nsname="$(get_rtname "${rt}")"
+
+ __nextcsid_end_x_behavior_test "${nsname}" "add" "${blen}" "${flen}"
+ ret="$?"
+ __nextcsid_end_x_behavior_test "${nsname}" "del" "${blen}" "${flen}"
+
+ return "${ret}"
+}
+
+__parse_csid_container_cfg()
+{
+ local cfg="$1"
+ local index="$2"
+ local out
+
+ echo "${cfg}" | cut -d',' -f"${index}"
+}
+
+csid_container_cfg_tests()
+{
+ local valid
+ local blen
+ local flen
+ local cfg
+ local ret
+
+ log_section "C-SID Container config tests (legend: d='kernel default')"
+
+ for cfg in "${CSID_CONTAINER_CFGS[@]}"; do
+ blen="$(__parse_csid_container_cfg "${cfg}" 1)"
+ flen="$(__parse_csid_container_cfg "${cfg}" 2)"
+ valid="$(__parse_csid_container_cfg "${cfg}" 3)"
+
+ rt_x_nextcsid_end_x_behavior_test \
+ "${CSID_CNTR_RT_ID_TEST}" \
+ "${blen}" \
+ "${flen}"
+ ret="$?"
+
+ if [ "${valid}" == "y" ]; then
+ log_test "${ret}" 0 \
+ "Accept valid C-SID container cfg (lblen=${blen}, nflen=${flen})"
+ else
+ log_test "${ret}" 2 \
+ "Reject invalid C-SID container cfg (lblen=${blen}, nflen=${flen})"
+ fi
+ done
+}
+
+test_iproute2_supp_or_ksft_skip()
+{
+ if ! ip route help 2>&1 | grep -qo "next-csid"; then
+ echo "SKIP: Missing SRv6 NEXT-C-SID flavor support in iproute2"
+ exit "${ksft_skip}"
+ fi
+}
+
+test_dummy_dev_or_ksft_skip()
+{
+ local test_netns
+
+ test_netns="dummy-$(mktemp -u XXXXXXXX)"
+
+ if ! ip netns add "${test_netns}"; then
+ echo "SKIP: Cannot set up netns for testing dummy dev support"
+ exit "${ksft_skip}"
+ fi
+
+ modprobe dummy &>/dev/null || true
+ if ! ip -netns "${test_netns}" link \
+ add "${DUMMY_DEVNAME}" type dummy; then
+ echo "SKIP: dummy dev not supported"
+
+ ip netns del "${test_netns}"
+ exit "${ksft_skip}"
+ fi
+
+ ip netns del "${test_netns}"
+}
+
+test_vrf_or_ksft_skip()
+{
+ modprobe vrf &>/dev/null || true
+ if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
+ echo "SKIP: vrf sysctl does not exist"
+ exit "${ksft_skip}"
+ fi
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "SKIP: Need root privileges"
+ exit "${ksft_skip}"
+fi
+
+# required programs to carry out this selftest
+test_command_or_ksft_skip ip
+test_command_or_ksft_skip ping
+test_command_or_ksft_skip sysctl
+test_command_or_ksft_skip grep
+test_command_or_ksft_skip cut
+
+test_iproute2_supp_or_ksft_skip
+test_dummy_dev_or_ksft_skip
+test_vrf_or_ksft_skip
+
+set -e
+trap cleanup EXIT
+
+setup
+set +e
+
+csid_container_cfg_tests
+
+router_tests
+host2gateway_tests
+host_vpn_tests
+
+print_log_test_results