-rw-r--r--Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml2
-rw-r--r--Documentation/netlink/genetlink-c.yaml3
-rw-r--r--Documentation/netlink/genetlink-legacy.yaml7
-rw-r--r--Documentation/netlink/genetlink.yaml3
-rw-r--r--Documentation/netlink/netlink-raw.yaml3
-rw-r--r--Documentation/netlink/specs/devlink.yaml1548
-rw-r--r--Documentation/netlink/specs/mptcp.yaml391
-rw-r--r--Documentation/networking/dsa/b53.rst14
-rw-r--r--Documentation/networking/dsa/bcm_sf2.rst2
-rw-r--r--Documentation/networking/dsa/configuration.rst102
-rw-r--r--Documentation/networking/dsa/dsa.rst162
-rw-r--r--Documentation/networking/dsa/lan9303.rst2
-rw-r--r--Documentation/networking/dsa/sja1105.rst6
-rw-r--r--Documentation/networking/ip-sysctl.rst10
-rw-r--r--Documentation/networking/mptcp-sysctl.rst11
-rw-r--r--Documentation/networking/page_pool.rst4
-rw-r--r--Documentation/userspace-api/netlink/genetlink-legacy.rst2
-rw-r--r--MAINTAINERS3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi2
-rw-r--r--drivers/bluetooth/btmtksdio.c44
-rw-r--r--drivers/bluetooth/btqca.c68
-rw-r--r--drivers/bluetooth/btqca.h5
-rw-r--r--drivers/bluetooth/btusb.c11
-rw-r--r--drivers/bluetooth/hci_bcm4377.c5
-rw-r--r--drivers/bluetooth/hci_qca.c11
-rw-r--r--drivers/net/bareudp.c13
-rw-r--r--drivers/net/dsa/b53/b53_common.c4
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c41
-rw-r--r--drivers/net/dsa/bcm_sf2.h2
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c4
-rw-r--r--drivers/net/dsa/lan9303-core.c4
-rw-r--r--drivers/net/dsa/lantiq_gswip.c34
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c106
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c48
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h5
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c2
-rw-r--r--drivers/net/dsa/mt7530.c18
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c4
-rw-r--r--drivers/net/dsa/ocelot/felix.c68
-rw-r--r--drivers/net/dsa/ocelot/felix.h6
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c50
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c4
-rw-r--r--drivers/net/dsa/qca/qca8k-leds.c6
-rw-r--r--drivers/net/dsa/qca/qca8k.h2
-rw-r--r--drivers/net/dsa/realtek/realtek-smi.c28
-rw-r--r--drivers/net/dsa/realtek/realtek.h2
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c4
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c12
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c7
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c2
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h2
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c158
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h16
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c13
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c3
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c45
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h8
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c1
-rw-r--r--drivers/net/ethernet/renesas/Kconfig9
-rw-r--r--drivers/net/ethernet/renesas/Makefile4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c22
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c14
-rw-r--r--drivers/net/geneve.c96
-rw-r--r--drivers/net/mdio/mdio-xgene.c2
-rw-r--r--drivers/net/veth.c25
-rw-r--r--drivers/net/vxlan/vxlan_core.c136
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c2
-rw-r--r--drivers/s390/net/ctcm_main.c4
-rw-r--r--drivers/s390/net/qeth_core_main.c2
-rw-r--r--include/linux/dsa/sja1105.h2
-rw-r--r--include/linux/tcp.h9
-rw-r--r--include/net/bluetooth/hci.h3
-rw-r--r--include/net/bluetooth/hci_core.h40
-rw-r--r--include/net/bluetooth/hci_sync.h2
-rw-r--r--include/net/dsa.h56
-rw-r--r--include/net/dsa_stubs.h22
-rw-r--r--include/net/dst.h7
-rw-r--r--include/net/inet_connection_sock.h1
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/inet_timewait_sock.h3
-rw-r--r--include/net/ipv6.h6
-rw-r--r--include/net/page_pool/helpers.h210
-rw-r--r--include/net/page_pool/types.h6
-rw-r--r--include/net/tc_act/tc_ct.h1
-rw-r--r--include/net/tcp.h71
-rw-r--r--include/net/udp_tunnel.h8
-rw-r--r--include/uapi/linux/devlink.h2
-rw-r--r--include/uapi/linux/if_link.h4
-rw-r--r--include/uapi/linux/mptcp.h174
-rw-r--r--include/uapi/linux/mptcp_pm.h150
-rw-r--r--include/uapi/linux/rtnetlink.h18
-rw-r--r--include/uapi/linux/tcp.h1
-rw-r--r--net/atm/atm_sysfs.c2
-rw-r--r--net/bluetooth/amp.c3
-rw-r--r--net/bluetooth/hci_conn.c123
-rw-r--r--net/bluetooth/hci_core.c3
-rw-r--r--net/bluetooth/hci_event.c92
-rw-r--r--net/bluetooth/hci_sync.c36
-rw-r--r--net/bluetooth/hci_sysfs.c23
-rw-r--r--net/bluetooth/iso.c38
-rw-r--r--net/bluetooth/msft.c20
-rw-r--r--net/core/dev.c120
-rw-r--r--net/core/dev_ioctl.c2
-rw-r--r--net/core/page_pool.c17
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/sock.c50
-rw-r--r--net/devlink/dev.c10
-rw-r--r--net/devlink/devl_internal.h64
-rw-r--r--net/devlink/dpipe.c14
-rw-r--r--net/devlink/health.c24
-rw-r--r--net/devlink/linecard.c3
-rw-r--r--net/devlink/netlink.c328
-rw-r--r--net/devlink/netlink_gen.c757
-rw-r--r--net/devlink/netlink_gen.h64
-rw-r--r--net/devlink/param.c14
-rw-r--r--net/devlink/port.c11
-rw-r--r--net/devlink/rate.c6
-rw-r--r--net/devlink/region.c8
-rw-r--r--net/devlink/resource.c4
-rw-r--r--net/devlink/sb.c17
-rw-r--r--net/devlink/trap.c9
-rw-r--r--net/dsa/Makefile6
-rw-r--r--net/dsa/conduit.c (renamed from net/dsa/master.c)118
-rw-r--r--net/dsa/conduit.h22
-rw-r--r--net/dsa/dsa.c224
-rw-r--r--net/dsa/dsa.h12
-rw-r--r--net/dsa/master.h22
-rw-r--r--net/dsa/netlink.c22
-rw-r--r--net/dsa/port.c124
-rw-r--r--net/dsa/port.h4
-rw-r--r--net/dsa/slave.h69
-rw-r--r--net/dsa/switch.c20
-rw-r--r--net/dsa/switch.h8
-rw-r--r--net/dsa/tag.c10
-rw-r--r--net/dsa/tag.h26
-rw-r--r--net/dsa/tag_8021q.c22
-rw-r--r--net/dsa/tag_8021q.h2
-rw-r--r--net/dsa/tag_ar9331.c4
-rw-r--r--net/dsa/tag_brcm.c14
-rw-r--r--net/dsa/tag_dsa.c6
-rw-r--r--net/dsa/tag_gswip.c4
-rw-r--r--net/dsa/tag_hellcreek.c4
-rw-r--r--net/dsa/tag_ksz.c12
-rw-r--r--net/dsa/tag_lan9303.c4
-rw-r--r--net/dsa/tag_mtk.c4
-rw-r--r--net/dsa/tag_none.c6
-rw-r--r--net/dsa/tag_ocelot.c22
-rw-r--r--net/dsa/tag_ocelot_8021q.c12
-rw-r--r--net/dsa/tag_qca.c6
-rw-r--r--net/dsa/tag_rtl4_a.c6
-rw-r--r--net/dsa/tag_rtl8_4.c6
-rw-r--r--net/dsa/tag_rzn1_a5psw.c4
-rw-r--r--net/dsa/tag_sja1105.c30
-rw-r--r--net/dsa/tag_trailer.c4
-rw-r--r--net/dsa/tag_xrs700x.c4
-rw-r--r--net/dsa/user.c (renamed from net/dsa/slave.c)1464
-rw-r--r--net/dsa/user.h69
-rw-r--r--net/ipv4/syncookies.c32
-rw-r--r--net/ipv4/tcp.c26
-rw-r--r--net/ipv4/tcp_input.c52
-rw-r--r--net/ipv4/tcp_ipv4.c5
-rw-r--r--net/ipv4/tcp_lp.c2
-rw-r--r--net/ipv4/tcp_minisocks.c19
-rw-r--r--net/ipv4/tcp_output.c34
-rw-r--r--net/ipv4/tcp_timer.c44
-rw-r--r--net/ipv6/addrconf.c19
-rw-r--r--net/ipv6/ip6_output.c113
-rw-r--r--net/ipv6/ip6_udp_tunnel.c70
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/mptcp/Makefile3
-rw-r--r--net/mptcp/ctrl.c16
-rw-r--r--net/mptcp/fastopen.c1
-rw-r--r--net/mptcp/mptcp_pm_gen.c179
-rw-r--r--net/mptcp/mptcp_pm_gen.h58
-rw-r--r--net/mptcp/pm_netlink.c114
-rw-r--r--net/mptcp/pm_userspace.c8
-rw-r--r--net/mptcp/protocol.c69
-rw-r--r--net/mptcp/protocol.h88
-rw-r--r--net/mptcp/sockopt.c65
-rw-r--r--net/mptcp/subflow.c46
-rw-r--r--net/netfilter/nf_synproxy_core.c2
-rw-r--r--net/netlink/genetlink.c3
-rw-r--r--net/sched/act_ct.c41
-rw-r--r--net/sched/sch_fq.c6
-rw-r--r--net/sched/sch_qfq.c2
-rw-r--r--net/tls/tls.h3
-rw-r--r--net/tls/tls_device.c2
-rw-r--r--net/tls/tls_sw.c10
-rw-r--r--tools/net/ynl/generated/devlink-user.c3861
-rw-r--r--tools/net/ynl/generated/devlink-user.h3301
-rw-r--r--tools/net/ynl/generated/ethtool-user.h82
-rw-r--r--tools/net/ynl/generated/fou-user.h2
-rw-r--r--tools/net/ynl/generated/handshake-user.h2
-rw-r--r--tools/net/ynl/generated/netdev-user.h4
-rw-r--r--tools/net/ynl/lib/ynl.c6
-rw-r--r--tools/net/ynl/lib/ynl.h5
-rw-r--r--tools/net/ynl/lib/ynl.py13
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py80
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c4
-rwxr-xr-xtools/testing/selftests/net/route_localnet.sh6
216 files changed, 13703 insertions, 3434 deletions
diff --git a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
index 5038818e9f2e..1c2444121e60 100644
--- a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
@@ -60,7 +60,7 @@ description: |
Check out example 6.
- - Port 5 can be wired to an external phy. Port 5 becomes a DSA slave.
+ - Port 5 can be wired to an external phy. Port 5 becomes a DSA user port.
For the multi-chip module MT7530, the external phy must be wired TX to TX
to gmac1 of the SoC for this to work. Ubiquiti EdgeRouter X SFP is wired
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
index 41014f5c01c4..b3029c64d0d5 100644
--- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -38,6 +38,8 @@ properties:
Should be a gpio specifier for a reset line.
maxItems: 1
+ wakeup-source: true
+
microchip,synclko-125:
$ref: /schemas/types.yaml#/definitions/flag
description:
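The new wakeup-source property only declares the wakeup capability in the device tree; assuming the KSZ driver exposes Wake-on-LAN on the corresponding user port (the port name lan1 below is a placeholder), it would typically be armed from userspace with ethtool:

    ethtool -s lan1 wol g    # wake on MagicPacket, if the driver supports it
    ethtool lan1             # check the "Supports Wake-on" / "Wake-on" fields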
diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml
index c72c8a428911..7ef2496d57c8 100644
--- a/Documentation/netlink/genetlink-c.yaml
+++ b/Documentation/netlink/genetlink-c.yaml
@@ -199,6 +199,9 @@ properties:
max-len:
description: Max length for a string or a binary attribute.
$ref: '#/$defs/len-or-define'
+ exact-len:
+ description: Exact length for a string or a binary attribute.
+ $ref: '#/$defs/len-or-define'
sub-type: *attr-type
display-hint: &display-hint
description: |
diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml
index 923de0ff1a9e..cd5ebe39b52c 100644
--- a/Documentation/netlink/genetlink-legacy.yaml
+++ b/Documentation/netlink/genetlink-legacy.yaml
@@ -192,7 +192,7 @@ properties:
type: string
type: &attr-type
description: The netlink attribute type
- enum: [ unused, pad, flag, binary,
+ enum: [ unused, pad, flag, binary, bitfield32,
uint, sint, u8, u16, u32, u64, s32, s64,
string, nest, array-nest, nest-type-value ]
doc:
@@ -242,6 +242,9 @@ properties:
max-len:
description: Max length for a string or a binary attribute.
$ref: '#/$defs/len-or-define'
+ exact-len:
+ description: Exact length for a string or a binary attribute.
+ $ref: '#/$defs/len-or-define'
sub-type: *attr-type
display-hint: *display-hint
# Start genetlink-c
@@ -337,7 +340,7 @@ properties:
description: Command flags.
type: array
items:
- enum: [ admin-perm ]
+ enum: [ admin-perm, uns-admin-perm ]
dont-validate:
description: Kernel attribute validation flags.
type: array
diff --git a/Documentation/netlink/genetlink.yaml b/Documentation/netlink/genetlink.yaml
index 9ceb096b2df2..501ed2e6c8ef 100644
--- a/Documentation/netlink/genetlink.yaml
+++ b/Documentation/netlink/genetlink.yaml
@@ -172,6 +172,9 @@ properties:
max-len:
description: Max length for a string or a binary attribute.
$ref: '#/$defs/len-or-define'
+ exact-len:
+ description: Exact length for a string or a binary attribute.
+ $ref: '#/$defs/len-or-define'
sub-type: *attr-type
display-hint: &display-hint
description: |
diff --git a/Documentation/netlink/netlink-raw.yaml b/Documentation/netlink/netlink-raw.yaml
index d976851b80f8..48db31f1d059 100644
--- a/Documentation/netlink/netlink-raw.yaml
+++ b/Documentation/netlink/netlink-raw.yaml
@@ -240,6 +240,9 @@ properties:
max-len:
description: Max length for a string or a binary attribute.
$ref: '#/$defs/len-or-define'
+ exact-len:
+ description: Exact length for a string or a binary attribute.
+ $ref: '#/$defs/len-or-define'
sub-type: *attr-type
display-hint: *display-hint
# Start genetlink-c
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index dec130d2507c..c6ba4889575a 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -15,6 +15,161 @@ definitions:
name: ingress
-
name: egress
+ -
+ type: enum
+ name: port-type
+ entries:
+ -
+ name: notset
+ -
+ name: auto
+ -
+ name: eth
+ -
+ name: ib
+ -
+ type: enum
+ name: port-flavour
+ entries:
+ -
+ name: physical
+ -
+ name: cpu
+ -
+ name: dsa
+ -
+ name: pci_pf
+ -
+ name: pci_vf
+ -
+ name: virtual
+ -
+ name: unused
+ -
+ name: pci_sf
+ -
+ type: enum
+ name: port-fn-state
+ entries:
+ -
+ name: inactive
+ -
+ name: active
+ -
+ type: enum
+ name: port-fn-opstate
+ entries:
+ -
+ name: detached
+ -
+ name: attached
+ -
+ type: enum
+ name: port-fn-attr-cap
+ entries:
+ -
+ name: roce-bit
+ -
+ name: migratable-bit
+ -
+ type: enum
+ name: sb-threshold-type
+ entries:
+ -
+ name: static
+ -
+ name: dynamic
+ -
+ type: enum
+ name: eswitch-mode
+ entries:
+ -
+ name: legacy
+ -
+ name: switchdev
+ -
+ type: enum
+ name: eswitch-inline-mode
+ entries:
+ -
+ name: none
+ -
+ name: link
+ -
+ name: network
+ -
+ name: transport
+ -
+ type: enum
+ name: eswitch-encap-mode
+ entries:
+ -
+ name: none
+ -
+ name: basic
+ -
+ type: enum
+ name: dpipe-match-type
+ entries:
+ -
+ name: field-exact
+ -
+ type: enum
+ name: dpipe-action-type
+ entries:
+ -
+ name: field-modify
+ -
+ type: enum
+ name: dpipe-field-mapping-type
+ entries:
+ -
+ name: none
+ -
+ name: ifindex
+ -
+ type: enum
+ name: resource-unit
+ entries:
+ -
+ name: entry
+ -
+ type: enum
+ name: reload-action
+ entries:
+ -
+ name: driver-reinit
+ value: 1
+ -
+ name: fw-activate
+ -
+ type: enum
+ name: param-cmode
+ entries:
+ -
+ name: runtime
+ -
+ name: driverinit
+ -
+ name: permanent
+ -
+ type: enum
+ name: flash-overwrite
+ entries:
+ -
+ name: settings-bit
+ -
+ name: identifiers-bit
+ -
+ type: enum
+ name: trap-action
+ entries:
+ -
+ name: drop
+ -
+ name: trap
+ -
+ name: mirror
attribute-sets:
-
@@ -31,6 +186,17 @@ attribute-sets:
-
name: port-index
type: u32
+ -
+ name: port-type
+ type: u16
+ enum: port-type
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: port-split-count
+ type: u32
+ value: 9
# TODO: fill in the attributes in between
@@ -45,18 +211,224 @@ attribute-sets:
name: sb-pool-index
type: u16
value: 17
-
-
name: sb-pool-type
type: u8
enum: sb-pool-type
+ -
+ name: sb-pool-size
+ type: u32
+ -
+ name: sb-pool-threshold-type
+ type: u8
+ enum: sb-threshold-type
+ -
+ name: sb-threshold
+ type: u32
+ -
+ name: sb-tc-index
+ type: u16
+ value: 22
# TODO: fill in the attributes in between
-
- name: sb-tc-index
+ name: eswitch-mode
type: u16
- value: 22
+ value: 25
+ enum: eswitch-mode
+
+ -
+ name: eswitch-inline-mode
+ type: u16
+ enum: eswitch-inline-mode
+ -
+ name: dpipe-tables
+ type: nest
+ nested-attributes: dl-dpipe-tables
+ -
+ name: dpipe-table
+ type: nest
+ multi-attr: true
+ nested-attributes: dl-dpipe-table
+ -
+ name: dpipe-table-name
+ type: string
+ -
+ name: dpipe-table-size
+ type: u64
+ -
+ name: dpipe-table-matches
+ type: nest
+ nested-attributes: dl-dpipe-table-matches
+ -
+ name: dpipe-table-actions
+ type: nest
+ nested-attributes: dl-dpipe-table-actions
+ -
+ name: dpipe-table-counters-enabled
+ type: u8
+ -
+ name: dpipe-entries
+ type: nest
+ nested-attributes: dl-dpipe-entries
+ -
+ name: dpipe-entry
+ type: nest
+ multi-attr: true
+ nested-attributes: dl-dpipe-entry
+ -
+ name: dpipe-entry-index
+ type: u64
+ -
+ name: dpipe-entry-match-values
+ type: nest
+ nested-attributes: dl-dpipe-entry-match-values
+ -
+ name: dpipe-entry-action-values
+ type: nest
+ nested-attributes: dl-dpipe-entry-action-values
+ -
+ name: dpipe-entry-counter
+ type: u64
+ -
+ name: dpipe-match
+ type: nest
+ multi-attr: true
+ nested-attributes: dl-dpipe-match
+ -
+ name: dpipe-match-value
+ type: nest
+ multi-attr: true
+ nested-attributes: dl-dpipe-match-value
+ -
+ name: dpipe-match-type
+ type: u32
+ enum: dpipe-match-type
+ -
+ name: dpipe-action
+ type: nest
+ multi-attr: true
+ nested-attributes: dl-dpipe-action
+ -
+ name: dpipe-action-value
+ type: nest
+ multi-attr: true
+ nested-attributes: dl-dpipe-action-value
+ -
+ name: dpipe-action-type
+ type: u32
+ enum: dpipe-action-type
+ -
+ name: dpipe-value
+ type: binary
+ -
+ name: dpipe-value-mask
+ type: binary
+ -
+ name: dpipe-value-mapping
+ type: u32
+ -
+ name: dpipe-headers
+ type: nest
+ nested-attributes: dl-dpipe-headers
+ -
+ name: dpipe-header
+ type: nest
+ multi-attr: true
+ nested-attributes: dl-dpipe-header
+ -
+ name: dpipe-header-name
+ type: string
+ -
+ name: dpipe-header-id
+ type: u32
+ -
+ name: dpipe-header-fields
+ type: nest
+ nested-attributes: dl-dpipe-header-fields
+ -
+ name: dpipe-header-global
+ type: u8
+ -
+ name: dpipe-header-index
+ type: u32
+ -
+ name: dpipe-field
+ type: nest
+ multi-attr: true
+ nested-attributes: dl-dpipe-field
+ -
+ name: dpipe-field-name
+ type: string
+ -
+ name: dpipe-field-id
+ type: u32
+ -
+ name: dpipe-field-bitwidth
+ type: u32
+ -
+ name: dpipe-field-mapping-type
+ type: u32
+ enum: dpipe-field-mapping-type
+ -
+ name: pad
+ type: pad
+ -
+ name: eswitch-encap-mode
+ type: u8
+ value: 62
+ enum: eswitch-encap-mode
+ -
+ name: resource-list
+ type: nest
+ nested-attributes: dl-resource-list
+ -
+ name: resource
+ type: nest
+ multi-attr: true
+ nested-attributes: dl-resource
+ -
+ name: resource-name
+ type: string
+ -
+ name: resource-id
+ type: u64
+ -
+ name: resource-size
+ type: u64
+ -
+ name: resource-size-new
+ type: u64
+ -
+ name: resource-size-valid
+ type: u8
+ -
+ name: resource-size-min
+ type: u64
+ -
+ name: resource-size-max
+ type: u64
+ -
+ name: resource-size-gran
+ type: u64
+ -
+ name: resource-unit
+ type: u8
+ enum: resource-unit
+ -
+ name: resource-occ
+ type: u64
+ -
+ name: dpipe-table-resource-id
+ type: u64
+ -
+ name: dpipe-table-resource-units
+ type: u64
+ -
+ name: port-flavour
+ type: u16
+ enum: port-flavour
# TODO: fill in the attributes in between
@@ -68,16 +440,40 @@ attribute-sets:
# TODO: fill in the attributes in between
-
+ name: param-type
+ type: u8
+ value: 83
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: param-value-cmode
+ type: u8
+ enum: param-cmode
+ value: 87
+ -
name: region-name
type: string
- value: 88
# TODO: fill in the attributes in between
-
+ name: region-snapshot-id
+ type: u32
+ value: 92
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: region-chunk-addr
+ type: u64
+ value: 96
+ -
+ name: region-chunk-len
+ type: u64
+ -
name: info-driver-name
type: string
- value: 98
-
name: info-serial-number
type: string
@@ -106,6 +502,29 @@ attribute-sets:
# TODO: fill in the attributes in between
-
+ name: fmsg
+ type: nest
+ nested-attributes: dl-fmsg
+ value: 106
+ -
+ name: fmsg-obj-nest-start
+ type: flag
+ -
+ name: fmsg-pair-nest-start
+ type: flag
+ -
+ name: fmsg-arr-nest-start
+ type: flag
+ -
+ name: fmsg-nest-end
+ type: flag
+ -
+ name: fmsg-obj-name
+ type: string
+
+ # TODO: fill in the attributes in between
+
+ -
name: health-reporter-name
type: string
value: 115
@@ -113,9 +532,36 @@ attribute-sets:
# TODO: fill in the attributes in between
-
+ name: health-reporter-graceful-period
+ type: u64
+ value: 120
+ -
+ name: health-reporter-auto-recover
+ type: u8
+ -
+ name: flash-update-file-name
+ type: string
+ -
+ name: flash-update-component
+ type: string
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: port-pci-pf-number
+ type: u16
+ value: 127
+
+ # TODO: fill in the attributes in between
+
+ -
name: trap-name
type: string
value: 130
+ -
+ name: trap-action
+ type: u8
+ enum: trap-action
# TODO: fill in the attributes in between
@@ -131,23 +577,68 @@ attribute-sets:
# TODO: fill in the attributes in between
-
- name: trap-policer-id
+ name: netns-fd
+ type: u32
+ value: 138
+ -
+ name: netns-pid
+ type: u32
+ -
+ name: netns-id
type: u32
- value: 142
# TODO: fill in the attributes in between
-
- name: reload-action
+ name: health-reporter-auto-dump
type: u8
- value: 153
+ value: 141
+ -
+ name: trap-policer-id
+ type: u32
+ -
+ name: trap-policer-rate
+ type: u64
+ -
+ name: trap-policer-burst
+ type: u64
+ -
+ name: port-function
+ type: nest
+ nested-attributes: dl-port-function
# TODO: fill in the attributes in between
-
+ name: port-controller-number
+ type: u32
+ value: 150
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: flash-update-overwrite-mask
+ type: bitfield32
+ enum: flash-overwrite
+ enum-as-flags: True
+ value: 152
+ -
+ name: reload-action
+ type: u8
+ enum: reload-action
+ -
+ name: reload-actions-performed
+ type: bitfield32
+ enum: reload-action
+ enum-as-flags: True
+ -
+ name: reload-limits
+ type: bitfield32
+ enum: reload-action
+ enum-as-flags: True
+ -
name: dev-stats
type: nest
- value: 156
nested-attributes: dl-dev-stats
-
name: reload-stats
@@ -182,9 +673,25 @@ attribute-sets:
# TODO: fill in the attributes in between
-
+ name: port-pci-sf-number
+ type: u32
+ value: 164
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: rate-tx-share
+ type: u64
+ value: 166
+ -
+ name: rate-tx-max
+ type: u64
+ -
name: rate-node-name
type: string
- value: 168
+ -
+ name: rate-parent-node-name
+ type: string
# TODO: fill in the attributes in between
@@ -193,6 +700,30 @@ attribute-sets:
type: u32
value: 171
+ # TODO: fill in the attributes in between
+
+ -
+ name: linecard-type
+ type: string
+ value: 173
+
+ # TODO: fill in the attributes in between
+
+ -
+ name: selftests
+ type: nest
+ value: 176
+ nested-attributes: dl-selftest-id
+ -
+ name: rate-tx-priority
+ type: u32
+ -
+ name: rate-tx-weight
+ type: u32
+ -
+ name: region-direct
+ type: flag
+
-
name: dl-dev-stats
subset-of: devlink
@@ -237,6 +768,261 @@ attribute-sets:
name: info-version-name
-
name: info-version-value
+ -
+ name: dl-port-function
+ name-prefix: devlink-port-fn-attr-
+ attr-max-name: devlink-port-function-attr-max
+ attributes:
+ -
+ name-prefix: devlink-port-function-attr-
+ name: hw-addr
+ type: binary
+ value: 1
+ -
+ name: state
+ type: u8
+ enum: port-fn-state
+ -
+ name: opstate
+ type: u8
+ enum: port-fn-opstate
+ -
+ name: caps
+ type: bitfield32
+ enum: port-fn-attr-cap
+ enum-as-flags: True
+
+ -
+ name: dl-dpipe-tables
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-table
+
+ -
+ name: dl-dpipe-table
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-table-name
+ -
+ name: dpipe-table-size
+ -
+ name: dpipe-table-matches
+ -
+ name: dpipe-table-actions
+ -
+ name: dpipe-table-counters-enabled
+ -
+ name: dpipe-table-resource-id
+ -
+ name: dpipe-table-resource-units
+
+ -
+ name: dl-dpipe-table-matches
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-match
+
+ -
+ name: dl-dpipe-table-actions
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-action
+
+ -
+ name: dl-dpipe-entries
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-entry
+
+ -
+ name: dl-dpipe-entry
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-entry-index
+ -
+ name: dpipe-entry-match-values
+ -
+ name: dpipe-entry-action-values
+ -
+ name: dpipe-entry-counter
+
+ -
+ name: dl-dpipe-entry-match-values
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-match-value
+
+ -
+ name: dl-dpipe-entry-action-values
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-action-value
+
+ -
+ name: dl-dpipe-match
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-match-type
+ -
+ name: dpipe-header-id
+ -
+ name: dpipe-header-global
+ -
+ name: dpipe-header-index
+ -
+ name: dpipe-field-id
+
+ -
+ name: dl-dpipe-match-value
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-match
+ -
+ name: dpipe-value
+ -
+ name: dpipe-value-mask
+ -
+ name: dpipe-value-mapping
+
+ -
+ name: dl-dpipe-action
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-action-type
+ -
+ name: dpipe-header-id
+ -
+ name: dpipe-header-global
+ -
+ name: dpipe-header-index
+ -
+ name: dpipe-field-id
+
+ -
+ name: dl-dpipe-action-value
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-action
+ -
+ name: dpipe-value
+ -
+ name: dpipe-value-mask
+ -
+ name: dpipe-value-mapping
+
+ -
+ name: dl-dpipe-headers
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-header
+
+ -
+ name: dl-dpipe-header
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-header-name
+ -
+ name: dpipe-header-id
+ -
+ name: dpipe-header-global
+ -
+ name: dpipe-header-fields
+
+ -
+ name: dl-dpipe-header-fields
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-field
+
+ -
+ name: dl-dpipe-field
+ subset-of: devlink
+ attributes:
+ -
+ name: dpipe-field-name
+ -
+ name: dpipe-field-id
+ -
+ name: dpipe-field-bitwidth
+ -
+ name: dpipe-field-mapping-type
+
+ -
+ name: dl-resource
+ subset-of: devlink
+ attributes:
+ # -
+ # name: resource-list
+ # This is currently unsupported due to circular dependency
+ -
+ name: resource-name
+ -
+ name: resource-id
+ -
+ name: resource-size
+ -
+ name: resource-size-new
+ -
+ name: resource-size-valid
+ -
+ name: resource-size-min
+ -
+ name: resource-size-max
+ -
+ name: resource-size-gran
+ -
+ name: resource-unit
+ -
+ name: resource-occ
+
+ -
+ name: dl-resource-list
+ subset-of: devlink
+ attributes:
+ -
+ name: resource
+
+ -
+ name: dl-fmsg
+ subset-of: devlink
+ attributes:
+ -
+ name: fmsg-obj-nest-start
+ -
+ name: fmsg-pair-nest-start
+ -
+ name: fmsg-arr-nest-start
+ -
+ name: fmsg-nest-end
+ -
+ name: fmsg-obj-name
+
+ -
+ name: dl-selftest-id
+ name-prefix: devlink-attr-selftest-id-
+ attributes:
+ -
+ name: flash
+ type: flag
operations:
enum-model: directional
@@ -245,10 +1031,7 @@ operations:
name: get
doc: Get devlink instances.
attribute-set: devlink
- dont-validate:
- - strict
- - dump
-
+ dont-validate: [ strict, dump ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -263,7 +1046,6 @@ operations:
- bus-name
- dev-name
- reload-failed
- - reload-action
- dev-stats
dump:
reply: *get-reply
@@ -272,9 +1054,7 @@ operations:
name: port-get
doc: Get devlink port instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -293,16 +1073,90 @@ operations:
reply:
value: 3 # due to a bug, port dump returns DEVLINK_CMD_NEW
attributes: *port-id-attrs
+ -
+ name: port-set
+ doc: Set devlink port instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - port-index
+ - port-type
+ - port-function
- # TODO: fill in the operations in between
+ -
+ name: port-new
+ doc: Create devlink port instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - port-index
+ - port-flavour
+ - port-pci-pf-number
+ - port-pci-sf-number
+ - port-controller-number
+ reply:
+ value: 7
+ attributes: *port-id-attrs
+
+ -
+ name: port-del
+ doc: Delete devlink port instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ attributes: *port-id-attrs
+
+ -
+ name: port-split
+ doc: Split devlink port instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - port-index
+ - port-split-count
+
+ -
+ name: port-unsplit
+ doc: Unsplit devlink port instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ attributes: *port-id-attrs
-
name: sb-get
doc: Get shared buffer instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -320,15 +1174,11 @@ operations:
attributes: *dev-id-attrs
reply: *sb-get-reply
- # TODO: fill in the operations in between
-
-
name: sb-pool-get
doc: Get shared buffer pool instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -347,15 +1197,29 @@ operations:
attributes: *dev-id-attrs
reply: *sb-pool-get-reply
- # TODO: fill in the operations in between
+ -
+ name: sb-pool-set
+ doc: Set shared buffer pool instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - sb-index
+ - sb-pool-index
+ - sb-pool-threshold-type
+ - sb-pool-size
-
name: sb-port-pool-get
doc: Get shared buffer port-pool combinations and threshold.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -375,15 +1239,29 @@ operations:
attributes: *dev-id-attrs
reply: *sb-port-pool-get-reply
- # TODO: fill in the operations in between
+ -
+ name: sb-port-pool-set
+ doc: Set shared buffer port-pool combinations and threshold.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - port-index
+ - sb-index
+ - sb-pool-index
+ - sb-threshold
-
name: sb-tc-pool-bind-get
doc: Get shared buffer port-TC to pool bindings and threshold.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -404,41 +1282,264 @@ operations:
attributes: *dev-id-attrs
reply: *sb-tc-pool-bind-get-reply
- # TODO: fill in the operations in between
+ -
+ name: sb-tc-pool-bind-set
+ doc: Set shared buffer port-TC to pool bindings and threshold.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - port-index
+ - sb-index
+ - sb-pool-index
+ - sb-pool-type
+ - sb-tc-index
+ - sb-threshold
+
+ -
+ name: sb-occ-snapshot
+ doc: Take occupancy snapshot of shared buffer.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ value: 27
+ attributes:
+ - bus-name
+ - dev-name
+ - sb-index
+
+ -
+ name: sb-occ-max-clear
+ doc: Clear occupancy watermarks of shared buffer.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - sb-index
+
+ -
+ name: eswitch-get
+ doc: Get eswitch attributes.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes: *dev-id-attrs
+ reply:
+ value: 29
+ attributes: &eswitch-attrs
+ - bus-name
+ - dev-name
+ - eswitch-mode
+ - eswitch-inline-mode
+ - eswitch-encap-mode
+
+ -
+ name: eswitch-set
+ doc: Set eswitch attributes.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes: *eswitch-attrs
+
+ -
+ name: dpipe-table-get
+ doc: Get dpipe table attributes.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - dpipe-table-name
+ reply:
+ value: 31
+ attributes:
+ - bus-name
+ - dev-name
+ - dpipe-tables
+
+ -
+ name: dpipe-entries-get
+ doc: Get dpipe entries attributes.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - dpipe-table-name
+ reply:
+ attributes:
+ - bus-name
+ - dev-name
+ - dpipe-entries
+
+ -
+ name: dpipe-headers-get
+ doc: Get dpipe headers attributes.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ reply:
+ attributes:
+ - bus-name
+ - dev-name
+ - dpipe-headers
+
+ -
+ name: dpipe-table-counters-set
+ doc: Set dpipe counter attributes.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - dpipe-table-name
+ - dpipe-table-counters-enabled
+
+ -
+ name: resource-set
+ doc: Set resource attributes.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - resource-id
+ - resource-size
+
+ -
+ name: resource-dump
+ doc: Get resource attributes.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ reply:
+ value: 36
+ attributes:
+ - bus-name
+ - dev-name
+ - resource-list
+
+ -
+ name: reload
+ doc: Reload devlink.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - reload-action
+ - reload-limits
+ - netns-pid
+ - netns-fd
+ - netns-id
+ reply:
+ attributes:
+ - bus-name
+ - dev-name
+ - reload-actions-performed
-
name: param-get
doc: Get param instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
request:
- value: 38
attributes: &param-id-attrs
- bus-name
- dev-name
- param-name
reply: &param-get-reply
- value: 38
attributes: *param-id-attrs
dump:
request:
attributes: *dev-id-attrs
reply: *param-get-reply
- # TODO: fill in the operations in between
+ -
+ name: param-set
+ doc: Set param instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - param-name
+ - param-type
+ # param-value-data is missing here as the type is variable
+ - param-value-cmode
-
name: region-get
doc: Get region instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -457,16 +1558,97 @@ operations:
attributes: *dev-id-attrs
reply: *region-get-reply
- # TODO: fill in the operations in between
+ -
+ name: region-new
+ doc: Create region snapshot.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port-optional
+ post: devlink-nl-post-doit
+ request:
+ value: 44
+ attributes: &region-snapshot-id-attrs
+ - bus-name
+ - dev-name
+ - port-index
+ - region-name
+ - region-snapshot-id
+ reply:
+ value: 44
+ attributes: *region-snapshot-id-attrs
+
+ -
+ name: region-del
+ doc: Delete region snapshot.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port-optional
+ post: devlink-nl-post-doit
+ request:
+ attributes: *region-snapshot-id-attrs
+
+ -
+ name: region-read
+ doc: Read region data.
+ attribute-set: devlink
+ dont-validate: [ dump-strict ]
+ flags: [ admin-perm ]
+ dump:
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - port-index
+ - region-name
+ - region-snapshot-id
+ - region-direct
+ - region-chunk-addr
+ - region-chunk-len
+ reply:
+ value: 46
+ attributes:
+ - bus-name
+ - dev-name
+ - port-index
+ - region-name
+
+ -
+ name: port-param-get
+ doc: Get port param instances.
+ attribute-set: devlink
+ dont-validate: [ strict, dump-strict ]
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ attributes: *port-id-attrs
+ reply:
+ attributes: *port-id-attrs
+ dump:
+ reply:
+ attributes: *port-id-attrs
+
+ -
+ name: port-param-set
+ doc: Set port param instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port
+ post: devlink-nl-post-doit
+ request:
+ attributes: *port-id-attrs
-
name: info-get
doc: Get device information, like driver name, hardware and firmware versions etc.
attribute-set: devlink
- dont-validate:
- - strict
- - dump
-
+ dont-validate: [ strict, dump ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -490,9 +1672,7 @@ operations:
name: health-reporter-get
doc: Get health reporter instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -509,15 +1689,97 @@ operations:
attributes: *port-id-attrs
reply: *health-reporter-get-reply
- # TODO: fill in the operations in between
+ -
+ name: health-reporter-set
+ doc: Set health reporter instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port-optional
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - port-index
+ - health-reporter-name
+ - health-reporter-graceful-period
+ - health-reporter-auto-recover
+ - health-reporter-auto-dump
+
+ -
+ name: health-reporter-recover
+ doc: Recover health reporter instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port-optional
+ post: devlink-nl-post-doit
+ request:
+ attributes: *health-reporter-id-attrs
+
+ -
+ name: health-reporter-diagnose
+ doc: Diagnose health reporter instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port-optional
+ post: devlink-nl-post-doit
+ request:
+ attributes: *health-reporter-id-attrs
+
+ -
+ name: health-reporter-dump-get
+ doc: Dump health reporter instances.
+ attribute-set: devlink
+ dont-validate: [ dump-strict ]
+ flags: [ admin-perm ]
+ dump:
+ request:
+ attributes: *health-reporter-id-attrs
+ reply:
+ value: 56
+ attributes:
+ - fmsg
+
+ -
+ name: health-reporter-dump-clear
+ doc: Clear dump of health reporter instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port-optional
+ post: devlink-nl-post-doit
+ request:
+ attributes: *health-reporter-id-attrs
+
+ -
+ name: flash-update
+ doc: Flash update devlink instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - flash-update-file-name
+ - flash-update-component
+ - flash-update-overwrite-mask
-
name: trap-get
doc: Get trap instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -535,15 +1797,27 @@ operations:
attributes: *dev-id-attrs
reply: *trap-get-reply
- # TODO: fill in the operations in between
+ -
+ name: trap-set
+ doc: Set trap instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - trap-name
+ - trap-action
-
name: trap-group-get
doc: Get trap group instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -561,15 +1835,28 @@ operations:
attributes: *dev-id-attrs
reply: *trap-group-get-reply
- # TODO: fill in the operations in between
+ -
+ name: trap-group-set
+ doc: Set trap group instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - trap-group-name
+ - trap-action
+ - trap-policer-id
-
name: trap-policer-get
doc: Get trap policer instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -587,15 +1874,41 @@ operations:
attributes: *dev-id-attrs
reply: *trap-policer-get-reply
- # TODO: fill in the operations in between
+ -
+ name: trap-policer-set
+ doc: Set trap policer instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - trap-policer-id
+ - trap-policer-rate
+ - trap-policer-burst
+
+ -
+ name: health-reporter-test
+ doc: Test health reporter instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit-port-optional
+ post: devlink-nl-post-doit
+ request:
+ value: 73
+ attributes: *health-reporter-id-attrs
-
name: rate-get
doc: Get rate instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -614,15 +1927,66 @@ operations:
attributes: *dev-id-attrs
reply: *rate-get-reply
- # TODO: fill in the operations in between
+ -
+ name: rate-set
+ doc: Set rate instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - rate-node-name
+ - rate-tx-share
+ - rate-tx-max
+ - rate-tx-priority
+ - rate-tx-weight
+ - rate-parent-node-name
+
+ -
+ name: rate-new
+ doc: Create rate instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - rate-node-name
+ - rate-tx-share
+ - rate-tx-max
+ - rate-tx-priority
+ - rate-tx-weight
+ - rate-parent-node-name
+
+ -
+ name: rate-del
+ doc: Delete rate instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - rate-node-name
-
name: linecard-get
doc: Get line card instances.
attribute-set: devlink
- dont-validate:
- - strict
-
+ dont-validate: [ strict ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -640,16 +2004,27 @@ operations:
attributes: *dev-id-attrs
reply: *linecard-get-reply
- # TODO: fill in the operations in between
+ -
+ name: linecard-set
+ doc: Set line card instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - linecard-index
+ - linecard-type
-
name: selftests-get
doc: Get device selftest instances.
attribute-set: devlink
- dont-validate:
- - strict
- - dump
-
+ dont-validate: [ strict, dump ]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -661,3 +2036,18 @@ operations:
attributes: *dev-id-attrs
dump:
reply: *selftests-get-reply
+
+ -
+ name: selftests-run
+ doc: Run device selftest instances.
+ attribute-set: devlink
+ dont-validate: [ strict ]
+ flags: [ admin-perm ]
+ do:
+ pre: devlink-nl-pre-doit
+ post: devlink-nl-post-doit
+ request:
+ attributes:
+ - bus-name
+ - dev-name
+ - selftests
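For orientation, most of the operations filled in above already have front ends in the iproute2 devlink(8) tool. A hedged sketch of the correspondence, with the devlink handle, port index, trap name and reporter name all being placeholders:

    DEV=pci/0000:01:00.0                                             # placeholder handle

    devlink port split $DEV/0 count 2                                # port-split
    devlink port unsplit $DEV/0                                      # port-unsplit
    devlink sb pool set $DEV sb 0 pool 0 size 1024 thtype dynamic    # sb-pool-set
    devlink dev eswitch set $DEV mode switchdev                      # eswitch-set
    devlink dev reload $DEV action driver_reinit                     # reload
    devlink dev flash $DEV file fw.bin                               # flash-update
    devlink trap set $DEV trap source_mac_is_multicast action drop   # trap-set
    devlink health recover $DEV reporter fw                          # health-reporter-recover
    devlink dev selftests run $DEV id flash                          # selftests-run (syntax varies by iproute2 version)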
diff --git a/Documentation/netlink/specs/mptcp.yaml b/Documentation/netlink/specs/mptcp.yaml
new file mode 100644
index 000000000000..ec5c454a87ea
--- /dev/null
+++ b/Documentation/netlink/specs/mptcp.yaml
@@ -0,0 +1,391 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: mptcp_pm
+protocol: genetlink-legacy
+doc: Multipath TCP.
+
+c-family-name: mptcp-pm-name
+c-version-name: mptcp-pm-ver
+max-by-define: true
+kernel-policy: per-op
+
+definitions:
+ -
+ type: enum
+ name: event-type
+ enum-name: mptcp-event-type
+ name-prefix: mptcp-event-
+ entries:
+ -
+ name: unspec
+ doc: unused event
+ -
+ name: created
+ doc:
+ token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
+ A new MPTCP connection has been created. It is a good time to
+ allocate memory and send ADD_ADDR if needed. Depending on the
+ traffic patterns it can take a long time until the
+ MPTCP_EVENT_ESTABLISHED is sent.
+ -
+ name: established
+ doc:
+ token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
+ An MPTCP connection is established (can start new subflows).
+ -
+ name: closed
+ doc:
+ token
+ An MPTCP connection has stopped.
+ -
+ name: announced
+ value: 6
+ doc:
+ token, rem_id, family, daddr4 | daddr6 [, dport]
+ A new address has been announced by the peer.
+ -
+ name: removed
+ doc:
+ token, rem_id
+ An address has been lost by the peer.
+ -
+ name: sub-established
+ value: 10
+ doc:
+ token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
+ dport, backup, if_idx [, error]
+ A new subflow has been established. 'error' should not be set.
+ -
+ name: sub-closed
+ doc:
+ token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
+ dport, backup, if_idx [, error]
+ A subflow has been closed. An error (copy of sk_err) could be set if an
+ error has been detected for this subflow.
+ -
+ name: sub-priority
+ value: 13
+ doc:
+ token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
+ dport, backup, if_idx [, error]
+ The priority of a subflow has changed. 'error' should not be set.
+ -
+ name: listener-created
+ value: 15
+ doc:
+ family, sport, saddr4 | saddr6
+ A new PM listener is created.
+ -
+ name: listener-closed
+ doc:
+ family, sport, saddr4 | saddr6
+ A PM listener is closed.
+
+attribute-sets:
+ -
+ name: address
+ name-prefix: mptcp-pm-addr-attr-
+ attributes:
+ -
+ name: unspec
+ type: unused
+ value: 0
+ -
+ name: family
+ type: u16
+ -
+ name: id
+ type: u8
+ -
+ name: addr4
+ type: u32
+ byte-order: big-endian
+ -
+ name: addr6
+ type: binary
+ checks:
+ exact-len: 16
+ -
+ name: port
+ type: u16
+ byte-order: big-endian
+ -
+ name: flags
+ type: u32
+ -
+ name: if-idx
+ type: s32
+ -
+ name: subflow-attribute
+ name-prefix: mptcp-subflow-attr-
+ attributes:
+ -
+ name: unspec
+ type: unused
+ value: 0
+ -
+ name: token-rem
+ type: u32
+ -
+ name: token-loc
+ type: u32
+ -
+ name: relwrite-seq
+ type: u32
+ -
+ name: map-seq
+ type: u64
+ -
+ name: map-sfseq
+ type: u32
+ -
+ name: ssn-offset
+ type: u32
+ -
+ name: map-datalen
+ type: u16
+ -
+ name: flags
+ type: u32
+ -
+ name: id-rem
+ type: u8
+ -
+ name: id-loc
+ type: u8
+ -
+ name: pad
+ type: pad
+ -
+ name: endpoint
+ name-prefix: mptcp-pm-endpoint-
+ attributes:
+ -
+ name: addr
+ type: nest
+ nested-attributes: address
+ -
+ name: attr
+ name-prefix: mptcp-pm-attr-
+ attributes:
+ -
+ name: unspec
+ type: unused
+ value: 0
+ -
+ name: addr
+ type: nest
+ nested-attributes: address
+ -
+ name: rcv-add-addrs
+ type: u32
+ -
+ name: subflows
+ type: u32
+ -
+ name: token
+ type: u32
+ -
+ name: loc-id
+ type: u8
+ -
+ name: addr-remote
+ type: nest
+ nested-attributes: address
+ -
+ name: event-attr
+ enum-name: mptcp-event-attr
+ name-prefix: mptcp-attr-
+ attributes:
+ -
+ name: unspec
+ type: unused
+ value: 0
+ -
+ name: token
+ type: u32
+ -
+ name: family
+ type: u16
+ -
+ name: loc-id
+ type: u8
+ -
+ name: rem-id
+ type: u8
+ -
+ name: saddr4
+ type: u32
+ byte-order: big-endian
+ -
+ name: saddr6
+ type: binary
+ checks:
+ min-len: 16
+ -
+ name: daddr4
+ type: u32
+ byte-order: big-endian
+ -
+ name: daddr6
+ type: binary
+ checks:
+ min-len: 16
+ -
+ name: sport
+ type: u16
+ byte-order: big-endian
+ -
+ name: dport
+ type: u16
+ byte-order: big-endian
+ -
+ name: backup
+ type: u8
+ -
+ name: error
+ type: u8
+ -
+ name: flags
+ type: u16
+ -
+ name: timeout
+ type: u32
+ -
+ name: if_idx
+ type: u32
+ -
+ name: reset-reason
+ type: u32
+ -
+ name: reset-flags
+ type: u32
+ -
+ name: server-side
+ type: u8
+
+operations:
+ list:
+ -
+ name: unspec
+ doc: unused
+ value: 0
+ -
+ name: add-addr
+ doc: Add endpoint
+ attribute-set: endpoint
+ dont-validate: [ strict ]
+ flags: [ uns-admin-perm ]
+ do: &add-addr-attrs
+ request:
+ attributes:
+ - addr
+ -
+ name: del-addr
+ doc: Delete endpoint
+ attribute-set: endpoint
+ dont-validate: [ strict ]
+ flags: [ uns-admin-perm ]
+ do: *add-addr-attrs
+ -
+ name: get-addr
+ doc: Get endpoint information
+ attribute-set: endpoint
+ dont-validate: [ strict ]
+ flags: [ uns-admin-perm ]
+ do: &get-addr-attrs
+ request:
+ attributes:
+ - addr
+ reply:
+ attributes:
+ - addr
+ dump:
+ reply:
+ attributes:
+ - addr
+ -
+ name: flush-addrs
+ doc: Flush addresses
+ attribute-set: endpoint
+ dont-validate: [ strict ]
+ flags: [ uns-admin-perm ]
+ do: *add-addr-attrs
+ -
+ name: set-limits
+ doc: Set protocol limits
+ attribute-set: attr
+ dont-validate: [ strict ]
+ flags: [ uns-admin-perm ]
+ do: &mptcp-limits
+ request:
+ attributes:
+ - rcv-add-addrs
+ - subflows
+ -
+ name: get-limits
+ doc: Get protocol limits
+ attribute-set: attr
+ dont-validate: [ strict ]
+ do: &mptcp-get-limits
+ request:
+ attributes:
+ - rcv-add-addrs
+ - subflows
+ reply:
+ attributes:
+ - rcv-add-addrs
+ - subflows
+ -
+ name: set-flags
+ doc: Change endpoint flags
+ attribute-set: attr
+ dont-validate: [ strict ]
+ flags: [ uns-admin-perm ]
+ do: &mptcp-set-flags
+ request:
+ attributes:
+ - addr
+ - token
+ - addr-remote
+ -
+ name: announce
+ doc: Announce a new address
+ attribute-set: attr
+ dont-validate: [ strict ]
+ flags: [ uns-admin-perm ]
+ do: &announce-add
+ request:
+ attributes:
+ - addr
+ - token
+ -
+ name: remove
+ doc: Announce address removal
+ attribute-set: attr
+ dont-validate: [ strict ]
+ flags: [ uns-admin-perm ]
+ do:
+ request:
+ attributes:
+ - token
+ - loc-id
+ -
+ name: subflow-create
+ doc: Create a new subflow
+ attribute-set: attr
+ dont-validate: [ strict ]
+ flags: [ uns-admin-perm ]
+ do: &sf-create
+ request:
+ attributes:
+ - addr
+ - token
+ - addr-remote
+ -
+ name: subflow-destroy
+ doc: Destroy an existing subflow
+ attribute-set: attr
+ dont-validate: [ strict ]
+ flags: [ uns-admin-perm ]
+ do: *sf-create
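In practice these commands back the ip-mptcp(8) front end in iproute2; a rough, non-exhaustive mapping (addresses and device names are placeholders) could look like:

    ip mptcp limits set subflow 2 add_addr_accepted 2   # set-limits
    ip mptcp limits show                                # get-limits
    ip mptcp endpoint add 192.0.2.2 dev eth1 subflow    # add-addr
    ip mptcp endpoint show                              # get-addr (dump)
    ip mptcp endpoint flush                             # flush-addrs
    ip mptcp monitor                                    # prints the event-type notifications defined above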
diff --git a/Documentation/networking/dsa/b53.rst b/Documentation/networking/dsa/b53.rst
index b41637cdb82b..1cb3ff648f88 100644
--- a/Documentation/networking/dsa/b53.rst
+++ b/Documentation/networking/dsa/b53.rst
@@ -52,7 +52,7 @@ VLAN programming would basically change the CPU port's default PVID and make
it untagged, undesirable.
In difference to the configuration described in :ref:`dsa-vlan-configuration`
-the default VLAN 1 has to be removed from the slave interface configuration in
+the default VLAN 1 has to be removed from the user interface configuration in
single port and gateway configuration, while there is no need to add an extra
VLAN configuration in the bridge showcase.
@@ -68,13 +68,13 @@ By default packages are tagged with vid 1:
ip link add link eth0 name eth0.2 type vlan id 2
ip link add link eth0 name eth0.3 type vlan id 3
- # The master interface needs to be brought up before the slave ports.
+ # The conduit interface needs to be brought up before the user ports.
ip link set eth0 up
ip link set eth0.1 up
ip link set eth0.2 up
ip link set eth0.3 up
- # bring up the slave interfaces
+ # bring up the user interfaces
ip link set wan up
ip link set lan1 up
ip link set lan2 up
@@ -113,11 +113,11 @@ bridge
# tag traffic on CPU port
ip link add link eth0 name eth0.1 type vlan id 1
- # The master interface needs to be brought up before the slave ports.
+ # The conduit interface needs to be brought up before the user ports.
ip link set eth0 up
ip link set eth0.1 up
- # bring up the slave interfaces
+ # bring up the user interfaces
ip link set wan up
ip link set lan1 up
ip link set lan2 up
@@ -149,12 +149,12 @@ gateway
ip link add link eth0 name eth0.1 type vlan id 1
ip link add link eth0 name eth0.2 type vlan id 2
- # The master interface needs to be brought up before the slave ports.
+ # The conduit interface needs to be brought up before the user ports.
ip link set eth0 up
ip link set eth0.1 up
ip link set eth0.2 up
- # bring up the slave interfaces
+ # bring up the user interfaces
ip link set wan up
ip link set lan1 up
ip link set lan2 up
diff --git a/Documentation/networking/dsa/bcm_sf2.rst b/Documentation/networking/dsa/bcm_sf2.rst
index dee234039e1e..d2571435696f 100644
--- a/Documentation/networking/dsa/bcm_sf2.rst
+++ b/Documentation/networking/dsa/bcm_sf2.rst
@@ -67,7 +67,7 @@ MDIO indirect accesses
----------------------
Due to a limitation in how Broadcom switches have been designed, external
-Broadcom switches connected to a SF2 require the use of the DSA slave MDIO bus
+Broadcom switches connected to a SF2 require the use of the DSA user MDIO bus
in order to properly configure them. By default, the SF2 pseudo-PHY address, and
an external switch pseudo-PHY address will both be snooping for incoming MDIO
transactions, since they are at the same address (30), resulting in some kind of
diff --git a/Documentation/networking/dsa/configuration.rst b/Documentation/networking/dsa/configuration.rst
index d2934c40f0f1..6cc4ded3cc23 100644
--- a/Documentation/networking/dsa/configuration.rst
+++ b/Documentation/networking/dsa/configuration.rst
@@ -31,38 +31,38 @@ at https://www.kernel.org/pub/linux/utils/net/iproute2/
Through DSA every port of a switch is handled like a normal linux Ethernet
interface. The CPU port is the switch port connected to an Ethernet MAC chip.
-The corresponding linux Ethernet interface is called the master interface.
-All other corresponding linux interfaces are called slave interfaces.
+The corresponding linux Ethernet interface is called the conduit interface.
+All other corresponding linux interfaces are called user interfaces.
-The slave interfaces depend on the master interface being up in order for them
-to send or receive traffic. Prior to kernel v5.12, the state of the master
+The user interfaces depend on the conduit interface being up in order for them
+to send or receive traffic. Prior to kernel v5.12, the state of the conduit
interface had to be managed explicitly by the user. Starting with kernel v5.12,
the behavior is as follows:
-- when a DSA slave interface is brought up, the master interface is
+- when a DSA user interface is brought up, the conduit interface is
automatically brought up.
-- when the master interface is brought down, all DSA slave interfaces are
+- when the conduit interface is brought down, all DSA user interfaces are
automatically brought down.
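A quick way to observe this behaviour on a v5.12+ kernel, using the interface names introduced below:

    ip link set lan1 up     # also brings the conduit eth0 up automatically
    ip link show eth0       # the conduit should now be administratively UP
    ip link set eth0 down   # takes lan1 and the other user interfaces down again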
In this documentation the following Ethernet interfaces are used:
*eth0*
- the master interface
+ the conduit interface
*eth1*
- another master interface
+ another conduit interface
*lan1*
- a slave interface
+ a user interface
*lan2*
- another slave interface
+ another user interface
*lan3*
- a third slave interface
+ a third user interface
*wan*
- A slave interface dedicated for upstream traffic
+ A user interface dedicated for upstream traffic
Further Ethernet interfaces can be configured similar.
The configured IPs and networks are:
@@ -96,11 +96,11 @@ without using a VLAN based configuration.
ip addr add 192.0.2.5/30 dev lan2
ip addr add 192.0.2.9/30 dev lan3
- # For kernels earlier than v5.12, the master interface needs to be
- # brought up manually before the slave ports.
+ # For kernels earlier than v5.12, the conduit interface needs to be
+ # brought up manually before the user ports.
ip link set eth0 up
- # bring up the slave interfaces
+ # bring up the user interfaces
ip link set lan1 up
ip link set lan2 up
ip link set lan3 up
@@ -108,11 +108,11 @@ without using a VLAN based configuration.
*bridge*
.. code-block:: sh
- # For kernels earlier than v5.12, the master interface needs to be
- # brought up manually before the slave ports.
+ # For kernels earlier than v5.12, the conduit interface needs to be
+ # brought up manually before the user ports.
ip link set eth0 up
- # bring up the slave interfaces
+ # bring up the user interfaces
ip link set lan1 up
ip link set lan2 up
ip link set lan3 up
@@ -134,11 +134,11 @@ without using a VLAN based configuration.
*gateway*
.. code-block:: sh
- # For kernels earlier than v5.12, the master interface needs to be
- # brought up manually before the slave ports.
+ # For kernels earlier than v5.12, the conduit interface needs to be
+ # brought up manually before the user ports.
ip link set eth0 up
- # bring up the slave interfaces
+ # bring up the user interfaces
ip link set wan up
ip link set lan1 up
ip link set lan2 up
@@ -178,14 +178,14 @@ configuration.
ip link add link eth0 name eth0.2 type vlan id 2
ip link add link eth0 name eth0.3 type vlan id 3
- # For kernels earlier than v5.12, the master interface needs to be
- # brought up manually before the slave ports.
+ # For kernels earlier than v5.12, the conduit interface needs to be
+ # brought up manually before the user ports.
ip link set eth0 up
ip link set eth0.1 up
ip link set eth0.2 up
ip link set eth0.3 up
- # bring up the slave interfaces
+ # bring up the user interfaces
ip link set lan1 up
ip link set lan2 up
ip link set lan3 up
@@ -221,12 +221,12 @@ configuration.
# tag traffic on CPU port
ip link add link eth0 name eth0.1 type vlan id 1
- # For kernels earlier than v5.12, the master interface needs to be
- # brought up manually before the slave ports.
+ # For kernels earlier than v5.12, the conduit interface needs to be
+ # brought up manually before the user ports.
ip link set eth0 up
ip link set eth0.1 up
- # bring up the slave interfaces
+ # bring up the user interfaces
ip link set lan1 up
ip link set lan2 up
ip link set lan3 up
@@ -261,13 +261,13 @@ configuration.
ip link add link eth0 name eth0.1 type vlan id 1
ip link add link eth0 name eth0.2 type vlan id 2
- # For kernels earlier than v5.12, the master interface needs to be
- # brought up manually before the slave ports.
+ # For kernels earlier than v5.12, the conduit interface needs to be
+ # brought up manually before the user ports.
ip link set eth0 up
ip link set eth0.1 up
ip link set eth0.2 up
- # bring up the slave interfaces
+ # bring up the user interfaces
ip link set wan up
ip link set lan1 up
ip link set lan2 up
@@ -380,22 +380,22 @@ affinities according to the available CPU ports.
Secondly, it is possible to perform load balancing between CPU ports on a per
packet basis, rather than statically assigning user ports to CPU ports.
-This can be achieved by placing the DSA masters under a LAG interface (bonding
+This can be achieved by placing the DSA conduits under a LAG interface (bonding
or team). DSA monitors this operation and creates a mirror of this software LAG
-on the CPU ports facing the physical DSA masters that constitute the LAG slave
+on the CPU ports facing the physical DSA conduits that constitute the LAG slave
devices.
To make use of multiple CPU ports, the firmware (device tree) description of
-the switch must mark all the links between CPU ports and their DSA masters
+the switch must mark all the links between CPU ports and their DSA conduits
using the ``ethernet`` reference/phandle. At startup, only a single CPU port
-and DSA master will be used - the numerically first port from the firmware
+and DSA conduit will be used - the numerically first port from the firmware
description which has an ``ethernet`` property. It is up to the user to
-configure the system for the switch to use other masters.
+configure the system for the switch to use other conduits.
DSA uses the ``rtnl_link_ops`` mechanism (with a "dsa" ``kind``) to allow
-changing the DSA master of a user port. The ``IFLA_DSA_MASTER`` u32 netlink
-attribute contains the ifindex of the master device that handles each slave
-device. The DSA master must be a valid candidate based on firmware node
+changing the DSA conduit of a user port. The ``IFLA_DSA_CONDUIT`` u32 netlink
+attribute contains the ifindex of the conduit device that handles each user
+device. The DSA conduit must be a valid candidate based on firmware node
information, or a LAG interface which contains only slaves which are valid
candidates.
@@ -403,7 +403,7 @@ Using iproute2, the following manipulations are possible:
.. code-block:: sh
- # See the DSA master in current use
+ # See the DSA conduit in current use
ip -d link show dev swp0
(...)
dsa master eth0
@@ -414,7 +414,7 @@ Using iproute2, the following manipulations are possible:
ip link set swp2 type dsa master eth1
ip link set swp3 type dsa master eth0
- # CPU ports in LAG, using explicit assignment of the DSA master
+ # CPU ports in LAG, using explicit assignment of the DSA conduit
ip link add bond0 type bond mode balance-xor && ip link set bond0 up
ip link set eth1 down && ip link set eth1 master bond0
ip link set swp0 type dsa master bond0
@@ -426,7 +426,7 @@ Using iproute2, the following manipulations are possible:
(...)
dsa master bond0
- # CPU ports in LAG, relying on implicit migration of the DSA master
+ # CPU ports in LAG, relying on implicit migration of the DSA conduit
ip link add bond0 type bond mode balance-xor && ip link set bond0 up
ip link set eth0 down && ip link set eth0 master bond0
ip link set eth1 down && ip link set eth1 master bond0
@@ -435,24 +435,24 @@ Using iproute2, the following manipulations are possible:
dsa master bond0
Notice that in the case of CPU ports under a LAG, the use of the
-``IFLA_DSA_MASTER`` netlink attribute is not strictly needed, but rather, DSA
-reacts to the ``IFLA_MASTER`` attribute change of its present master (``eth0``)
+``IFLA_DSA_CONDUIT`` netlink attribute is not strictly needed, but rather, DSA
+reacts to the ``IFLA_MASTER`` attribute change of its present conduit (``eth0``)
and migrates all user ports to the new upper of ``eth0``, ``bond0``. Similarly,
when ``bond0`` is destroyed using ``RTM_DELLINK``, DSA migrates the user ports
-that were assigned to this interface to the first physical DSA master which is
+that were assigned to this interface to the first physical DSA conduit which is
eligible, based on the firmware description (it effectively reverts to the
startup configuration).
In a setup with more than 2 physical CPU ports, it is therefore possible to mix
-static user to CPU port assignment with LAG between DSA masters. It is not
-possible to statically assign a user port towards a DSA master that has any
-upper interfaces (this includes LAG devices - the master must always be the LAG
+static user to CPU port assignment with LAG between DSA conduits. It is not
+possible to statically assign a user port towards a DSA conduit that has any
+upper interfaces (this includes LAG devices - the conduit must always be the LAG
in this case).
-Live changing of the DSA master (and thus CPU port) affinity of a user port is
+Live changing of the DSA conduit (and thus CPU port) affinity of a user port is
permitted, in order to allow dynamic redistribution in response to traffic.
-Physical DSA masters are allowed to join and leave at any time a LAG interface
-used as a DSA master; however, DSA will reject a LAG interface as a valid
-candidate for being a DSA master unless it has at least one physical DSA master
+Physical DSA conduits are allowed to join and leave at any time a LAG interface
+used as a DSA conduit; however, DSA will reject a LAG interface as a valid
+candidate for being a DSA conduit unless it has at least one physical DSA conduit
as a slave device.
diff --git a/Documentation/networking/dsa/dsa.rst b/Documentation/networking/dsa/dsa.rst
index a94ddf83348a..7b2e69cd7ef0 100644
--- a/Documentation/networking/dsa/dsa.rst
+++ b/Documentation/networking/dsa/dsa.rst
@@ -25,7 +25,7 @@ presence of a management port connected to an Ethernet controller capable of
receiving Ethernet frames from the switch. This is a very common setup for all
kinds of Ethernet switches found in Small Home and Office products: routers,
gateways, or even top-of-rack switches. This host Ethernet controller will
-be later referred to as "master" and "cpu" in DSA terminology and code.
+be later referred to as "conduit" and "cpu" in DSA terminology and code.
The D in DSA stands for Distributed, because the subsystem has been designed
with the ability to configure and manage cascaded switches on top of each other
@@ -35,7 +35,7 @@ of multiple switches connected to each other is called a "switch tree".
For each front-panel port, DSA creates specialized network devices which are
used as controlling and data-flowing endpoints for use by the Linux networking
-stack. These specialized network interfaces are referred to as "slave" network
+stack. These specialized network interfaces are referred to as "user" network
interfaces in DSA terminology and code.
The ideal case for using DSA is when an Ethernet switch supports a "switch tag"
@@ -56,12 +56,16 @@ Note that DSA does not currently create network interfaces for the "cpu" and
- the "cpu" port is the Ethernet switch facing side of the management
controller, and as such, would create a duplication of feature, since you
- would get two interfaces for the same conduit: master netdev, and "cpu" netdev
+ would get two interfaces for the same conduit: conduit netdev, and "cpu" netdev
- the "dsa" port(s) are just conduits between two or more switches, and as such
cannot really be used as proper network interfaces either, only the
downstream, or the top-most upstream interface makes sense with that model
+NB: for the past 15 years, the DSA subsystem had been making use of the terms
+"master" (rather than "conduit") and "slave" (rather than "user"). These terms
+have been removed from the DSA codebase and phased out of the uAPI.
+
Switch tagging protocols
------------------------
@@ -80,14 +84,14 @@ methods of the ``struct dsa_device_ops`` structure, which are detailed below.
Tagging protocols generally fall in one of three categories:
1. The switch-specific frame header is located before the Ethernet header,
- shifting to the right (from the perspective of the DSA master's frame
+ shifting to the right (from the perspective of the DSA conduit's frame
parser) the MAC DA, MAC SA, EtherType and the entire L2 payload.
2. The switch-specific frame header is located before the EtherType, keeping
- the MAC DA and MAC SA in place from the DSA master's perspective, but
+ the MAC DA and MAC SA in place from the DSA conduit's perspective, but
shifting the 'real' EtherType and L2 payload to the right.
3. The switch-specific frame header is located at the tail of the packet,
keeping all frame headers in place and not altering the view of the packet
- that the DSA master's frame parser has.
+ that the DSA conduit's frame parser has.
A tagging protocol may tag all packets with switch tags of the same length, or
the tag length might vary (for example packets with PTP timestamps might
@@ -95,7 +99,7 @@ require an extended switch tag, or there might be one tag length on TX and a
different one on RX). Either way, the tagging protocol driver must populate the
``struct dsa_device_ops::needed_headroom`` and/or ``struct dsa_device_ops::needed_tailroom``
with the length in octets of the longest switch frame header/trailer. The DSA
-framework will automatically adjust the MTU of the master interface to
+framework will automatically adjust the MTU of the conduit interface to
accommodate for this extra size in order for DSA user ports to support the
standard MTU (L2 payload length) of 1500 octets. The ``needed_headroom`` and
``needed_tailroom`` properties are also used to request from the network stack,
@@ -140,18 +144,18 @@ adding or removing the ``ETH_P_EDSA`` EtherType and some padding octets).
It is possible to construct cascaded setups of DSA switches even if their
tagging protocols are not compatible with one another. In this case, there are
no DSA links in this fabric, and each switch constitutes a disjoint DSA switch
-tree. The DSA links are viewed as simply a pair of a DSA master (the out-facing
+tree. The DSA links are viewed as simply a pair of a DSA conduit (the out-facing
port of the upstream DSA switch) and a CPU port (the in-facing port of the
downstream DSA switch).
The tagging protocol of the attached DSA switch tree can be viewed through the
-``dsa/tagging`` sysfs attribute of the DSA master::
+``dsa/tagging`` sysfs attribute of the DSA conduit::
cat /sys/class/net/eth0/dsa/tagging
If the hardware and driver are capable, the tagging protocol of the DSA switch
tree can be changed at runtime. This is done by writing the new tagging
-protocol name to the same sysfs device attribute as above (the DSA master and
+protocol name to the same sysfs device attribute as above (the DSA conduit and
all attached switch ports must be down while doing this).
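
For example, switching an attached tree to a different tagging protocol could
look like the sketch below (interface and protocol names are only illustrative;
the name written must be a protocol the switch driver actually supports):

.. code-block:: sh

    # the conduit and all attached user ports must be down first
    ip link set lan1 down
    ip link set lan2 down
    ip link set eth0 down

    # write the new protocol name to the conduit's sysfs attribute
    # ("ocelot-8021q" is just an example name)
    echo ocelot-8021q > /sys/class/net/eth0/dsa/tagging

    # confirm the change, then bring the links back up
    cat /sys/class/net/eth0/dsa/tagging
    ip link set eth0 up
    ip link set lan1 up
    ip link set lan2 up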
It is desirable that all tagging protocols are testable with the ``dsa_loop``
@@ -159,7 +163,7 @@ mockup driver, which can be attached to any network interface. The goal is that
any network interface should be capable of transmitting the same packet in the
same way, and the tagger should decode the same received packet in the same way
regardless of the driver used for the switch control path, and the driver used
-for the DSA master.
+for the DSA conduit.
The transmission of a packet goes through the tagger's ``xmit`` function.
The passed ``struct sk_buff *skb`` has ``skb->data`` pointing at
@@ -183,44 +187,44 @@ virtual DSA user network interface corresponding to the physical front-facing
switch port that the packet was received on.
Since tagging protocols in category 1 and 2 break software (and most often also
-hardware) packet dissection on the DSA master, features such as RPS (Receive
-Packet Steering) on the DSA master would be broken. The DSA framework deals
+hardware) packet dissection on the DSA conduit, features such as RPS (Receive
+Packet Steering) on the DSA conduit would be broken. The DSA framework deals
with this by hooking into the flow dissector and shifting the offset at which
-the IP header is to be found in the tagged frame as seen by the DSA master.
+the IP header is to be found in the tagged frame as seen by the DSA conduit.
This behavior is automatic based on the ``overhead`` value of the tagging
protocol. If not all packets are of equal size, the tagger can implement the
``flow_dissect`` method of the ``struct dsa_device_ops`` and override this
default behavior by specifying the correct offset incurred by each individual
RX packet. Tail taggers do not cause issues to the flow dissector.
-Checksum offload should work with category 1 and 2 taggers when the DSA master
+Checksum offload should work with category 1 and 2 taggers when the DSA conduit
driver declares NETIF_F_HW_CSUM in vlan_features and looks at csum_start and
csum_offset. For those cases, DSA will shift the checksum start and offset by
-the tag size. If the DSA master driver still uses the legacy NETIF_F_IP_CSUM
+the tag size. If the DSA conduit driver still uses the legacy NETIF_F_IP_CSUM
or NETIF_F_IPV6_CSUM in vlan_features, the offload might only work if the
offload hardware already expects that specific tag (perhaps due to matching
-vendors). DSA slaves inherit those flags from the master port, and it is up to
+vendors). DSA user ports inherit those flags from the conduit, and it is up to
the driver to correctly fall back to software checksum when the IP header is not
where the hardware expects. If that check is ineffective, the packets might go
to the network without a proper checksum (the checksum field will have the
pseudo IP header sum). For category 3, when the offload hardware does not
already expect the switch tag in use, the checksum must be calculated before any
-tag is inserted (i.e. inside the tagger). Otherwise, the DSA master would
+tag is inserted (i.e. inside the tagger). Otherwise, the DSA conduit would
include the tail tag in the (software or hardware) checksum calculation. Then,
when the tag gets stripped by the switch during transmission, it will leave an
incorrect IP checksum in place.
Due to various reasons (most common being category 1 taggers being associated
-with DSA-unaware masters, mangling what the master perceives as MAC DA), the
-tagging protocol may require the DSA master to operate in promiscuous mode, to
+with DSA-unaware conduits, mangling what the conduit perceives as MAC DA), the
+tagging protocol may require the DSA conduit to operate in promiscuous mode, to
receive all frames regardless of the value of the MAC DA. This can be done by
-setting the ``promisc_on_master`` property of the ``struct dsa_device_ops``.
-Note that this assumes a DSA-unaware master driver, which is the norm.
+setting the ``promisc_on_conduit`` property of the ``struct dsa_device_ops``.
+Note that this assumes a DSA-unaware conduit driver, which is the norm.
-Master network devices
-----------------------
+Conduit network devices
+-----------------------
-Master network devices are regular, unmodified Linux network device drivers for
+Conduit network devices are regular, unmodified Linux network device drivers for
the CPU/management Ethernet interface. Such a driver might occasionally need to
know whether DSA is enabled (e.g.: to enable/disable specific offload features),
but the DSA subsystem has been proven to work with industry standard drivers:
@@ -232,14 +236,14 @@ Ethernet switch.
Networking stack hooks
----------------------
-When a master netdev is used with DSA, a small hook is placed in the
+When a conduit netdev is used with DSA, a small hook is placed in the
networking stack in order to have the DSA subsystem process the Ethernet
switch specific tagging protocol. DSA accomplishes this by registering a
specific (and fake) Ethernet type (later becoming ``skb->protocol``) with the
networking stack, this is also known as a ``ptype`` or ``packet_type``. A typical
Ethernet Frame receive sequence looks like this:
-Master network device (e.g.: e1000e):
+Conduit network device (e.g.: e1000e):
1. Receive interrupt fires:
@@ -269,16 +273,16 @@ Master network device (e.g.: e1000e):
- inspect and strip switch tag protocol to determine originating port
- locate per-port network device
- - invoke ``eth_type_trans()`` with the DSA slave network device
+ - invoke ``eth_type_trans()`` with the DSA user network device
   - invoke ``netif_receive_skb()``
-Past this point, the DSA slave network devices get delivered regular Ethernet
+Past this point, the DSA user network devices get delivered regular Ethernet
frames that can be processed by the networking stack.
-Slave network devices
----------------------
+User network devices
+--------------------
-Slave network devices created by DSA are stacked on top of their master network
+User network devices created by DSA are stacked on top of their conduit network
device, each of these network interfaces will be responsible for being a
controlling and data-flowing end-point for each front-panel port of the switch.
These interfaces are specialized in order to:
@@ -289,31 +293,31 @@ These interfaces are specialized in order to:
Wake-on-LAN, register dumps...
- manage external/internal PHY: link, auto-negotiation, etc.
-These slave network devices have custom net_device_ops and ethtool_ops function
+These user network devices have custom net_device_ops and ethtool_ops function
pointers which allow DSA to introduce a level of layering between the networking
stack/ethtool and the switch driver implementation.
-Upon frame transmission from these slave network devices, DSA will look up which
+Upon frame transmission from these user network devices, DSA will look up which
switch tagging protocol is currently registered with these network devices and
invoke a specific transmit routine which takes care of adding the relevant
switch tag in the Ethernet frames.
-These frames are then queued for transmission using the master network device
+These frames are then queued for transmission using the conduit network device
``ndo_start_xmit()`` function. Since they contain the appropriate switch tag, the
Ethernet switch will be able to process these incoming frames from the
management interface and deliver them to the physical switch port.
When using multiple CPU ports, it is possible to stack a LAG (bonding/team)
-device between the DSA slave devices and the physical DSA masters. The LAG
-device is thus also a DSA master, but the LAG slave devices continue to be DSA
-masters as well (just with no user port assigned to them; this is needed for
-recovery in case the LAG DSA master disappears). Thus, the data path of the LAG
-DSA master is used asymmetrically. On RX, the ``ETH_P_XDSA`` handler, which
-calls ``dsa_switch_rcv()``, is invoked early (on the physical DSA master;
-LAG slave). Therefore, the RX data path of the LAG DSA master is not used.
-On the other hand, TX takes place linearly: ``dsa_slave_xmit`` calls
-``dsa_enqueue_skb``, which calls ``dev_queue_xmit`` towards the LAG DSA master.
-The latter calls ``dev_queue_xmit`` towards one physical DSA master or the
+device between the DSA user devices and the physical DSA conduits. The LAG
+device is thus also a DSA conduit, but the LAG slave devices continue to be DSA
+conduits as well (just with no user port assigned to them; this is needed for
+recovery in case the LAG DSA conduit disappears). Thus, the data path of the LAG
+DSA conduit is used asymmetrically. On RX, the ``ETH_P_XDSA`` handler, which
+calls ``dsa_switch_rcv()``, is invoked early (on the physical DSA conduit;
+LAG slave). Therefore, the RX data path of the LAG DSA conduit is not used.
+On the other hand, TX takes place linearly: ``dsa_user_xmit`` calls
+``dsa_enqueue_skb``, which calls ``dev_queue_xmit`` towards the LAG DSA conduit.
+The latter calls ``dev_queue_xmit`` towards one physical DSA conduit or the
other, and in both cases, the packet exits the system through a hardware path
towards the switch.
@@ -352,11 +356,11 @@ perspective::
|| swp0 | | swp1 | | swp2 | | swp3 ||
++------+-+------+-+------+-+------++
-Slave MDIO bus
---------------
+User MDIO bus
+-------------
-In order to be able to read to/from a switch PHY built into it, DSA creates a
-slave MDIO bus which allows a specific switch driver to divert and intercept
+In order to be able to read to/from a switch PHY built into it, DSA creates a
+user MDIO bus which allows a specific switch driver to divert and intercept
MDIO reads/writes towards specific PHY addresses. In most MDIO-connected
switches, these functions would utilize direct or indirect PHY addressing mode
to return standard MII registers from the switch builtin PHYs, allowing the PHY
@@ -364,7 +368,7 @@ library and/or to return link status, link partner pages, auto-negotiation
results, etc.
For Ethernet switches which have both external and internal MDIO buses, the
-slave MII bus can be utilized to mux/demux MDIO reads and writes towards either
+user MII bus can be utilized to mux/demux MDIO reads and writes towards either
internal or external MDIO devices this switch might be connected to: internal
PHYs, external PHYs, or even external switches.
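
Since the user MII bus is a regular ``mii_bus``, it is also visible to userspace
like any other MDIO bus. A minimal sketch, assuming a bcm_sf2 switch whose user
bus happens to be registered as ``sf2-0`` (the bus name is driver-specific and
only illustrative here):

.. code-block:: sh

    # list all registered MDIO buses, including the switch's user MDIO bus
    ls /sys/class/mdio_bus/

    # PHY devices probed on the user bus appear as <bus-id>:<phy-addr>
    ls /sys/class/mdio_bus/sf2-0/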
@@ -381,10 +385,10 @@ DSA data structures are defined in ``include/net/dsa.h`` as well as
- ``dsa_platform_data``: platform device configuration data which can reference
a collection of dsa_chip_data structures if multiple switches are cascaded,
- the master network device this switch tree is attached to needs to be
+ the conduit network device this switch tree is attached to needs to be
referenced
-- ``dsa_switch_tree``: structure assigned to the master network device under
+- ``dsa_switch_tree``: structure assigned to the conduit network device under
``dsa_ptr``, this structure references a dsa_platform_data structure as well as
the tagging protocol supported by the switch tree, and which receive/transmit
function hooks should be invoked, information about the directly attached
@@ -392,7 +396,7 @@ DSA data structures are defined in ``include/net/dsa.h`` as well as
referenced to address individual switches in the tree.
- ``dsa_switch``: structure describing a switch device in the tree, referencing
- a ``dsa_switch_tree`` as a backpointer, slave network devices, master network
+ a ``dsa_switch_tree`` as a backpointer, user network devices, conduit network
  device, and a reference to the backing ``dsa_switch_ops``
- ``dsa_switch_ops``: structure referencing function pointers, see below for a
@@ -404,7 +408,7 @@ Design limitations
Lack of CPU/DSA network devices
-------------------------------
-DSA does not currently create slave network devices for the CPU or DSA ports, as
+DSA does not currently create user network devices for the CPU or DSA ports, as
described before. This might be an issue in the following cases:
- inability to fetch switch CPU port statistics counters using ethtool, which
@@ -419,7 +423,7 @@ described before. This might be an issue in the following cases:
Common pitfalls using DSA setups
--------------------------------
-Once a master network device is configured to use DSA (dev->dsa_ptr becomes
+Once a conduit network device is configured to use DSA (dev->dsa_ptr becomes
non-NULL), and the switch behind it expects a tagging protocol, this network
interface can only exclusively be used as a conduit interface. Sending packets
directly through this interface (e.g.: opening a socket using this interface)
@@ -440,7 +444,7 @@ DSA currently leverages the following subsystems:
MDIO/PHY library
----------------
-Slave network devices exposed by DSA may or may not be interfacing with PHY
+User network devices exposed by DSA may or may not be interfacing with PHY
devices (``struct phy_device`` as defined in ``include/linux/phy.h``), but the DSA
subsystem deals with all possible combinations:
@@ -450,7 +454,7 @@ subsystem deals with all possible combinations:
- special, non-autonegotiated or non MDIO-managed PHY devices: SFPs, MoCA; a.k.a
fixed PHYs
-The PHY configuration is done by the ``dsa_slave_phy_setup()`` function and the
+The PHY configuration is done by the ``dsa_user_phy_setup()`` function and the
logic basically looks like this:
- if Device Tree is used, the PHY device is looked up using the standard
@@ -463,7 +467,7 @@ logic basically looks like this:
and connected transparently using the special fixed MDIO bus driver
- finally, if the PHY is built into the switch, as is very common with
- standalone switch packages, the PHY is probed using the slave MII bus created
+ standalone switch packages, the PHY is probed using the user MII bus created
by DSA
@@ -472,7 +476,7 @@ SWITCHDEV
DSA directly utilizes SWITCHDEV when interfacing with the bridge layer, and
more specifically with its VLAN filtering portion when configuring VLANs on top
-of per-port slave network devices. As of today, the only SWITCHDEV objects
+of per-port user network devices. As of today, the only SWITCHDEV objects
supported by DSA are the FDB and VLAN objects.
Devlink
@@ -589,8 +593,8 @@ is torn down when the first switch unregisters.
It is mandatory for DSA switch drivers to implement the ``shutdown()`` callback
of their respective bus, and call ``dsa_switch_shutdown()`` from it (a minimal
version of the full teardown performed by ``dsa_unregister_switch()``).
-The reason is that DSA keeps a reference on the master net device, and if the
-driver for the master device decides to unbind on shutdown, DSA's reference
+The reason is that DSA keeps a reference on the conduit net device, and if the
+driver for the conduit device decides to unbind on shutdown, DSA's reference
will block that operation from finalizing.
Either ``dsa_switch_shutdown()`` or ``dsa_unregister_switch()`` must be called,
@@ -615,7 +619,7 @@ Switch configuration
tag formats.
- ``change_tag_protocol``: when the default tagging protocol has compatibility
- problems with the master or other issues, the driver may support changing it
+ problems with the conduit or other issues, the driver may support changing it
at runtime, either through a device tree property or through sysfs. In that
case, further calls to ``get_tag_protocol`` should report the protocol in
current use.
@@ -643,22 +647,22 @@ Switch configuration
PHY cannot be found. In this case, probing of the DSA switch continues
without that particular port.
-- ``port_change_master``: method through which the affinity (association used
+- ``port_change_conduit``: method through which the affinity (association used
for traffic termination purposes) between a user port and a CPU port can be
changed. By default all user ports from a tree are assigned to the first
available CPU port that makes sense for them (most of the times this means
the user ports of a tree are all assigned to the same CPU port, except for H
topologies as described in commit 2c0b03258b8b). The ``port`` argument
- represents the index of the user port, and the ``master`` argument represents
- the new DSA master ``net_device``. The CPU port associated with the new
- master can be retrieved by looking at ``struct dsa_port *cpu_dp =
- master->dsa_ptr``. Additionally, the master can also be a LAG device where
- all the slave devices are physical DSA masters. LAG DSA masters also have a
- valid ``master->dsa_ptr`` pointer, however this is not unique, but rather a
- duplicate of the first physical DSA master's (LAG slave) ``dsa_ptr``. In case
- of a LAG DSA master, a further call to ``port_lag_join`` will be emitted
+ represents the index of the user port, and the ``conduit`` argument represents
+ the new DSA conduit ``net_device``. The CPU port associated with the new
+ conduit can be retrieved by looking at ``struct dsa_port *cpu_dp =
+ conduit->dsa_ptr``. Additionally, the conduit can also be a LAG device where
+ all the slave devices are physical DSA conduits. LAG DSA conduits also have a
+ valid ``conduit->dsa_ptr`` pointer, however this is not unique, but rather a
+ duplicate of the first physical DSA conduit's (LAG slave) ``dsa_ptr``. In case
+ of a LAG DSA conduit, a further call to ``port_lag_join`` will be emitted
separately for the physical CPU ports associated with the physical DSA
- masters, requesting them to create a hardware LAG associated with the LAG
+ conduits, requesting them to create a hardware LAG associated with the LAG
interface.
PHY devices and link management
@@ -670,16 +674,16 @@ PHY devices and link management
should return a 32-bit bitmask of "flags" that is private between the switch
driver and the Ethernet PHY driver in ``drivers/net/phy/\*``.
-- ``phy_read``: Function invoked by the DSA slave MDIO bus when attempting to read
+- ``phy_read``: Function invoked by the DSA user MDIO bus when attempting to read
the switch port MDIO registers. If unavailable, return 0xffff for each read.
For builtin switch Ethernet PHYs, this function should allow reading the link
status, auto-negotiation results, link partner pages, etc.
-- ``phy_write``: Function invoked by the DSA slave MDIO bus when attempting to write
+- ``phy_write``: Function invoked by the DSA user MDIO bus when attempting to write
to the switch port MDIO registers. If unavailable return a negative error
code.
-- ``adjust_link``: Function invoked by the PHY library when a slave network device
+- ``adjust_link``: Function invoked by the PHY library when a user network device
is attached to a PHY device. This function is responsible for appropriately
configuring the switch port link parameters: speed, duplex, pause based on
what the ``phy_device`` is providing.
@@ -698,14 +702,14 @@ Ethtool operations
typically return statistics strings, private flags strings, etc.
- ``get_ethtool_stats``: ethtool function used to query per-port statistics and
- return their values. DSA overlays slave network devices general statistics:
+ return their values. DSA overlays the user network devices' general statistics:
RX/TX counters from the network device, with switch driver specific statistics
per port
- ``get_sset_count``: ethtool function used to query the number of statistics items
- ``get_wol``: ethtool function used to obtain Wake-on-LAN settings per-port, this
- function may for certain implementations also query the master network device
+ function may for certain implementations also query the conduit network device
Wake-on-LAN settings if this interface needs to participate in Wake-on-LAN
- ``set_wol``: ethtool function used to configure Wake-on-LAN settings per-port,
@@ -747,13 +751,13 @@ Power management
should resume all Ethernet switch activities and re-configure the switch to be
in a fully active state
-- ``port_enable``: function invoked by the DSA slave network device ndo_open
+- ``port_enable``: function invoked by the DSA user network device ndo_open
function when a port is administratively brought up, this function should
fully enable a given switch port. DSA takes care of marking the port with
``BR_STATE_BLOCKING`` if the port is a bridge member, or ``BR_STATE_FORWARDING`` if it
was not, and propagating these changes down to the hardware
-- ``port_disable``: function invoked by the DSA slave network device ndo_close
+- ``port_disable``: function invoked by the DSA user network device ndo_close
function when a port is administratively brought down, this function should
fully disable a given switch port. DSA takes care of marking the port with
``BR_STATE_DISABLED`` and propagating changes to the hardware if this port is
diff --git a/Documentation/networking/dsa/lan9303.rst b/Documentation/networking/dsa/lan9303.rst
index e3c820db28ad..ab81b4e0139e 100644
--- a/Documentation/networking/dsa/lan9303.rst
+++ b/Documentation/networking/dsa/lan9303.rst
@@ -4,7 +4,7 @@ LAN9303 Ethernet switch driver
The LAN9303 is a three port 10/100 Mbps ethernet switch with integrated phys for
the two external ethernet ports. The third port is an RMII/MII interface to a
-host master network interface (e.g. fixed link).
+host conduit network interface (e.g. fixed link).
Driver details
diff --git a/Documentation/networking/dsa/sja1105.rst b/Documentation/networking/dsa/sja1105.rst
index e0219c1452ab..8ab60eef07d4 100644
--- a/Documentation/networking/dsa/sja1105.rst
+++ b/Documentation/networking/dsa/sja1105.rst
@@ -79,7 +79,7 @@ The hardware tags all traffic internally with a port-based VLAN (pvid), or it
decodes the VLAN information from the 802.1Q tag. Advanced VLAN classification
is not possible. Once attributed a VLAN tag, frames are checked against the
port's membership rules and dropped at ingress if they don't match any VLAN.
-This behavior is available when switch ports are enslaved to a bridge with
+This behavior is available when switch ports join a bridge with
``vlan_filtering 1``.
Normally the hardware is not configurable with respect to VLAN awareness, but
@@ -122,7 +122,7 @@ on egress. Using ``vlan_filtering=1``, the behavior is the other way around:
offloaded flows can be steered to TX queues based on the VLAN PCP, but the DSA
net devices are no longer able to do that. To inject frames into a hardware TX
queue with VLAN awareness active, it is necessary to create a VLAN
-sub-interface on the DSA master port, and send normal (0x8100) VLAN-tagged
+sub-interface on the DSA conduit port, and send normal (0x8100) VLAN-tagged traffic
towards the switch, with the VLAN PCP bits set appropriately.
Management traffic (having DMAC 01-80-C2-xx-xx-xx or 01-19-1B-xx-xx-xx) is the
@@ -389,7 +389,7 @@ MDIO bus and PHY management
The SJA1105 does not have an MDIO bus and does not perform in-band AN either.
Therefore there is no link state notification coming from the switch device.
A board would need to hook up the PHYs connected to the switch to any other
-MDIO bus available to Linux within the system (e.g. to the DSA master's MDIO
+MDIO bus available to Linux within the system (e.g. to the DSA conduit's MDIO
bus). Link state management then works by the driver manually keeping in sync
(over SPI commands) the MAC link speed with the settings negotiated by the PHY.
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index e7ec9026e5db..4dfe0d9a57bb 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -2502,12 +2502,18 @@ use_tempaddr - INTEGER
* -1 (for point-to-point devices and loopback devices)
temp_valid_lft - INTEGER
- valid lifetime (in seconds) for temporary addresses.
+ valid lifetime (in seconds) for temporary addresses. If less than the
+ minimum required lifetime (typically 5 seconds), temporary addresses
+ will not be created.
Default: 172800 (2 days)
temp_prefered_lft - INTEGER
- Preferred lifetime (in seconds) for temporary addresses.
+ Preferred lifetime (in seconds) for temporary addresses. If
+ temp_prefered_lft is less than the minimum required lifetime (typically
+ 5 seconds), the preferred lifetime is the minimum required. If
+ temp_prefered_lft is greater than temp_valid_lft, the preferred lifetime
+ is temp_valid_lft.
Default: 86400 (1 day)
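
A brief sketch of how these two knobs interact (interface name and values are
only illustrative):

.. code-block:: sh

    # per-interface IPv6 settings; "eth0" is an example interface
    sysctl -w net.ipv6.conf.eth0.temp_valid_lft=7200

    # asking for a preferred lifetime above temp_valid_lft: newly created
    # temporary addresses get an effective preferred lifetime of 7200
    sysctl -w net.ipv6.conf.eth0.temp_prefered_lft=86400

    # asking for less than the minimum required lifetime: the effective
    # preferred lifetime is raised to that minimum (typically 5 seconds)
    sysctl -w net.ipv6.conf.eth0.temp_prefered_lft=1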
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index 15f1919d640c..69975ce25a02 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -25,6 +25,17 @@ add_addr_timeout - INTEGER (seconds)
Default: 120
+close_timeout - INTEGER (seconds)
+ Set the make-after-break timeout: in the absence of any close or
+ shutdown syscall, MPTCP sockets will keep their state unchanged
+ for this long after the last subflow has been removed, before
+ moving to TCP_CLOSE.
+
+ The default value matches TCP_TIMEWAIT_LEN. This is a per-namespace
+ sysctl.
+
+ Default: 60
+
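+
For example (value chosen arbitrarily), the per-namespace knob can be inspected
and changed with sysctl:

.. code-block:: sh

    # current make-after-break timeout, in seconds
    sysctl net.mptcp.close_timeout

    # keep the MPTCP socket state for 30 seconds after the last
    # subflow is removed, instead of the TCP_TIMEWAIT_LEN default
    sysctl -w net.mptcp.close_timeout=30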
checksum_enabled - BOOLEAN
Control whether DSS checksum can be enabled.
diff --git a/Documentation/networking/page_pool.rst b/Documentation/networking/page_pool.rst
index 215ebc92752c..60993cb56b32 100644
--- a/Documentation/networking/page_pool.rst
+++ b/Documentation/networking/page_pool.rst
@@ -58,7 +58,9 @@ a page will cause no race conditions is enough.
.. kernel-doc:: include/net/page_pool/helpers.h
:identifiers: page_pool_put_page page_pool_put_full_page
- page_pool_recycle_direct page_pool_dev_alloc_pages
+ page_pool_recycle_direct page_pool_free_va
+ page_pool_dev_alloc_pages page_pool_dev_alloc_frag
+ page_pool_dev_alloc page_pool_dev_alloc_va
page_pool_get_dma_addr page_pool_get_dma_dir
.. kernel-doc:: net/core/page_pool.c
diff --git a/Documentation/userspace-api/netlink/genetlink-legacy.rst b/Documentation/userspace-api/netlink/genetlink-legacy.rst
index 0b3febd57ff5..70a77387f6c4 100644
--- a/Documentation/userspace-api/netlink/genetlink-legacy.rst
+++ b/Documentation/userspace-api/netlink/genetlink-legacy.rst
@@ -182,7 +182,7 @@ members
- ``name`` - The attribute name of the struct member
- ``type`` - One of the scalar types ``u8``, ``u16``, ``u32``, ``u64``, ``s8``,
- ``s16``, ``s32``, ``s64``, ``string`` or ``binary``.
+ ``s16``, ``s32``, ``s64``, ``string``, ``binary`` or ``bitfield32``.
- ``byte-order`` - ``big-endian`` or ``little-endian``
- ``doc``, ``enum``, ``enum-as-flags``, ``display-hint`` - Same as for
:ref:`attribute definitions <attribute_properties>`
diff --git a/MAINTAINERS b/MAINTAINERS
index 36815d2feb33..b2f53d5cae06 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14960,10 +14960,11 @@ W: https://github.com/multipath-tcp/mptcp_net-next/wiki
B: https://github.com/multipath-tcp/mptcp_net-next/issues
T: git https://github.com/multipath-tcp/mptcp_net-next.git export-net
T: git https://github.com/multipath-tcp/mptcp_net-next.git export
+F: Documentation/netlink/specs/mptcp.yaml
F: Documentation/networking/mptcp-sysctl.rst
F: include/net/mptcp.h
F: include/trace/events/mptcp.h
-F: include/uapi/linux/mptcp.h
+F: include/uapi/linux/mptcp*.h
F: net/mptcp/
F: tools/testing/selftests/bpf/*/*mptcp*.c
F: tools/testing/selftests/net/mptcp/
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
index 5fc613d24151..49cbdb55b4b3 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
@@ -13,7 +13,7 @@
/ {
aliases {
ethernet0 = &eth0;
- /* for dsa slave device */
+ /* for DSA user port device */
ethernet1 = &switch0port1;
ethernet2 = &switch0port2;
ethernet3 = &switch0port3;
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index f9a3444753c2..ff4868c83cd8 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -118,6 +118,7 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define BTMTKSDIO_FUNC_ENABLED 3
#define BTMTKSDIO_PATCH_ENABLED 4
#define BTMTKSDIO_HW_RESET_ACTIVE 5
+#define BTMTKSDIO_BT_WAKE_ENABLED 6
struct mtkbtsdio_hdr {
__le16 len;
@@ -554,7 +555,7 @@ static void btmtksdio_txrx_work(struct work_struct *work)
sdio_claim_host(bdev->func);
/* Disable interrupt */
- sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
txrx_timeout = jiffies + 5 * HZ;
@@ -576,7 +577,7 @@ static void btmtksdio_txrx_work(struct work_struct *work)
if ((int_status & FW_MAILBOX_INT) &&
bdev->data->chipid == 0x7921) {
sdio_writel(bdev->func, PH2DSM0R_DRIVER_OWN,
- MTK_REG_PH2DSM0R, 0);
+ MTK_REG_PH2DSM0R, NULL);
}
if (int_status & FW_OWN_BACK_INT)
@@ -608,7 +609,7 @@ static void btmtksdio_txrx_work(struct work_struct *work)
} while (int_status || time_is_before_jiffies(txrx_timeout));
/* Enable interrupt */
- sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, 0);
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
sdio_release_host(bdev->func);
@@ -620,8 +621,14 @@ static void btmtksdio_interrupt(struct sdio_func *func)
{
struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
+ if (test_bit(BTMTKSDIO_BT_WAKE_ENABLED, &bdev->tx_state)) {
+ if (bdev->hdev->suspended)
+ pm_wakeup_event(bdev->dev, 0);
+ clear_bit(BTMTKSDIO_BT_WAKE_ENABLED, &bdev->tx_state);
+ }
+
/* Disable interrupt */
- sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
schedule_work(&bdev->txrx_work);
}
@@ -1454,6 +1461,23 @@ static int btmtksdio_runtime_suspend(struct device *dev)
return err;
}
+static int btmtksdio_system_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmtksdio_dev *bdev;
+
+ bdev = sdio_get_drvdata(func);
+ if (!bdev)
+ return 0;
+
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ return 0;
+
+ set_bit(BTMTKSDIO_BT_WAKE_ENABLED, &bdev->tx_state);
+
+ return btmtksdio_runtime_suspend(dev);
+}
+
static int btmtksdio_runtime_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
@@ -1474,8 +1498,16 @@ static int btmtksdio_runtime_resume(struct device *dev)
return err;
}
-static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend,
- btmtksdio_runtime_resume, NULL);
+static int btmtksdio_system_resume(struct device *dev)
+{
+ return btmtksdio_runtime_resume(dev);
+}
+
+static const struct dev_pm_ops btmtksdio_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(btmtksdio_system_suspend, btmtksdio_system_resume)
+ RUNTIME_PM_OPS(btmtksdio_runtime_suspend, btmtksdio_runtime_resume, NULL)
+};
+
#define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops)
#else /* CONFIG_PM */
#define BTMTKSDIO_PM_OPS NULL
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 5a35ac4138c6..fdb0fae88d1c 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -205,6 +205,44 @@ static int qca_send_reset(struct hci_dev *hdev)
return 0;
}
+static int qca_read_fw_board_id(struct hci_dev *hdev, u16 *bid)
+{
+ u8 cmd;
+ struct sk_buff *skb;
+ struct edl_event_hdr *edl;
+ int err = 0;
+
+ cmd = EDL_GET_BID_REQ_CMD;
+ skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+ &cmd, 0, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Reading QCA board ID failed (%d)", err);
+ return err;
+ }
+
+ edl = skb_pull_data(skb, sizeof(*edl));
+ if (!edl) {
+ bt_dev_err(hdev, "QCA read board ID with no header");
+ err = -EILSEQ;
+ goto out;
+ }
+
+ if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+ edl->rtype != EDL_GET_BID_REQ_CMD) {
+ bt_dev_err(hdev, "QCA Wrong packet: %d %d", edl->cresp, edl->rtype);
+ err = -EIO;
+ goto out;
+ }
+
+ *bid = (edl->data[1] << 8) + edl->data[2];
+ bt_dev_dbg(hdev, "%s: bid = %x", __func__, *bid);
+
+out:
+ kfree_skb(skb);
+ return err;
+}
+
int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
{
struct sk_buff *skb;
@@ -574,6 +612,23 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
+ struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
+{
+ const char *variant;
+
+ /* hsp gf chip */
+ if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
+ variant = "g";
+ else
+ variant = "";
+
+ if (bid == 0x0)
+ snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
+ else
+ snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
+}
+
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
const char *firmware_name)
@@ -582,6 +637,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
int err;
u8 rom_ver = 0;
u32 soc_ver;
+ u16 boardid = 0;
bt_dev_dbg(hdev, "QCA setup on UART");
@@ -615,6 +671,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
snprintf(config.fwname, sizeof(config.fwname),
"qca/apbtfw%02x.tlv", rom_ver);
break;
+ case QCA_QCA2066:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
case QCA_QCA6390:
snprintf(config.fwname, sizeof(config.fwname),
"qca/htbtfw%02x.tlv", rom_ver);
@@ -649,6 +709,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Give the controller some time to get ready to receive the NVM */
msleep(10);
+ if (soc_type == QCA_QCA2066)
+ qca_read_fw_board_id(hdev, &boardid);
+
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
if (firmware_name) {
@@ -671,6 +734,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
snprintf(config.fwname, sizeof(config.fwname),
"qca/apnv%02x.bin", rom_ver);
break;
+ case QCA_QCA2066:
+ qca_generate_hsp_nvm_name(config.fwname,
+ sizeof(config.fwname), ver, rom_ver, boardid);
+ break;
case QCA_QCA6390:
snprintf(config.fwname, sizeof(config.fwname),
"qca/htnv%02x.bin", rom_ver);
@@ -702,6 +769,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
switch (soc_type) {
case QCA_WCN3991:
+ case QCA_QCA2066:
case QCA_QCA6390:
case QCA_WCN6750:
case QCA_WCN6855:
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 03bff5c0059d..dc31984f71dc 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -12,6 +12,7 @@
#define EDL_PATCH_VER_REQ_CMD (0x19)
#define EDL_PATCH_TLV_REQ_CMD (0x1E)
#define EDL_GET_BUILD_INFO_CMD (0x20)
+#define EDL_GET_BID_REQ_CMD (0x23)
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
#define EDL_PATCH_CONFIG_CMD (0x28)
#define MAX_SIZE_PER_TLV_SEGMENT (243)
@@ -47,7 +48,8 @@
((le32_to_cpu(soc_id) << 16) | (le16_to_cpu(rom_ver)))
#define QCA_FW_BUILD_VER_LEN 255
-
+#define QCA_HSP_GF_SOC_ID 0x1200
+#define QCA_HSP_GF_SOC_MASK 0x0000ff00
enum qca_baudrate {
QCA_BAUDRATE_115200 = 0,
@@ -146,6 +148,7 @@ enum qca_btsoc_type {
QCA_WCN3990,
QCA_WCN3998,
QCA_WCN3991,
+ QCA_QCA2066,
QCA_QCA6390,
QCA_WCN6750,
QCA_WCN6855,
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 499f4809fcdf..b8e9de887b5d 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -477,6 +477,7 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0035), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED },
+ { USB_DEVICE(0x8087, 0x0038), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED |
BTUSB_INTEL_NO_WBS_SUPPORT |
@@ -543,6 +544,10 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -644,6 +649,9 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -2818,6 +2826,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
goto err_free_wc;
}
+ if (data->evt_skb == NULL)
+ goto err_free_wc;
+
/* Parse and handle the return WMT event */
wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
if (wmt_evt->whdr.op != hdr->op) {
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index 19ad0e788646..a61757835695 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -512,6 +512,7 @@ struct bcm4377_hw {
unsigned long disable_aspm : 1;
unsigned long broken_ext_scan : 1;
unsigned long broken_mws_transport_config : 1;
+ unsigned long broken_le_coded : 1;
int (*send_calibration)(struct bcm4377_data *bcm4377);
int (*send_ptb)(struct bcm4377_data *bcm4377,
@@ -2372,6 +2373,8 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
if (bcm4377->hw->broken_ext_scan)
set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ if (bcm4377->hw->broken_le_coded)
+ set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
pci_set_drvdata(pdev, bcm4377);
hci_set_drvdata(hdev, bcm4377);
@@ -2461,6 +2464,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
.bar0_core2_window2 = 0x18107000,
.has_bar0_core2_window2 = true,
.broken_mws_transport_config = true,
+ .broken_le_coded = true,
.send_calibration = bcm4378_send_calibration,
.send_ptb = bcm4378_send_ptb,
},
@@ -2474,6 +2478,7 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
.has_bar0_core2_window2 = true,
.clear_pciecfg_subsystem_ctrl_bit19 = true,
.broken_mws_transport_config = true,
+ .broken_le_coded = true,
.send_calibration = bcm4387_send_calibration,
.send_ptb = bcm4378_send_ptb,
},
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 4b57e15f9c7a..067e248e3599 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1841,6 +1841,10 @@ static int qca_setup(struct hci_uart *hu)
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
switch (soc_type) {
+ case QCA_QCA2066:
+ soc_name = "qca2066";
+ break;
+
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -2032,6 +2036,11 @@ static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = {
.num_vregs = 4,
};
+static const struct qca_device_data qca_soc_data_qca2066 __maybe_unused = {
+ .soc_type = QCA_QCA2066,
+ .num_vregs = 0,
+};
+
static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = {
.soc_type = QCA_QCA6390,
.num_vregs = 0,
@@ -2559,6 +2568,7 @@ static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
#ifdef CONFIG_OF
static const struct of_device_id qca_bluetooth_of_match[] = {
+ { .compatible = "qcom,qca2066-bt", .data = &qca_soc_data_qca2066},
{ .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
{ .compatible = "qcom,qca9377-bt" },
@@ -2576,6 +2586,7 @@ MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
+ { "QCOM2066", (kernel_ulong_t)&qca_soc_data_qca2066 },
{ "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
{ "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
{ "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 47a9c2a5583c..9c11a0d0273b 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -371,8 +371,10 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (!sock)
return -ESHUTDOWN;
- dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info,
- IPPROTO_UDP, use_cache);
+ dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock, 0, &saddr,
+ key, 0, 0, key->tos,
+ use_cache ?
+ (struct dst_cache *) &info->dst_cache : NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -498,9 +500,10 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
struct in6_addr saddr;
struct socket *sock = rcu_dereference(bareudp->sock);
- dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
- &saddr, info, IPPROTO_UDP,
- use_cache);
+ dst = udp_tunnel6_dst_lookup(skb, dev, bareudp->net, sock,
+ 0, &saddr, &info->key,
+ 0, 0, info->key.tos,
+ use_cache ? &info->dst_cache : NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 4e27dc913cf7..0d628b35fd5c 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -757,7 +757,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
/* Create an untagged VLAN entry for the default PVID in case
* CONFIG_VLAN_8021Q is disabled and there are no calls to
- * dsa_slave_vlan_rx_add_vid() to create the default VLAN
+ * dsa_user_vlan_rx_add_vid() to create the default VLAN
* entry. Do this only when the tagging protocol is not
* DSA_TAG_PROTO_NONE
*/
@@ -958,7 +958,7 @@ static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port)
return NULL;
}
- return mdiobus_get_phy(ds->slave_mii_bus, port);
+ return mdiobus_get_phy(ds->user_mii_bus, port);
}
void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 4d55d8d18376..897e5e8b3d69 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -329,7 +329,7 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
* layer setup
*/
if (of_machine_is_compatible("brcm,bcm7445d0") &&
- strcmp(mdiodev->bus->name, "sf2 slave mii"))
+ strcmp(mdiodev->bus->name, "sf2 user mii"))
return -EPROBE_DEFER;
dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 326937e91f52..cadee5505c29 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -623,19 +623,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->master_mii_dn = dn;
- priv->slave_mii_bus = mdiobus_alloc();
- if (!priv->slave_mii_bus) {
+ priv->user_mii_bus = mdiobus_alloc();
+ if (!priv->user_mii_bus) {
err = -ENOMEM;
goto err_put_master_mii_bus_dev;
}
- priv->slave_mii_bus->priv = priv;
- priv->slave_mii_bus->name = "sf2 slave mii";
- priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
- priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
- snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
+ priv->user_mii_bus->priv = priv;
+ priv->user_mii_bus->name = "sf2 user mii";
+ priv->user_mii_bus->read = bcm_sf2_sw_mdio_read;
+ priv->user_mii_bus->write = bcm_sf2_sw_mdio_write;
+ snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
index++);
- priv->slave_mii_bus->dev.of_node = dn;
+ priv->user_mii_bus->dev.of_node = dn;
/* Include the pseudo-PHY address to divert reads towards our
* workaround. This is only required for 7445D0, since 7445E0
@@ -653,9 +653,9 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->indir_phy_mask = 0;
ds->phys_mii_mask = priv->indir_phy_mask;
- ds->slave_mii_bus = priv->slave_mii_bus;
- priv->slave_mii_bus->parent = ds->dev->parent;
- priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
+ ds->user_mii_bus = priv->user_mii_bus;
+ priv->user_mii_bus->parent = ds->dev->parent;
+ priv->user_mii_bus->phy_mask = ~priv->indir_phy_mask;
/* We need to make sure that of_phy_connect() will not work by
* removing the 'phandle' and 'linux,phandle' properties and
@@ -682,14 +682,14 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
phy_device_remove(phydev);
}
- err = mdiobus_register(priv->slave_mii_bus);
+ err = mdiobus_register(priv->user_mii_bus);
if (err && dn)
- goto err_free_slave_mii_bus;
+ goto err_free_user_mii_bus;
return 0;
-err_free_slave_mii_bus:
- mdiobus_free(priv->slave_mii_bus);
+err_free_user_mii_bus:
+ mdiobus_free(priv->user_mii_bus);
err_put_master_mii_bus_dev:
put_device(&priv->master_mii_bus->dev);
err_of_node_put:
@@ -699,10 +699,9 @@ err_of_node_put:
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
- mdiobus_unregister(priv->slave_mii_bus);
- mdiobus_free(priv->slave_mii_bus);
+ mdiobus_unregister(priv->user_mii_bus);
+ mdiobus_free(priv->user_mii_bus);
put_device(&priv->master_mii_bus->dev);
- of_node_put(priv->master_mii_dn);
}
static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
@@ -915,7 +914,7 @@ static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
* state machine and make it go in PHY_FORCING state instead.
*/
if (!status->link)
- netif_carrier_off(dsa_to_port(ds, port)->slave);
+ netif_carrier_off(dsa_to_port(ds, port)->user);
status->duplex = DUPLEX_FULL;
} else {
status->link = true;
@@ -989,7 +988,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
+ struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
@@ -1013,7 +1012,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
+ struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
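The bcm_sf2 hunks above only relabel the MDIO error path (err_free_slave_mii_bus becomes err_free_user_mii_bus); the unwind order itself is unchanged. For readers less familiar with the idiom, here is a minimal, self-contained userspace sketch of the same goto-based cleanup pattern; the bus type, helper and names are illustrative, not taken from the driver.

#include <stdio.h>
#include <stdlib.h>

struct bus { char name[32]; };

static int register_bus(struct bus *b)
{
	return b->name[0] ? 0 : -1;	/* pretend registration can fail */
}

static int setup(const char *name)
{
	struct bus *user_bus;
	int err;

	user_bus = calloc(1, sizeof(*user_bus));
	if (!user_bus)
		return -1;

	snprintf(user_bus->name, sizeof(user_bus->name), "%s", name);

	err = register_bus(user_bus);
	if (err)
		goto err_free_user_bus;	/* unwind in reverse order of setup */

	printf("registered %s\n", user_bus->name);
	return 0;

err_free_user_bus:
	free(user_bus);
	return err;
}

int main(void)
{
	return setup("sf2 user mii") ? 1 : 0;
}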
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 00afc94ce522..424f896b5a6f 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -108,7 +108,7 @@ struct bcm_sf2_priv {
/* Master and slave MDIO bus controller */
unsigned int indir_phy_mask;
struct device_node *master_mii_dn;
- struct mii_bus *slave_mii_bus;
+ struct mii_bus *user_mii_bus;
struct mii_bus *master_mii_bus;
/* Bitmask of ports needing BRCM tags */
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index c4010b7bf089..c88ee3dd4299 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -1102,7 +1102,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
+ struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1145,7 +1145,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
- struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
+ struct net_device *p = dsa_port_to_conduit(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index ee67adeb2cdb..fcb20eac332a 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1084,7 +1084,7 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port,
if (!dsa_port_is_user(dp))
return 0;
- vlan_vid_add(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
+ vlan_vid_add(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port);
return lan9303_enable_processing_port(chip, port);
}
@@ -1097,7 +1097,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
if (!dsa_port_is_user(dp))
return;
- vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
+ vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port);
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 1a2d5797bf98..9c185c9f0963 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -510,22 +510,22 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
struct dsa_switch *ds = priv->ds;
int err;
- ds->slave_mii_bus = mdiobus_alloc();
- if (!ds->slave_mii_bus)
+ ds->user_mii_bus = mdiobus_alloc();
+ if (!ds->user_mii_bus)
return -ENOMEM;
- ds->slave_mii_bus->priv = priv;
- ds->slave_mii_bus->read = gswip_mdio_rd;
- ds->slave_mii_bus->write = gswip_mdio_wr;
- ds->slave_mii_bus->name = "lantiq,xrx200-mdio";
- snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
+ ds->user_mii_bus->priv = priv;
+ ds->user_mii_bus->read = gswip_mdio_rd;
+ ds->user_mii_bus->write = gswip_mdio_wr;
+ ds->user_mii_bus->name = "lantiq,xrx200-mdio";
+ snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii",
dev_name(priv->dev));
- ds->slave_mii_bus->parent = priv->dev;
- ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
+ ds->user_mii_bus->parent = priv->dev;
+ ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask;
- err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ err = of_mdiobus_register(ds->user_mii_bus, mdio_np);
if (err)
- mdiobus_free(ds->slave_mii_bus);
+ mdiobus_free(ds->user_mii_bus);
return err;
}
@@ -2196,8 +2196,8 @@ disable_switch:
dsa_unregister_switch(priv->ds);
mdio_bus:
if (mdio_np) {
- mdiobus_unregister(priv->ds->slave_mii_bus);
- mdiobus_free(priv->ds->slave_mii_bus);
+ mdiobus_unregister(priv->ds->user_mii_bus);
+ mdiobus_free(priv->ds->user_mii_bus);
}
put_mdio_node:
of_node_put(mdio_np);
@@ -2219,10 +2219,10 @@ static void gswip_remove(struct platform_device *pdev)
dsa_unregister_switch(priv->ds);
- if (priv->ds->slave_mii_bus) {
- mdiobus_unregister(priv->ds->slave_mii_bus);
- of_node_put(priv->ds->slave_mii_bus->dev.of_node);
- mdiobus_free(priv->ds->slave_mii_bus);
+ if (priv->ds->user_mii_bus) {
+ mdiobus_unregister(priv->ds->user_mii_bus);
+ of_node_put(priv->ds->user_mii_bus->dev.of_node);
+ mdiobus_free(priv->ds->user_mii_bus);
}
for (i = 0; i < priv->num_gphy_fw; i++)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index cde8ef33d029..2534c3d122e4 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -56,6 +56,103 @@ int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
REG_SW_MTU_MASK, frame_size);
}
+/**
+ * ksz9477_handle_wake_reason - Handle wake reason on a specified port.
+ * @dev: The device structure.
+ * @port: The port number.
+ *
+ * This function reads the PME (Power Management Event) status register of a
+ * specified port to determine the wake reason. If there is no wake event, it
+ * returns early. Otherwise, it logs the wake reason, which can be a "Link Up"
+ * or "Energy Detect" event. The PME status register
+ * is then cleared to acknowledge the handling of the wake event.
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port)
+{
+ u8 pme_status;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status);
+ if (ret)
+ return ret;
+
+ if (!pme_status)
+ return 0;
+
+ dev_dbg(dev->dev, "Wake event on port %d due to:%s%s\n", port,
+ pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
+ pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
+
+ return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status);
+}
+
+/**
+ * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port.
+ * @dev: The device structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function reports WAKE_PHY as supported when the switch is configured as
+ * a wakeup source, and marks it active when the port's PME Control Register has
+ * the "Link Up" or "Energy Detect" wake bits set.
+ */
+void ksz9477_get_wol(struct ksz_device *dev, int port,
+ struct ethtool_wolinfo *wol)
+{
+ u8 pme_ctrl;
+ int ret;
+
+ if (!dev->wakeup_source)
+ return;
+
+ wol->supported = WAKE_PHY;
+
+ ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl);
+ if (ret)
+ return;
+
+ if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
+ wol->wolopts |= WAKE_PHY;
+}
+
+/**
+ * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port.
+ * @dev: The device structure.
+ * @port: The port number.
+ * @wol: Pointer to ethtool Wake-on-LAN settings structure.
+ *
+ * This function configures Wake-on-LAN (WoL) settings for a specified port.
+ * It validates the provided WoL options, checks that the switch is configured
+ * as a wakeup source, clears any previous wake reasons, and sets the "Link Up"
+ * and "Energy Detect" wake bits in the port's PME control register when
+ * WAKE_PHY is requested.
+ *
+ * Return: 0 on success, or other error codes on failure.
+ */
+int ksz9477_set_wol(struct ksz_device *dev, int port,
+ struct ethtool_wolinfo *wol)
+{
+ u8 pme_ctrl = 0;
+ int ret;
+
+ if (wol->wolopts & ~WAKE_PHY)
+ return -EINVAL;
+
+ if (!dev->wakeup_source)
+ return -EOPNOTSUPP;
+
+ ret = ksz9477_handle_wake_reason(dev, port);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_PHY)
+ pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
+
+ return ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl);
+}
+
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
@@ -1006,6 +1103,9 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
ksz9477_port_acl_init(dev, port);
+
+ /* clear pending wake flags */
+ ksz9477_handle_wake_reason(dev, port);
}
void ksz9477_config_cpu_port(struct dsa_switch *ds)
@@ -1170,7 +1270,7 @@ int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
{
struct ksz_device *dev = ds->priv;
- struct net_device *slave;
+ struct net_device *user;
struct dsa_port *hsr_dp;
u8 data, hsr_ports = 0;
@@ -1202,8 +1302,8 @@ void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true);
/* Setup HW supported features for lan HSR ports */
- slave = dsa_to_port(ds, port)->slave;
- slave->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
+ user = dsa_to_port(ds, port)->user;
+ user->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
}
void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr)
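The new ksz9477_get_wol()/ksz9477_set_wol() callbacks surface through the standard ethtool Wake-on-LAN interface on the DSA user port. A minimal userspace sketch of how the new path would be exercised, assuming a user port named "lan1" (the interface name is illustrative, and the reported bits depend on the switch's wakeup-source configuration):

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr = { 0 };
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	snprintf(ifr.ifr_name, IFNAMSIZ, "lan1");	/* illustrative port name */
	ifr.ifr_data = (void *)&wol;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)		/* read supported/active WoL modes */
		return 1;
	printf("supported 0x%x active 0x%x\n", wol.supported, wol.wolopts);

	wol.cmd = ETHTOOL_SWOL;				/* request PHY-event wake */
	wol.wolopts = WAKE_PHY;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	close(fd);
	return 0;
}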
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index f90e2e8ebe80..fa8d0318b437 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -58,6 +58,10 @@ void ksz9477_switch_exit(struct ksz_device *dev);
void ksz9477_port_queue_split(struct ksz_device *dev, int port);
void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr);
void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr);
+void ksz9477_get_wol(struct ksz_device *dev, int port,
+ struct ethtool_wolinfo *wol);
+int ksz9477_set_wol(struct ksz_device *dev, int port,
+ struct ethtool_wolinfo *wol);
int ksz9477_port_acl_init(struct ksz_device *dev, int port);
void ksz9477_port_acl_free(struct ksz_device *dev, int port);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index b800ace40ce1..de788f424a3f 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -319,6 +319,8 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
+ .get_wol = ksz9477_get_wol,
+ .set_wol = ksz9477_set_wol,
.config_cpu_port = ksz9477_config_cpu_port,
.tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
@@ -441,6 +443,7 @@ static const u8 ksz8795_shifts[] = {
};
static const u16 ksz8863_regs[] = {
+ [REG_SW_MAC_ADDR] = 0x70,
[REG_IND_CTRL_0] = 0x79,
[REG_IND_DATA_8] = 0x7B,
[REG_IND_DATA_CHECK] = 0x7B,
@@ -1945,14 +1948,14 @@ static int ksz_irq_phy_setup(struct ksz_device *dev)
ret = irq;
goto out;
}
- ds->slave_mii_bus->irq[phy] = irq;
+ ds->user_mii_bus->irq[phy] = irq;
}
}
return 0;
out:
while (phy--)
if (BIT(phy) & ds->phys_mii_mask)
- irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+ irq_dispose_mapping(ds->user_mii_bus->irq[phy]);
return ret;
}
@@ -1964,7 +1967,7 @@ static void ksz_irq_phy_free(struct ksz_device *dev)
for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++)
if (BIT(phy) & ds->phys_mii_mask)
- irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+ irq_dispose_mapping(ds->user_mii_bus->irq[phy]);
}
static int ksz_mdio_register(struct ksz_device *dev)
@@ -1987,12 +1990,12 @@ static int ksz_mdio_register(struct ksz_device *dev)
bus->priv = dev;
bus->read = ksz_sw_mdio_read;
bus->write = ksz_sw_mdio_write;
- bus->name = "ksz slave smi";
+ bus->name = "ksz user smi";
snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
bus->parent = ds->dev;
bus->phy_mask = ~ds->phys_mii_mask;
- ds->slave_mii_bus = bus;
+ ds->user_mii_bus = bus;
if (dev->irq > 0) {
ret = ksz_irq_phy_setup(dev);
@@ -2344,7 +2347,7 @@ static void ksz_mib_read_work(struct work_struct *work)
if (!p->read) {
const struct dsa_port *dp = dsa_to_port(dev->ds, i);
- if (!netif_carrier_ok(dp->slave))
+ if (!netif_carrier_ok(dp->user))
mib->cnt_ptr = dev->info->reg_mib_cnt;
}
port_r_cnt(dev, i);
@@ -2464,7 +2467,7 @@ static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
mutex_lock(&mib->cnt_mutex);
/* Only read dropped counters if no link. */
- if (!netif_carrier_ok(dp->slave))
+ if (!netif_carrier_ok(dp->user))
mib->cnt_ptr = dev->info->reg_mib_cnt;
port_r_cnt(dev, port);
memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64));
@@ -2574,7 +2577,7 @@ static int ksz_port_setup(struct dsa_switch *ds, int port)
if (!dsa_is_user_port(ds, port))
return 0;
- /* setup slave port */
+ /* setup user port */
dev->dev_ops->port_setup(dev, port, false);
/* port_stp_state_set() will be called after to enable the port so
@@ -3542,6 +3545,26 @@ static int ksz_setup_tc(struct dsa_switch *ds, int port,
}
}
+static void ksz_get_wol(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *wol)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->get_wol)
+ dev->dev_ops->get_wol(dev, port, wol);
+}
+
+static int ksz_set_wol(struct dsa_switch *ds, int port,
+ struct ethtool_wolinfo *wol)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->set_wol)
+ return dev->dev_ops->set_wol(dev, port, wol);
+
+ return -EOPNOTSUPP;
+}
+
static int ksz_port_set_mac_address(struct dsa_switch *ds, int port,
const unsigned char *addr)
{
@@ -3567,8 +3590,8 @@ static int ksz_port_set_mac_address(struct dsa_switch *ds, int port,
static int ksz_switch_macaddr_get(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
- struct net_device *slave = dsa_to_port(ds, port)->slave;
- const unsigned char *addr = slave->dev_addr;
+ struct net_device *user = dsa_to_port(ds, port)->user;
+ const unsigned char *addr = user->dev_addr;
struct ksz_switch_macaddr *switch_macaddr;
struct ksz_device *dev = ds->priv;
const u16 *regs = dev->info->regs;
@@ -3726,6 +3749,8 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.get_pause_stats = ksz_get_pause_stats,
.port_change_mtu = ksz_change_mtu,
.port_max_mtu = ksz_max_mtu,
+ .get_wol = ksz_get_wol,
+ .set_wol = ksz_set_wol,
.get_ts_info = ksz_get_ts_info,
.port_hwtstamp_get = ksz_hwtstamp_get,
.port_hwtstamp_set = ksz_hwtstamp_set,
@@ -4158,6 +4183,9 @@ int ksz_switch_register(struct ksz_device *dev)
dev_err(dev->dev, "inconsistent synclko settings\n");
return -EINVAL;
}
+
+ dev->wakeup_source = of_property_read_bool(dev->dev->of_node,
+ "wakeup-source");
}
ret = dsa_register_switch(dev->ds);
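The ksz_get_wol()/ksz_set_wol() wrappers follow the driver's usual delegation pattern: the common switch ops forward to a per-chip callback when one is provided and otherwise fall back to -EOPNOTSUPP. A tiny self-contained C model of that optional-callback dispatch (the structure and names are illustrative, not from the driver):

#include <errno.h>
#include <stdio.h>

struct chip_ops {
	/* optional: only chips with PME wiring provide this */
	int (*set_wol)(int port, unsigned int wolopts);
};

static int chip_a_set_wol(int port, unsigned int wolopts)
{
	printf("port %d: wolopts 0x%x\n", port, wolopts);
	return 0;
}

static int common_set_wol(const struct chip_ops *ops, int port, unsigned int wolopts)
{
	if (ops->set_wol)
		return ops->set_wol(port, wolopts);
	return -EOPNOTSUPP;	/* chip has no WoL support */
}

int main(void)
{
	struct chip_ops with = { .set_wol = chip_a_set_wol };
	struct chip_ops without = { 0 };

	common_set_wol(&with, 1, 0x1);
	return common_set_wol(&without, 1, 0x1) == -EOPNOTSUPP ? 0 : 1;
}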
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 8842efca0871..a7394175fcf6 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -163,6 +163,7 @@ struct ksz_device {
phy_interface_t compat_interface;
bool synclko_125;
bool synclko_disable;
+ bool wakeup_source;
struct vlan_table *vlan_cache;
@@ -373,6 +374,10 @@ struct ksz_dev_ops {
int duplex, bool tx_pause, bool rx_pause);
void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val);
+ void (*get_wol)(struct ksz_device *dev, int port,
+ struct ethtool_wolinfo *wol);
+ int (*set_wol)(struct ksz_device *dev, int port,
+ struct ethtool_wolinfo *wol);
void (*config_cpu_port)(struct dsa_switch *ds);
int (*enable_stp_addr)(struct ksz_device *dev);
int (*reset)(struct ksz_device *dev);
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 4e22a695a64c..1fe105913c75 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -557,7 +557,7 @@ static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
struct skb_shared_hwtstamps hwtstamps = {};
int ret;
- /* timeout must include DSA master to transmit data, tstamp latency,
+ /* timeout must include DSA conduit to transmit data, tstamp latency,
* IRQ latency and time for reading the time stamp.
*/
ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ecf5d3deb36e..d27c6b70a2f6 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1113,7 +1113,7 @@ mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
u32 val;
/* When a new MTU is set, DSA always sets the CPU port's MTU to the
- * largest MTU of the slave ports. Because the switch only has a global
+ * largest MTU of the user ports. Because the switch only has a global
* RX length register, only allowing CPU port here is enough.
*/
if (!dsa_is_cpu_port(ds, port))
@@ -2069,7 +2069,7 @@ mt7530_setup_mdio_irq(struct mt7530_priv *priv)
unsigned int irq;
irq = irq_create_mapping(priv->irq_domain, p);
- ds->slave_mii_bus->irq[p] = irq;
+ ds->user_mii_bus->irq[p] = irq;
}
}
}
@@ -2163,7 +2163,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
if (!bus)
return -ENOMEM;
- ds->slave_mii_bus = bus;
+ ds->user_mii_bus = bus;
bus->priv = priv;
bus->name = KBUILD_MODNAME "-mii";
snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
@@ -2200,20 +2200,20 @@ mt7530_setup(struct dsa_switch *ds)
u32 id, val;
int ret, i;
- /* The parent node of master netdev which holds the common system
+ /* The parent node of conduit netdev which holds the common system
* controller also is the container for two GMACs nodes representing
* as two netdev instances.
*/
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
- dn = cpu_dp->master->dev.of_node->parent;
+ dn = cpu_dp->conduit->dev.of_node->parent;
/* It doesn't matter which CPU port is found first,
- * their masters should share the same parent OF node
+ * their conduits should share the same parent OF node
*/
break;
}
if (!dn) {
- dev_err(ds->dev, "parent OF node of DSA master not found");
+ dev_err(ds->dev, "parent OF node of DSA conduit not found");
return -EINVAL;
}
@@ -2488,7 +2488,7 @@ mt7531_setup(struct dsa_switch *ds)
if (mt7531_dual_sgmii_supported(priv)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
- /* Let ds->slave_mii_bus be able to access external phy. */
+ /* Let ds->user_mii_bus be able to access external phy. */
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
MT7531_EXT_P_MDC_11);
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
@@ -2717,7 +2717,7 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
dp = dsa_to_port(ds, port);
- phydev = dp->slave->phydev;
+ phydev = dp->user->phydev;
return mt7531_rgmii_setup(priv, port, interface, phydev);
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_NA:
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index ab434a77b059..42b1acaca33a 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2486,7 +2486,7 @@ static int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
else
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
- /* net/dsa/slave.c will call dsa_port_vlan_add() for the affected port
+ /* net/dsa/user.c will call dsa_port_vlan_add() for the affected port
* and then the CPU port. Do not warn for duplicates for the CPU port.
*/
warn = !dsa_is_cpu_port(ds, port) && !dsa_is_dsa_port(ds, port);
@@ -3719,7 +3719,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
return err;
chip->ds = ds;
- ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
+ ds->user_mii_bus = mv88e6xxx_default_mdio_bus(chip);
/* Since virtual bridges are mapped in the PVT, the number we support
* depends on the physical switch topology. We need to let DSA figure
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 9a3e5ec16972..61e95487732d 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -42,22 +42,22 @@ static struct net_device *felix_classify_db(struct dsa_db db)
}
}
-static int felix_cpu_port_for_master(struct dsa_switch *ds,
- struct net_device *master)
+static int felix_cpu_port_for_conduit(struct dsa_switch *ds,
+ struct net_device *conduit)
{
struct ocelot *ocelot = ds->priv;
struct dsa_port *cpu_dp;
int lag;
- if (netif_is_lag_master(master)) {
+ if (netif_is_lag_master(conduit)) {
mutex_lock(&ocelot->fwd_domain_lock);
- lag = ocelot_bond_get_id(ocelot, master);
+ lag = ocelot_bond_get_id(ocelot, conduit);
mutex_unlock(&ocelot->fwd_domain_lock);
return lag;
}
- cpu_dp = master->dsa_ptr;
+ cpu_dp = conduit->dsa_ptr;
return cpu_dp->index;
}
@@ -366,7 +366,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
* is the mode through which frames can be injected from and extracted to an
* external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
* running Linux, and this forms a DSA setup together with the enetc or fman
- * DSA master.
+ * DSA conduit.
*/
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
@@ -441,16 +441,16 @@ static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
return BIT(ocelot->num_phys_ports);
}
-static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
- struct net_device *master,
- struct netlink_ext_ack *extack)
+static int felix_tag_npi_change_conduit(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
struct ocelot *ocelot = ds->priv;
- if (netif_is_lag_master(master)) {
+ if (netif_is_lag_master(conduit)) {
NL_SET_ERR_MSG_MOD(extack,
- "LAG DSA master only supported using ocelot-8021q");
+ "LAG DSA conduit only supported using ocelot-8021q");
return -EOPNOTSUPP;
}
@@ -459,24 +459,24 @@ static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
* come back up until they're all changed to the new one.
*/
dsa_switch_for_each_user_port(other_dp, ds) {
- struct net_device *slave = other_dp->slave;
+ struct net_device *user = other_dp->user;
- if (other_dp != dp && (slave->flags & IFF_UP) &&
- dsa_port_to_master(other_dp) != master) {
+ if (other_dp != dp && (user->flags & IFF_UP) &&
+ dsa_port_to_conduit(other_dp) != conduit) {
NL_SET_ERR_MSG_MOD(extack,
- "Cannot change while old master still has users");
+ "Cannot change while old conduit still has users");
return -EOPNOTSUPP;
}
}
felix_npi_port_deinit(ocelot, ocelot->npi);
- felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master));
+ felix_npi_port_init(ocelot, felix_cpu_port_for_conduit(ds, conduit));
return 0;
}
/* Alternatively to using the NPI functionality, that same hardware MAC
- * connected internally to the enetc or fman DSA master can be configured to
+ * connected internally to the enetc or fman DSA conduit can be configured to
* use the software-defined tag_8021q frame format. As far as the hardware is
* concerned, it thinks it is a "dumb switch" - the queues of the CPU port
* module are now disconnected from it, but can still be accessed through
@@ -486,7 +486,7 @@ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
.setup = felix_tag_npi_setup,
.teardown = felix_tag_npi_teardown,
.get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
- .change_master = felix_tag_npi_change_master,
+ .change_conduit = felix_tag_npi_change_conduit,
};
static int felix_tag_8021q_setup(struct dsa_switch *ds)
@@ -561,11 +561,11 @@ static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
return dsa_cpu_ports(ds);
}
-static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port,
- struct net_device *master,
- struct netlink_ext_ack *extack)
+static int felix_tag_8021q_change_conduit(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack)
{
- int cpu = felix_cpu_port_for_master(ds, master);
+ int cpu = felix_cpu_port_for_conduit(ds, conduit);
struct ocelot *ocelot = ds->priv;
ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
@@ -578,7 +578,7 @@ static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
.setup = felix_tag_8021q_setup,
.teardown = felix_tag_8021q_teardown,
.get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
- .change_master = felix_tag_8021q_change_master,
+ .change_conduit = felix_tag_8021q_change_conduit,
};
static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
@@ -741,14 +741,14 @@ static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
!!felix->host_flood_mc_mask, true);
}
-static int felix_port_change_master(struct dsa_switch *ds, int port,
- struct net_device *master,
- struct netlink_ext_ack *extack)
+static int felix_port_change_conduit(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- return felix->tag_proto_ops->change_master(ds, port, master, extack);
+ return felix->tag_proto_ops->change_conduit(ds, port, conduit, extack);
}
static int felix_set_ageing_time(struct dsa_switch *ds,
@@ -953,7 +953,7 @@ static int felix_lag_join(struct dsa_switch *ds, int port,
if (!dsa_is_cpu_port(ds, port))
return 0;
- return felix_port_change_master(ds, port, lag.dev, extack);
+ return felix_port_change_conduit(ds, port, lag.dev, extack);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
@@ -967,7 +967,7 @@ static int felix_lag_leave(struct dsa_switch *ds, int port,
if (!dsa_is_cpu_port(ds, port))
return 0;
- return felix_port_change_master(ds, port, lag.dev, NULL);
+ return felix_port_change_conduit(ds, port, lag.dev, NULL);
}
static int felix_lag_change(struct dsa_switch *ds, int port)
@@ -1116,10 +1116,10 @@ static int felix_port_enable(struct dsa_switch *ds, int port,
return 0;
if (ocelot->npi >= 0) {
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
- if (felix_cpu_port_for_master(ds, master) != ocelot->npi) {
- dev_err(ds->dev, "Multiple masters are not allowed\n");
+ if (felix_cpu_port_for_conduit(ds, conduit) != ocelot->npi) {
+ dev_err(ds->dev, "Multiple conduits are not allowed\n");
return -EINVAL;
}
}
@@ -2164,7 +2164,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_add_dscp_prio = felix_port_add_dscp_prio,
.port_del_dscp_prio = felix_port_del_dscp_prio,
.port_set_host_flood = felix_port_set_host_flood,
- .port_change_master = felix_port_change_master,
+ .port_change_conduit = felix_port_change_conduit,
};
EXPORT_SYMBOL_GPL(felix_switch_ops);
@@ -2176,7 +2176,7 @@ struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
if (!dsa_is_user_port(ds, port))
return NULL;
- return dsa_to_port(ds, port)->slave;
+ return dsa_to_port(ds, port)->user;
}
EXPORT_SYMBOL_GPL(felix_port_to_netdev);
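felix_cpu_port_for_conduit() resolves the host port either from a LAG conduit (via its bond id) or from a plain conduit (via the CPU port attached to it). A small plain-C model of that selection, with made-up types standing in for the netdev and DSA structures:

#include <stdio.h>

struct conduit {
	int is_lag;	/* bonded DSA conduit */
	int bond_id;	/* valid when is_lag is set */
	int cpu_port;	/* valid otherwise */
};

/* model of felix_cpu_port_for_conduit(): LAG conduits resolve to the bond id,
 * plain conduits to the CPU port they are attached to */
static int cpu_port_for_conduit(const struct conduit *c)
{
	return c->is_lag ? c->bond_id : c->cpu_port;
}

int main(void)
{
	struct conduit plain = { .is_lag = 0, .cpu_port = 4 };
	struct conduit lag = { .is_lag = 1, .bond_id = 1 };

	printf("%d %d\n", cpu_port_for_conduit(&plain), cpu_port_for_conduit(&lag));
	return 0;
}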
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 1d4befe7cfe8..dbf5872fe367 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -77,9 +77,9 @@ struct felix_tag_proto_ops {
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
- int (*change_master)(struct dsa_switch *ds, int port,
- struct net_device *master,
- struct netlink_ext_ack *extack);
+ int (*change_conduit)(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack);
};
extern const struct dsa_switch_ops felix_switch_ops;
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 4ce68e655a63..ec57d9d52072 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -323,14 +323,14 @@ static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
mutex_lock(&mgmt_eth_data->mutex);
- /* Check mgmt_master if is operational */
- if (!priv->mgmt_master) {
+ /* Check if the mgmt_conduit is operational */
+ if (!priv->mgmt_conduit) {
kfree_skb(skb);
mutex_unlock(&mgmt_eth_data->mutex);
return -EINVAL;
}
- skb->dev = priv->mgmt_master;
+ skb->dev = priv->mgmt_conduit;
reinit_completion(&mgmt_eth_data->rw_done);
@@ -375,14 +375,14 @@ static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
mutex_lock(&mgmt_eth_data->mutex);
- /* Check mgmt_master if is operational */
- if (!priv->mgmt_master) {
+ /* Check if the mgmt_conduit is operational */
+ if (!priv->mgmt_conduit) {
kfree_skb(skb);
mutex_unlock(&mgmt_eth_data->mutex);
return -EINVAL;
}
- skb->dev = priv->mgmt_master;
+ skb->dev = priv->mgmt_conduit;
reinit_completion(&mgmt_eth_data->rw_done);
@@ -508,7 +508,7 @@ qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
struct qca8k_priv *priv = ctx;
u32 reg = *(u16 *)reg_buf;
- if (priv->mgmt_master &&
+ if (priv->mgmt_conduit &&
!qca8k_read_eth(priv, reg, val_buf, val_len))
return 0;
@@ -531,7 +531,7 @@ qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
u32 reg = *(u16 *)reg_buf;
u32 *val = (u32 *)val_buf;
- if (priv->mgmt_master &&
+ if (priv->mgmt_conduit &&
!qca8k_write_eth(priv, reg, val, val_len))
return 0;
@@ -626,7 +626,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
struct sk_buff *write_skb, *clear_skb, *read_skb;
struct qca8k_mgmt_eth_data *mgmt_eth_data;
u32 write_val, clear_val = 0, val;
- struct net_device *mgmt_master;
+ struct net_device *mgmt_conduit;
int ret, ret1;
bool ack;
@@ -683,18 +683,18 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
*/
mutex_lock(&mgmt_eth_data->mutex);
- /* Check if mgmt_master is operational */
- mgmt_master = priv->mgmt_master;
- if (!mgmt_master) {
+ /* Check if mgmt_conduit is operational */
+ mgmt_conduit = priv->mgmt_conduit;
+ if (!mgmt_conduit) {
mutex_unlock(&mgmt_eth_data->mutex);
mutex_unlock(&priv->bus->mdio_lock);
ret = -EINVAL;
- goto err_mgmt_master;
+ goto err_mgmt_conduit;
}
- read_skb->dev = mgmt_master;
- clear_skb->dev = mgmt_master;
- write_skb->dev = mgmt_master;
+ read_skb->dev = mgmt_conduit;
+ clear_skb->dev = mgmt_conduit;
+ write_skb->dev = mgmt_conduit;
reinit_completion(&mgmt_eth_data->rw_done);
@@ -780,7 +780,7 @@ exit:
return ret;
/* Error handling before lock */
-err_mgmt_master:
+err_mgmt_conduit:
kfree_skb(read_skb);
err_read_skb:
kfree_skb(clear_skb);
@@ -959,12 +959,12 @@ qca8k_mdio_register(struct qca8k_priv *priv)
ds->dst->index, ds->index);
bus->parent = ds->dev;
bus->phy_mask = ~ds->phys_mii_mask;
- ds->slave_mii_bus = bus;
+ ds->user_mii_bus = bus;
/* Check if the devicetree declare the port:phy mapping */
mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
if (of_device_is_available(mdio)) {
- bus->name = "qca8k slave mii";
+ bus->name = "qca8k user mii";
bus->read = qca8k_internal_mdio_read;
bus->write = qca8k_internal_mdio_write;
return devm_of_mdiobus_register(priv->dev, bus, mdio);
@@ -973,7 +973,7 @@ qca8k_mdio_register(struct qca8k_priv *priv)
/* If a mapping can't be found the legacy mapping is used,
* using the qca8k_port_to_phy function
*/
- bus->name = "qca8k-legacy slave mii";
+ bus->name = "qca8k-legacy user mii";
bus->read = qca8k_legacy_mdio_read;
bus->write = qca8k_legacy_mdio_write;
return devm_mdiobus_register(priv->dev, bus);
@@ -1728,10 +1728,10 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
}
static void
-qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
- bool operational)
+qca8k_conduit_change(struct dsa_switch *ds, const struct net_device *conduit,
+ bool operational)
{
- struct dsa_port *dp = master->dsa_ptr;
+ struct dsa_port *dp = conduit->dsa_ptr;
struct qca8k_priv *priv = ds->priv;
/* Ethernet MIB/MDIO is only supported for CPU port 0 */
@@ -1741,7 +1741,7 @@ qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
mutex_lock(&priv->mgmt_eth_data.mutex);
mutex_lock(&priv->mib_eth_data.mutex);
- priv->mgmt_master = operational ? (struct net_device *)master : NULL;
+ priv->mgmt_conduit = operational ? (struct net_device *)conduit : NULL;
mutex_unlock(&priv->mib_eth_data.mutex);
mutex_unlock(&priv->mgmt_eth_data.mutex);
@@ -2016,7 +2016,7 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.get_phy_flags = qca8k_get_phy_flags,
.port_lag_join = qca8k_port_lag_join,
.port_lag_leave = qca8k_port_lag_leave,
- .master_state_change = qca8k_master_change,
+ .conduit_state_change = qca8k_conduit_change,
.connect_tag_protocol = qca8k_connect_tag_protocol,
};
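qca8k_conduit_change() publishes or clears priv->mgmt_conduit under the management-Ethernet mutexes, and every Ethernet-based register access re-checks the pointer under the same lock before handing an skb to it. A small pthread-based model of that handshake (the types and names are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mgmt_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *mgmt_conduit;	/* NULL while the conduit is down */

static int mgmt_read(void)
{
	int ret = 0;

	pthread_mutex_lock(&mgmt_lock);
	if (!mgmt_conduit)		/* conduit went away: bail out */
		ret = -1;
	else
		printf("sending request via %s\n", mgmt_conduit);
	pthread_mutex_unlock(&mgmt_lock);
	return ret;
}

static void conduit_state_change(const char *conduit, int operational)
{
	pthread_mutex_lock(&mgmt_lock);
	mgmt_conduit = operational ? conduit : NULL;
	pthread_mutex_unlock(&mgmt_lock);
}

int main(void)
{
	conduit_state_change("eth0", 1);
	mgmt_read();				/* succeeds */
	conduit_state_change("eth0", 0);
	return mgmt_read() == -1 ? 0 : 1;	/* bails out once the conduit is gone */
}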
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 9ff0a3c1cb91..9243eff8918d 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -499,7 +499,7 @@ void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
u32 hi = 0;
int ret;
- if (priv->mgmt_master && priv->info->ops->autocast_mib &&
+ if (priv->mgmt_conduit && priv->info->ops->autocast_mib &&
priv->info->ops->autocast_mib(ds, port, data) > 0)
return;
@@ -761,7 +761,7 @@ int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
int ret;
/* We only have a general MTU setting.
- * DSA always set the CPU port's MTU to the largest MTU of the slave
+ * DSA always sets the CPU port's MTU to the largest MTU of the user
* ports.
* Setting MTU just for the CPU port is sufficient to correctly set a
* value for every port.
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
index e8c16e76e34b..90e30c2909e4 100644
--- a/drivers/net/dsa/qca/qca8k-leds.c
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -356,8 +356,8 @@ static struct device *qca8k_cled_hw_control_get_device(struct led_classdev *ldev
dp = dsa_to_port(priv->ds, qca8k_phy_to_port(led->port_num));
if (!dp)
return NULL;
- if (dp->slave)
- return &dp->slave->dev;
+ if (dp->user)
+ return &dp->user->dev;
return NULL;
}
@@ -429,7 +429,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
init_data.default_label = ":port";
init_data.fwnode = led;
init_data.devname_mandatory = true;
- init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->slave_mii_bus->id,
+ init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->user_mii_bus->id,
port_num);
if (!init_data.devicename)
return -ENOMEM;
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 8f88b7db384d..2ac7e88f8da5 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -458,7 +458,7 @@ struct qca8k_priv {
struct mutex reg_mutex;
struct device *dev;
struct gpio_desc *reset_gpio;
- struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
+ struct net_device *mgmt_conduit; /* Track if mdio/mib Ethernet is available */
struct qca8k_mgmt_eth_data mgmt_eth_data;
struct qca8k_mib_eth_data mib_eth_data;
struct qca8k_mdio_cache mdio_cache;
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index bfd11591faf4..755546ed8db6 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -378,25 +378,25 @@ static int realtek_smi_setup_mdio(struct dsa_switch *ds)
return -ENODEV;
}
- priv->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
- if (!priv->slave_mii_bus) {
+ priv->user_mii_bus = devm_mdiobus_alloc(priv->dev);
+ if (!priv->user_mii_bus) {
ret = -ENOMEM;
goto err_put_node;
}
- priv->slave_mii_bus->priv = priv;
- priv->slave_mii_bus->name = "SMI slave MII";
- priv->slave_mii_bus->read = realtek_smi_mdio_read;
- priv->slave_mii_bus->write = realtek_smi_mdio_write;
- snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
+ priv->user_mii_bus->priv = priv;
+ priv->user_mii_bus->name = "SMI user MII";
+ priv->user_mii_bus->read = realtek_smi_mdio_read;
+ priv->user_mii_bus->write = realtek_smi_mdio_write;
+ snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
ds->index);
- priv->slave_mii_bus->dev.of_node = mdio_np;
- priv->slave_mii_bus->parent = priv->dev;
- ds->slave_mii_bus = priv->slave_mii_bus;
+ priv->user_mii_bus->dev.of_node = mdio_np;
+ priv->user_mii_bus->parent = priv->dev;
+ ds->user_mii_bus = priv->user_mii_bus;
- ret = devm_of_mdiobus_register(priv->dev, priv->slave_mii_bus, mdio_np);
+ ret = devm_of_mdiobus_register(priv->dev, priv->user_mii_bus, mdio_np);
if (ret) {
dev_err(priv->dev, "unable to register MDIO bus %s\n",
- priv->slave_mii_bus->id);
+ priv->user_mii_bus->id);
goto err_put_node;
}
@@ -514,8 +514,8 @@ static void realtek_smi_remove(struct platform_device *pdev)
return;
dsa_unregister_switch(priv->ds);
- if (priv->slave_mii_bus)
- of_node_put(priv->slave_mii_bus->dev.of_node);
+ if (priv->user_mii_bus)
+ of_node_put(priv->user_mii_bus->dev.of_node);
/* leave the device reset asserted */
if (priv->reset)
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index 4fa7c6ba874a..790488e9c667 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -54,7 +54,7 @@ struct realtek_priv {
struct regmap *map;
struct regmap *map_nolock;
struct mutex map_lock;
- struct mii_bus *slave_mii_bus;
+ struct mii_bus *user_mii_bus;
struct mii_bus *bus;
int mdio_addr;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index d171c18dd354..0875e4fc9f57 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1144,7 +1144,7 @@ static int rtl8365mb_port_change_mtu(struct dsa_switch *ds, int port,
int frame_size;
/* When a new MTU is set, DSA always sets the CPU port's MTU to the
- * largest MTU of the slave ports. Because the switch only has a global
+ * largest MTU of the user ports. Because the switch only has a global
* RX length register, only allowing CPU port here is enough.
*/
if (!dsa_is_cpu_port(ds, port))
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 1a367e64bc3b..74cee39d73df 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -2688,7 +2688,7 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
}
/* Transfer skb to the host port. */
- dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
+ dsa_enqueue_skb(skb, dsa_to_port(ds, port)->user);
/* Wait until the switch has processed the frame */
do {
@@ -3081,7 +3081,7 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
* ref_clk pin. So port clocking needs to be initialized early, before
* connecting to PHYs is attempted, otherwise they won't respond through MDIO.
* Setting correct PHY link speed does not matter now.
- * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
+ * But dsa_user_phy_setup is called later than sja1105_setup, so the PHY
* bindings are not yet parsed by DSA core. We need to parse early so that we
* can populate the xMII mode parameters table.
*/
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 5b02e9e426fd..96db032b478f 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -554,7 +554,7 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
unsigned int val = XRS_HSR_CFG_HSR_PRP;
struct dsa_port *partner = NULL, *dp;
struct xrs700x *priv = ds->priv;
- struct net_device *slave;
+ struct net_device *user;
int ret, i, hsr_pair[2];
enum hsr_version ver;
bool fwd = false;
@@ -638,8 +638,8 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
- slave = dsa_to_port(ds, hsr_pair[i])->slave;
- slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
+ user = dsa_to_port(ds, hsr_pair[i])->user;
+ user->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
}
return 0;
@@ -650,7 +650,7 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
{
struct dsa_port *partner = NULL, *dp;
struct xrs700x *priv = ds->priv;
- struct net_device *slave;
+ struct net_device *user;
int i, hsr_pair[2];
unsigned int val;
@@ -692,8 +692,8 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
- slave = dsa_to_port(ds, hsr_pair[i])->slave;
- slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
+ user = dsa_to_port(ds, hsr_pair[i])->user;
+ user->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
}
return 0;
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 2a8643e167e1..0d2091e9eb28 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -152,11 +152,8 @@ void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
dma_free_coherent(dev, qcq->cq_size,
qcq->cq_base, qcq->cq_base_pa);
- if (qcq->cq.info)
- vfree(qcq->cq.info);
-
- if (qcq->q.info)
- vfree(qcq->q.info);
+ vfree(qcq->cq.info);
+ vfree(qcq->q.info);
memset(qcq, 0, sizeof(*qcq));
}
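The pds_core cleanup drops the redundant NULL checks because vfree(NULL), like kfree(NULL) and userspace free(NULL), is defined to be a no-op. A one-line illustration of the same guarantee in standard C:

#include <stdlib.h>

int main(void)
{
	char *buf = NULL;

	/* free(NULL) is guaranteed to do nothing, so no guard is needed */
	free(buf);
	return 0;
}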
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index ab096795e805..c9faa8540859 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2430,7 +2430,7 @@ static int bcm_sysport_netdevice_event(struct notifier_block *nb,
if (dev->netdev_ops != &bcm_sysport_netdev_ops)
return NOTIFY_DONE;
- if (!dsa_slave_dev_check(info->upper_dev))
+ if (!dsa_user_dev_check(info->upper_dev))
return NOTIFY_DONE;
if (info->linking)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5d7a29f99401..d0359b569afe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3302,8 +3302,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
- pp.flags |= PP_FLAG_PAGE_FRAG;
rxr->page_pool = page_pool_create(&pp);
if (IS_ERR(rxr->page_pool)) {
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 7750702900fa..6f6525983130 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -2259,7 +2259,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
if (tp->snd_una != snd_una) {
tp->snd_una = snd_una;
- tp->rcv_tstamp = tcp_time_stamp(tp);
+ tp->rcv_tstamp = tcp_jiffies32;
if (tp->snd_una == tp->snd_nxt &&
!csk_flag_nochk(csk, CSK_TX_FAILOVER))
csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
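The chtls fix stores rcv_tstamp on the free-running 32-bit jiffies clock (tcp_jiffies32) that the rest of the stack compares it against. Such tick counters wrap, so comparisons use wrap-safe signed arithmetic rather than absolute values. A minimal C sketch of that idiom, equivalent in spirit to the kernel's time_after() (the tick values are made up):

#include <stdint.h>
#include <stdio.h>

/* true if a is later than b, even across a 32-bit wrap */
static int tick_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	uint32_t before_wrap = 0xfffffff0u;
	uint32_t after_wrap = 0x00000010u;	/* 0x20 ticks later, past the wrap */

	printf("%d\n", tick_after(after_wrap, before_wrap));	/* prints 1 */
	return 0;
}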
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 6e14c918e3fb..f188fba021a6 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -143,7 +143,7 @@ struct tsnep_rx {
struct tsnep_queue {
struct tsnep_adapter *adapter;
- char name[IFNAMSIZ + 9];
+ char name[IFNAMSIZ + 16];
struct tsnep_tx *tx;
struct tsnep_rx *rx;
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index f16b6b3e21a6..df40c720e7b2 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1830,14 +1830,14 @@ static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
dev = queue->adapter;
} else {
if (queue->tx && queue->rx)
- sprintf(queue->name, "%s-txrx-%d", name,
- queue->rx->queue_index);
+ snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
+ name, queue->rx->queue_index);
else if (queue->tx)
- sprintf(queue->name, "%s-tx-%d", name,
- queue->tx->queue_index);
+ snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
+ name, queue->tx->queue_index);
else
- sprintf(queue->name, "%s-rx-%d", name,
- queue->rx->queue_index);
+ snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
+ name, queue->rx->queue_index);
handler = tsnep_irq_txrx;
dev = queue;
}
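Together with the tsnep.h change that grows queue->name to IFNAMSIZ + 16 bytes, switching from sprintf() to snprintf() ensures the composed IRQ name cannot overflow the buffer even for a maximum-length interface name and a large queue index: snprintf() truncates and NUL-terminates instead of writing past the end. A small standalone illustration (the format mirrors the driver's "-txrx-" name, the values are made up):

#include <stdio.h>
#include <net/if.h>

#ifndef IFNAMSIZ
#define IFNAMSIZ 16	/* fallback if the libc header does not expose it */
#endif

int main(void)
{
	char name[IFNAMSIZ + 16];
	/* worst case: 15-character interface name plus a 10-digit queue index */
	const char *ifname = "verylongnetname";
	int queue_index = 2147483647;

	/* bounded formatting: at most sizeof(name) - 1 characters plus the NUL */
	snprintf(name, sizeof(name), "%s-txrx-%d", ifname, queue_index);
	printf("%s\n", name);
	return 0;
}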
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index cf50368441b7..06117502001f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4940,8 +4940,7 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
{
struct page_pool_params pp_params = {
- .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
- PP_FLAG_DMA_SYNC_DEV,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = hns3_page_order(ring),
.pool_size = ring->desc_num * hns3_buf_size(ring) /
(PAGE_SIZE << hns3_page_order(ring)),
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 99c0576e6383..66e5807903a0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -881,8 +881,8 @@ static const struct hclge_speed_bit_map speed_bit_map[] = {
{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
- {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
- {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
+ {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
+ {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};
@@ -939,100 +939,98 @@ static void hclge_update_fec_support(struct hclge_mac *mac)
mac->supported);
}
+static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
+ {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+};
+
+static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
+};
+
+static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
+ {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+};
+
+static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
+ {HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+ {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+};
+
static void hclge_convert_setting_sr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_lr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(
- ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_cr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_kr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_1G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
@@ -1158,10 +1156,10 @@ static u32 hclge_get_max_speed(u16 speed_ability)
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
return HCLGE_MAC_SPEED_200G;
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ if (speed_ability & HCLGE_SUPPORT_100G_BITS)
return HCLGE_MAC_SPEED_100G;
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ if (speed_ability & HCLGE_SUPPORT_50G_BITS)
return HCLGE_MAC_SPEED_50G;
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
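The four hclge_convert_setting_*() helpers now share one shape: a const table of {firmware capability bit, ethtool link mode} pairs walked by a small loop, which is what makes adding the 50G-R1 and 100G-R2 entries a one-line change per table. A self-contained C model of the same table-driven conversion (the bit positions and mode names are made up for illustration):

#include <stdio.h>

#define SUPPORT_10G	(1u << 1)
#define SUPPORT_25G	(1u << 2)
#define SUPPORT_50G_R1	(1u << 9)

enum link_mode { MODE_10G_SR, MODE_25G_SR, MODE_50G_SR };

struct link_mode_bmap {
	unsigned int support_bit;
	enum link_mode link_mode;
};

static const struct link_mode_bmap sr_map[] = {
	{ SUPPORT_10G,    MODE_10G_SR },
	{ SUPPORT_25G,    MODE_25G_SR },
	{ SUPPORT_50G_R1, MODE_50G_SR },
};

static void convert_setting_sr(unsigned int speed_ability, unsigned long *link_modes)
{
	size_t i;

	for (i = 0; i < sizeof(sr_map) / sizeof(sr_map[0]); i++)
		if (speed_ability & sr_map[i].support_bit)
			*link_modes |= 1ul << sr_map[i].link_mode;	/* stand-in for linkmode_set_bit() */
}

int main(void)
{
	unsigned long modes = 0;

	convert_setting_sr(SUPPORT_10G | SUPPORT_50G_R1, &modes);
	printf("0x%lx\n", modes);	/* bits for the 10G and 50G SR modes set */
	return 0;
}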
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 02c7aab3546e..51979cf71262 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -185,15 +185,22 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_1G_BIT BIT(0)
#define HCLGE_SUPPORT_10G_BIT BIT(1)
#define HCLGE_SUPPORT_25G_BIT BIT(2)
-#define HCLGE_SUPPORT_50G_BIT BIT(3)
-#define HCLGE_SUPPORT_100G_BIT BIT(4)
+#define HCLGE_SUPPORT_50G_R2_BIT BIT(3)
+#define HCLGE_SUPPORT_100G_R4_BIT BIT(4)
/* to be compatible with existing boards */
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
#define HCLGE_SUPPORT_200G_BIT BIT(8)
+#define HCLGE_SUPPORT_50G_R1_BIT BIT(9)
+#define HCLGE_SUPPORT_100G_R2_BIT BIT(10)
+
#define HCLGE_SUPPORT_GE \
(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
+#define HCLGE_SUPPORT_50G_BITS \
+ (HCLGE_SUPPORT_50G_R2_BIT | HCLGE_SUPPORT_50G_R1_BIT)
+#define HCLGE_SUPPORT_100G_BITS \
+ (HCLGE_SUPPORT_100G_R4_BIT | HCLGE_SUPPORT_100G_R2_BIT)
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
@@ -1076,6 +1083,11 @@ struct hclge_mac_speed_map {
u32 speed_fw; /* speed defined in firmware */
};
+struct hclge_link_mode_bmap {
+ u16 support_bit;
+ enum ethtool_link_mode_bit_indices link_mode;
+};
+
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 6fa79898c42c..5e1ef70d54fe 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -595,9 +595,6 @@ static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
.offset = 0,
};
- if (rxbufq->rx_buf_size == IDPF_RX_BUF_2048)
- pp.flags |= PP_FLAG_PAGE_FRAG;
-
return page_pool_create(&pp);
}
@@ -1160,6 +1157,7 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
*/
static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
{
+ bool flow_sch_en;
int err, i;
vport->txq_grps = kcalloc(vport->num_txq_grp,
@@ -1167,6 +1165,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
if (!vport->txq_grps)
return -ENOMEM;
+ flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_SPLITQ_QSCHED);
+
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
@@ -1195,8 +1196,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
q->txq_grp = tx_qgrp;
hash_init(q->sched_buf_hash);
- if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
- VIRTCHNL2_CAP_SPLITQ_QSCHED))
+ if (flow_sch_en)
set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
}
@@ -1215,6 +1215,9 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
tx_qgrp->complq->desc_count = vport->complq_desc_count;
tx_qgrp->complq->vport = vport;
tx_qgrp->complq->txq_grp = tx_qgrp;
+
+ if (flow_sch_en)
+ __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
}
return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 9bc85b2f1709..2c1b051fdc0d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1473,7 +1473,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
/* Populate the queue info buffer with all queue context info */
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- int j;
+ int j, sched_mode;
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
qi[k].queue_id =
@@ -1514,6 +1514,12 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
+ if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
+ sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ else
+ sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+ qi[k].sched_mode = cpu_to_le16(sched_mode);
+
k++;
}
@@ -3140,6 +3146,7 @@ restart:
err_intr_req:
cancel_delayed_work_sync(&adapter->serv_task);
+ cancel_delayed_work_sync(&adapter->mbx_task);
idpf_vport_params_buf_rel(adapter);
err_netdev_alloc:
kfree(adapter->vports);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 818ce76185b2..1a42bfded872 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1404,7 +1404,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
}
pp_params.order = get_order(buf_size);
- pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+ pp_params.flags = PP_FLAG_DMA_MAP;
pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
pp_params.nid = NUMA_NO_NODE;
pp_params.dev = pfvf->dev;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 60d49b0f595f..3cf6589cfdac 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3329,7 +3329,7 @@ static int mtk_device_event(struct notifier_block *n, unsigned long event, void
return NOTIFY_DONE;
found:
- if (!dsa_slave_dev_check(dev))
+ if (!dsa_user_dev_check(dev))
return NOTIFY_DONE;
if (__ethtool_get_link_ksettings(dev, &s))
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index e073d2b5542c..fbb5e9d5af13 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -175,7 +175,7 @@ mtk_flow_get_dsa_port(struct net_device **dev)
if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
return -ENODEV;
- *dev = dsa_port_to_master(dp);
+ *dev = dsa_port_to_conduit(dp);
return dp->index;
#else
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
index 65a78e274009..ea0884186d76 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -32,12 +32,12 @@ static struct mtk_wed_wo_memory_region mem_region[] = {
},
};
-static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+static u32 wo_r32(u32 reg)
{
return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
-static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+static void wo_w32(u32 reg, u32 val)
{
writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
@@ -258,16 +258,12 @@ mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
}
static int
-mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
- struct mtk_wed_wo_memory_region *region)
+mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw)
{
const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data;
const struct mtk_wed_fw_trailer *trailer;
const struct mtk_wed_fw_region *fw_region;
- if (!region->phy_addr || !region->size)
- return 0;
-
trailer_ptr = fw->data + fw->size - sizeof(*trailer);
trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
@@ -275,33 +271,41 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
while (region_ptr < trailer_ptr) {
u32 length;
+ int i;
fw_region = (const struct mtk_wed_fw_region *)region_ptr;
length = le32_to_cpu(fw_region->len);
-
- if (region->phy_addr != le32_to_cpu(fw_region->addr))
+ if (first_region_ptr < ptr + length)
goto next;
- if (region->size < length)
- goto next;
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ struct mtk_wed_wo_memory_region *region;
- if (first_region_ptr < ptr + length)
- goto next;
+ region = &mem_region[i];
+ if (region->phy_addr != le32_to_cpu(fw_region->addr))
+ continue;
- if (region->shared && region->consumed)
- return 0;
+ if (region->size < length)
+ continue;
- if (!region->shared || !region->consumed) {
- memcpy_toio(region->addr, ptr, length);
- region->consumed = true;
- return 0;
+ if (region->shared && region->consumed)
+ break;
+
+ if (!region->shared || !region->consumed) {
+ memcpy_toio(region->addr, ptr, length);
+ region->consumed = true;
+ break;
+ }
}
+
+ if (i == ARRAY_SIZE(mem_region))
+ return -EINVAL;
next:
region_ptr += sizeof(*fw_region);
ptr += length;
}
- return -EINVAL;
+ return 0;
}
static int
@@ -360,24 +364,22 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n",
trailer->chip_id, trailer->num_region);
- for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
- ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]);
- if (ret)
- goto out;
- }
+ ret = mtk_wed_mcu_run_firmware(wo, fw);
+ if (ret)
+ goto out;
/* set the start address */
if (!mtk_wed_is_v3_or_greater(wo->hw) && wo->hw->index)
boot_cr = MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR;
else
boot_cr = MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
- wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
+ wo_w32(boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
/* wo firmware reset */
- wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
+ wo_w32(MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
- val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR) |
+ val = wo_r32(MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR) |
MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
- wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
+ wo_w32(MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
out:
release_firmware(fw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9325b8f00af0..ea58c6917433 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -897,7 +897,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct page_pool_params pp_params = { 0 };
pp_params.order = 0;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = pool_size;
pp_params.nid = node;
pp_params.dev = rq->pdev;
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 2db5949b4c7e..6961cfc55fb9 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1047,7 +1047,8 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
BIT(HWTSTAMP_TX_ON) |
BIT(HWTSTAMP_TX_ONESTEP_SYNC);
ts_info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_ALL);
+ BIT(HWTSTAMP_FILTER_ALL) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index f940895b14e8..45e209a7d083 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1870,6 +1870,50 @@ static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
return last_head - last_tail - 1;
}
+static void lan743x_rx_cfg_b_tstamp_config(struct lan743x_adapter *adapter,
+ int rx_ts_config)
+{
+ int channel_number;
+ int index;
+ u32 data;
+
+ for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
+ channel_number = adapter->rx[index].channel_number;
+ data = lan743x_csr_read(adapter, RX_CFG_B(channel_number));
+ data &= RX_CFG_B_TS_MASK_;
+ data |= rx_ts_config;
+ lan743x_csr_write(adapter, RX_CFG_B(channel_number),
+ data);
+ }
+}
+
+int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
+ int rx_filter)
+{
+ u32 data;
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ lan743x_rx_cfg_b_tstamp_config(adapter,
+ RX_CFG_B_TS_DESCR_EN_);
+ data = lan743x_csr_read(adapter, PTP_RX_TS_CFG);
+ data |= PTP_RX_TS_CFG_EVENT_MSGS_;
+ lan743x_csr_write(adapter, PTP_RX_TS_CFG, data);
+ break;
+ case HWTSTAMP_FILTER_NONE:
+ lan743x_rx_cfg_b_tstamp_config(adapter,
+ RX_CFG_B_TS_NONE_);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ lan743x_rx_cfg_b_tstamp_config(adapter,
+ RX_CFG_B_TS_ALL_RX_);
+ break;
+ default:
+ return -ERANGE;
+ }
+ return 0;
+}
+
void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
bool enable_timestamping,
bool enable_onestep_sync)
@@ -2944,7 +2988,6 @@ static int lan743x_rx_open(struct lan743x_rx *rx)
data |= RX_CFG_B_RX_PAD_2_;
data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
- data |= RX_CFG_B_TS_ALL_RX_;
if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
data |= RX_CFG_B_RDMABL_512_;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 52609fc13ad9..b648461787d2 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -522,6 +522,8 @@
(((u32)(rx_latency)) & 0x0000FFFF)
#define PTP_CAP_INFO (0x0A60)
#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x00000070) >> 4)
+#define PTP_RX_TS_CFG (0x0A68)
+#define PTP_RX_TS_CFG_EVENT_MSGS_ GENMASK(3, 0)
#define PTP_TX_MOD (0x0AA4)
#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ (0x10000000)
@@ -657,6 +659,9 @@
#define RX_CFG_B(channel) (0xC44 + ((channel) << 6))
#define RX_CFG_B_TS_ALL_RX_ BIT(29)
+#define RX_CFG_B_TS_DESCR_EN_ BIT(28)
+#define RX_CFG_B_TS_NONE_ 0
+#define RX_CFG_B_TS_MASK_ (0xCFFFFFFF)
#define RX_CFG_B_RX_PAD_MASK_ (0x03000000)
#define RX_CFG_B_RX_PAD_0_ (0x00000000)
#define RX_CFG_B_RX_PAD_2_ (0x02000000)
@@ -991,6 +996,9 @@ struct lan743x_rx {
struct sk_buff *skb_head, *skb_tail;
};
+int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
+ int rx_filter);
+
/* SGMII Link Speed Duplex status */
enum lan743x_sgmii_lsd {
POWER_DOWN = 0,
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 39e1066ecd5f..2f04bc77a118 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1493,6 +1493,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
temp = lan743x_csr_read(adapter, PTP_TX_MOD2);
temp |= PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_;
lan743x_csr_write(adapter, PTP_TX_MOD2, temp);
+
+	/* Default: RX timestamping disabled */
+ lan743x_rx_set_tstamp_mode(adapter, HWTSTAMP_FILTER_NONE);
+
lan743x_ptp_enable(adapter);
lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
lan743x_csr_write(adapter, PTP_INT_EN_SET,
@@ -1653,6 +1657,9 @@ static void lan743x_ptp_disable(struct lan743x_adapter *adapter)
{
struct lan743x_ptp *ptp = &adapter->ptp;
+ /* Disable Timestamping */
+ lan743x_rx_set_tstamp_mode(adapter, HWTSTAMP_FILTER_NONE);
+
mutex_lock(&ptp->command_lock);
if (!lan743x_ptp_is_enabled(adapter)) {
netif_warn(adapter, drv, adapter->netdev,
@@ -1785,6 +1792,8 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
}
+ ret = lan743x_rx_set_tstamp_mode(adapter, config.rx_filter);
+
if (!ret)
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
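The lan743x hunks above add HWTSTAMP_FILTER_PTP_V2_EVENT to the advertised RX filters and route SIOCSHWTSTAMP requests through lan743x_rx_set_tstamp_mode(), so RX timestamping now defaults to off and is enabled per filter. A minimal user-space sketch of requesting the new filter follows; the socket descriptor and interface name are caller-supplied assumptions, not part of these patches.

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Ask the driver for PTPv2 event RX timestamps on @ifname via @fd,
 * which can be any AF_INET datagram socket.
 */
static int enable_ptp_rx_timestamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* ends up in lan743x_ptp_ioctl() -> lan743x_rx_set_tstamp_mode() */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}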
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 8e4101628fbd..2635ef8958c8 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -671,7 +671,6 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
skb = netdev_alloc_skb(dev, len);
if (unlikely(!skb)) {
netdev_err(dev, "Unable to allocate sk_buff\n");
- err = -ENOMEM;
break;
}
buf_len = len - ETH_FCS_LEN;
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 3ceb57408ed0..8ef5b0241e64 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Renesas device configuration
+# Renesas network device configuration
#
config NET_VENDOR_RENESAS
@@ -25,9 +25,6 @@ config SH_ETH
select PHYLIB
help
Renesas SuperH Ethernet device driver.
- This driver supporting CPUs are:
- - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740, R8A774x, R8A777x and R8A779x.
config RAVB
tristate "Renesas Ethernet AVB support"
@@ -39,8 +36,6 @@ config RAVB
select PHYLIB
help
Renesas Ethernet AVB device driver.
- This driver supports the following SoCs:
- - R8A779x.
config RENESAS_ETHER_SWITCH
tristate "Renesas Ethernet Switch support"
@@ -51,7 +46,5 @@ config RENESAS_ETHER_SWITCH
select PHYLINK
help
Renesas Ethernet Switch device driver.
- This driver supports the following SoCs:
- - R8A779Fx.
endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 592005893464..e8fd85b5fe8f 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -1,14 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for the Renesas device drivers.
+# Makefile for the Renesas network device drivers
#
obj-$(CONFIG_SH_ETH) += sh_eth.o
ravb-objs := ravb_main.o ravb_ptp.o
-
obj-$(CONFIG_RAVB) += ravb.o
rswitch_drv-objs := rswitch.o rcar_gen4_ptp.o
-
obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch_drv.o
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 23f8bc1cd20d..b0950a318c42 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1928,18 +1928,20 @@ static int davinci_emac_probe(struct platform_device *pdev)
goto err_free_rxchan;
ndev->irq = rc;
- rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
- if (!rc)
- eth_hw_addr_set(ndev, priv->mac_addr);
-
+	/* If the MAC address is not present, read it from the SoC registers */
if (!is_valid_ether_addr(priv->mac_addr)) {
- /* Use random MAC if still none obtained. */
- eth_hw_addr_random(ndev);
- memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
- dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
- priv->mac_addr);
+ rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
+ if (!rc)
+ eth_hw_addr_set(ndev, priv->mac_addr);
+
+ if (!is_valid_ether_addr(priv->mac_addr)) {
+ /* Use random MAC if still none obtained. */
+ eth_hw_addr_random(ndev);
+ memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
+ }
}
-
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &ethtool_ops;
netif_napi_add(ndev, &priv->napi, emac_poll);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index de3bb9da3b13..6c4b64227ac8 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1659,6 +1659,19 @@ static void emac_ndo_get_stats64(struct net_device *ndev,
stats->tx_dropped = ndev->stats.tx_dropped;
}
+static int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+
+ ret = snprintf(name, len, "p%d", emac->port_id);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1669,6 +1682,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_set_rx_mode = emac_ndo_set_rx_mode,
.ndo_eth_ioctl = emac_ndo_ioctl,
.ndo_get_stats64 = emac_ndo_get_stats64,
+ .ndo_get_phys_port_name = emac_ndo_get_phys_port_name,
};
/* get emac_port corresponding to eth_node name */
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 23041eeec121..acd9c615d1f4 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -800,57 +800,6 @@ static u8 geneve_get_dsfield(struct sk_buff *skb, struct net_device *dev,
return dsfield;
}
-#if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
- struct net_device *dev,
- struct geneve_sock *gs6,
- struct flowi6 *fl6,
- const struct ip_tunnel_info *info,
- __be16 dport, __be16 sport)
-{
- bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
- struct geneve_dev *geneve = netdev_priv(dev);
- struct dst_entry *dst = NULL;
- struct dst_cache *dst_cache;
- __u8 prio;
-
- if (!gs6)
- return ERR_PTR(-EIO);
-
- memset(fl6, 0, sizeof(*fl6));
- fl6->flowi6_mark = skb->mark;
- fl6->flowi6_proto = IPPROTO_UDP;
- fl6->daddr = info->key.u.ipv6.dst;
- fl6->saddr = info->key.u.ipv6.src;
- fl6->fl6_dport = dport;
- fl6->fl6_sport = sport;
-
- prio = geneve_get_dsfield(skb, dev, info, &use_cache);
- fl6->flowlabel = ip6_make_flowinfo(prio, info->key.label);
- dst_cache = (struct dst_cache *)&info->dst_cache;
- if (use_cache) {
- dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
- if (dst)
- return dst;
- }
- dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6,
- NULL);
- if (IS_ERR(dst)) {
- netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
- return ERR_PTR(-ENETUNREACH);
- }
- if (dst->dev == dev) { /* is this necessary? */
- netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr);
- dst_release(dst);
- return ERR_PTR(-ELOOP);
- }
-
- if (use_cache)
- dst_cache_set_ip6(dst_cache, dst, &fl6->saddr);
- return dst;
-}
-#endif
-
static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
@@ -967,7 +916,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
const struct ip_tunnel_key *key = &info->key;
struct dst_entry *dst = NULL;
- struct flowi6 fl6;
+ struct in6_addr saddr;
+ bool use_cache;
__u8 prio, ttl;
__be16 sport;
int err;
@@ -975,9 +925,18 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (!pskb_inet_may_pull(skb))
return -EINVAL;
+ if (!gs6)
+ return -EIO;
+
+ use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ prio = geneve_get_dsfield(skb, dev, info, &use_cache);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
- geneve->cfg.info.key.tp_dst, sport);
+
+ dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0,
+ &saddr, key, sport,
+ geneve->cfg.info.key.tp_dst, prio,
+ use_cache ?
+ (struct dst_cache *)&info->dst_cache : NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -999,8 +958,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return -ENOMEM;
}
- unclone->key.u.ipv6.dst = fl6.saddr;
- unclone->key.u.ipv6.src = fl6.daddr;
+ unclone->key.u.ipv6.dst = saddr;
+ unclone->key.u.ipv6.src = info->key.u.ipv6.dst;
}
if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -1014,12 +973,10 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return -EMSGSIZE;
}
+ prio = ip_tunnel_ecn_encap(prio, ip_hdr(skb), skb);
if (geneve->cfg.collect_md) {
- prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
} else {
- prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
- ip_hdr(skb), skb);
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
else
@@ -1032,7 +989,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return err;
udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
- &fl6.saddr, &fl6.daddr, prio, ttl,
+ &saddr, &key->u.ipv6.dst, prio, ttl,
info->key.label, sport, geneve->cfg.info.key.tp_dst,
!(info->key.tun_flags & TUNNEL_CSUM));
return 0;
@@ -1126,19 +1083,28 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
} else if (ip_tunnel_info_af(info) == AF_INET6) {
struct dst_entry *dst;
- struct flowi6 fl6;
-
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
+ struct in6_addr saddr;
+ bool use_cache;
+ u8 prio;
+
+ if (!gs6)
+ return -EIO;
+
+ use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ prio = geneve_get_dsfield(skb, dev, info, &use_cache);
sport = udp_flow_src_port(geneve->net, skb,
1, USHRT_MAX, true);
- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
- geneve->cfg.info.key.tp_dst, sport);
+ dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0,
+ &saddr, &info->key, sport,
+ geneve->cfg.info.key.tp_dst, prio,
+ use_cache ? &info->dst_cache : NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
dst_release(dst);
- info->key.u.ipv6.src = fl6.saddr;
+ info->key.u.ipv6.src = saddr;
#endif
} else {
return -EINVAL;
diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 495fbe35b6ce..2772a3098543 100644
--- a/drivers/net/mdio/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -437,7 +437,7 @@ static void xgene_mdio_remove(struct platform_device *pdev)
static struct platform_driver xgene_mdio_driver = {
.driver = {
.name = "xgene-mdio",
- .of_match_table = of_match_ptr(xgene_mdio_of_match),
+ .of_match_table = xgene_mdio_of_match,
.acpi_match_table = ACPI_PTR(xgene_mdio_acpi_match),
},
.probe = xgene_mdio_probe,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 0deefd1573cf..9980517ed8b0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -737,10 +737,11 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
if (skb_shared(skb) || skb_head_is_locked(skb) ||
skb_shinfo(skb)->nr_frags ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- u32 size, len, max_head_size, off;
+ u32 size, len, max_head_size, off, truesize, page_offset;
struct sk_buff *nskb;
struct page *page;
int i, head_off;
+ void *va;
/* We need a private copy of the skb and data buffers since
* the ebpf program can modify it. We segment the original skb
@@ -753,14 +754,17 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
goto drop;
+ size = min_t(u32, skb->len, max_head_size);
+ truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
+
/* Allocate skb head */
- page = page_pool_dev_alloc_pages(rq->page_pool);
- if (!page)
+ va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
+ if (!va)
goto drop;
- nskb = napi_build_skb(page_address(page), PAGE_SIZE);
+ nskb = napi_build_skb(va, truesize);
if (!nskb) {
- page_pool_put_full_page(rq->page_pool, page, true);
+ page_pool_free_va(rq->page_pool, va, true);
goto drop;
}
@@ -768,7 +772,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
skb_copy_header(nskb, skb);
skb_mark_for_recycle(nskb);
- size = min_t(u32, skb->len, max_head_size);
if (skb_copy_bits(skb, 0, nskb->data, size)) {
consume_skb(nskb);
goto drop;
@@ -783,14 +786,18 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
len = skb->len - off;
for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
- page = page_pool_dev_alloc_pages(rq->page_pool);
+ size = min_t(u32, len, PAGE_SIZE);
+ truesize = size;
+
+ page = page_pool_dev_alloc(rq->page_pool, &page_offset,
+ &truesize);
if (!page) {
consume_skb(nskb);
goto drop;
}
- size = min_t(u32, len, PAGE_SIZE);
- skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE);
+ skb_add_rx_frag(nskb, i, page, page_offset, size,
+ truesize);
if (skb_copy_bits(skb, off, page_address(page),
size)) {
consume_skb(nskb);
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 6f7d45e3cfa2..7b526ae16ed0 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2215,63 +2215,6 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
return 0;
}
-#if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
- struct net_device *dev,
- struct vxlan_sock *sock6,
- struct sk_buff *skb, int oif, u8 tos,
- __be32 label,
- const struct in6_addr *daddr,
- struct in6_addr *saddr,
- __be16 dport, __be16 sport,
- struct dst_cache *dst_cache,
- const struct ip_tunnel_info *info)
-{
- bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
- struct dst_entry *ndst;
- struct flowi6 fl6;
-
- if (!sock6)
- return ERR_PTR(-EIO);
-
- if (tos && !info)
- use_cache = false;
- if (use_cache) {
- ndst = dst_cache_get_ip6(dst_cache, saddr);
- if (ndst)
- return ndst;
- }
-
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = oif;
- fl6.daddr = *daddr;
- fl6.saddr = *saddr;
- fl6.flowlabel = ip6_make_flowinfo(tos, label);
- fl6.flowi6_mark = skb->mark;
- fl6.flowi6_proto = IPPROTO_UDP;
- fl6.fl6_dport = dport;
- fl6.fl6_sport = sport;
-
- ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk,
- &fl6, NULL);
- if (IS_ERR(ndst)) {
- netdev_dbg(dev, "no route to %pI6\n", daddr);
- return ERR_PTR(-ENETUNREACH);
- }
-
- if (unlikely(ndst->dev == dev)) {
- netdev_dbg(dev, "circular route to %pI6\n", daddr);
- dst_release(ndst);
- return ERR_PTR(-ELOOP);
- }
-
- *saddr = fl6.saddr;
- if (use_cache)
- dst_cache_set_ip6(dst_cache, ndst, saddr);
- return ndst;
-}
-#endif
-
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan, __be32 vni,
@@ -2325,7 +2268,7 @@ drop:
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *vxlan,
- union vxlan_addr *daddr,
+ int addr_family,
__be16 dst_port, int dst_ifindex, __be32 vni,
struct dst_entry *dst,
u32 rt_flags)
@@ -2345,7 +2288,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
dst_release(dst);
dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni,
- daddr->sa.sa_family, dst_port,
+ addr_family, dst_port,
vxlan->cfg.flags);
if (!dst_vxlan) {
dev->stats.tx_errors++;
@@ -2371,13 +2314,12 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_key key;
struct vxlan_dev *vxlan = netdev_priv(dev);
const struct iphdr *old_iph = ip_hdr(skb);
- union vxlan_addr *dst;
- union vxlan_addr remote_ip;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
unsigned int pkt_len = skb->len;
__be16 src_port = 0, dst_port;
struct dst_entry *ndst = NULL;
+ int addr_family;
__u8 tos, ttl;
int ifindex;
int err;
@@ -2386,20 +2328,15 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
__be32 vni = 0;
-#if IS_ENABLED(CONFIG_IPV6)
- union vxlan_addr local_ip;
- __be32 label;
-#endif
info = skb_tunnel_info(skb);
use_cache = ip_tunnel_dst_cache_usable(skb, info);
if (rdst) {
- dst = &rdst->remote_ip;
memset(&key, 0, sizeof(key));
pkey = &key;
- if (vxlan_addr_any(dst)) {
+ if (vxlan_addr_any(&rdst->remote_ip)) {
if (did_rsc) {
/* short-circuited back to local bridge */
vxlan_encap_bypass(skb, vxlan, vxlan,
@@ -2409,11 +2346,12 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
+ addr_family = vxlan->cfg.saddr.sa.sa_family;
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
vni = (rdst->remote_vni) ? : default_vni;
ifindex = rdst->remote_ifindex;
- if (dst->sa.sa_family == AF_INET) {
+ if (addr_family == AF_INET) {
key.u.ipv4.src = vxlan->cfg.saddr.sin.sin_addr.s_addr;
key.u.ipv4.dst = rdst->remote_ip.sin.sin_addr.s_addr;
} else {
@@ -2427,23 +2365,21 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ttl = ip_tunnel_get_ttl(old_iph, skb);
} else {
ttl = vxlan->cfg.ttl;
- if (!ttl && vxlan_addr_multicast(dst))
+ if (!ttl && vxlan_addr_multicast(&rdst->remote_ip))
ttl = 1;
}
-
tos = vxlan->cfg.tos;
if (tos == 1)
tos = ip_tunnel_get_dsfield(old_iph, skb);
if (tos && !info)
use_cache = false;
- if (dst->sa.sa_family == AF_INET)
+ if (addr_family == AF_INET)
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
else
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
#if IS_ENABLED(CONFIG_IPV6)
- local_ip = vxlan->cfg.saddr;
- label = vxlan->cfg.label;
+ key.label = vxlan->cfg.label;
#endif
} else {
if (!info) {
@@ -2451,17 +2387,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dev->name);
goto drop;
}
- remote_ip.sa.sa_family = ip_tunnel_info_af(info);
- if (remote_ip.sa.sa_family == AF_INET) {
- remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
- } else {
- remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
-#if IS_ENABLED(CONFIG_IPV6)
- local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
-#endif
- }
- dst = &remote_ip;
pkey = &info->key;
+ addr_family = ip_tunnel_info_af(info);
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
@@ -2473,16 +2400,13 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ttl = info->key.ttl;
tos = info->key.tos;
-#if IS_ENABLED(CONFIG_IPV6)
- label = info->key.label;
-#endif
udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
}
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
vxlan->cfg.port_max, true);
rcu_read_lock();
- if (dst->sa.sa_family == AF_INET) {
+ if (addr_family == AF_INET) {
struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
__be16 df = 0;
@@ -2501,7 +2425,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (!info) {
/* Bypass encapsulation if the destination is local */
- err = encap_bypass_if_local(skb, dev, vxlan, dst,
+ err = encap_bypass_if_local(skb, dev, vxlan, AF_INET,
dst_port, ifindex, vni,
&rt->dst, rt->rt_flags);
if (err)
@@ -2555,15 +2479,15 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
+ struct in6_addr saddr;
if (!ifindex)
ifindex = sock6->sock->sk->sk_bound_dev_if;
- ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
- label, &dst->sin6.sin6_addr,
- &local_ip.sin6.sin6_addr,
- dst_port, src_port,
- dst_cache, info);
+ ndst = udp_tunnel6_dst_lookup(skb, dev, vxlan->net, sock6->sock,
+ ifindex, &saddr, pkey,
+ src_port, dst_port, tos,
+ use_cache ? dst_cache : NULL);
if (IS_ERR(ndst)) {
err = PTR_ERR(ndst);
ndst = NULL;
@@ -2573,7 +2497,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (!info) {
u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
- err = encap_bypass_if_local(skb, dev, vxlan, dst,
+ err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6,
dst_port, ifindex, vni,
ndst, rt6i_flags);
if (err)
@@ -2588,16 +2512,13 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
} else if (err) {
if (info) {
struct ip_tunnel_info *unclone;
- struct in6_addr src, dst;
unclone = skb_tunnel_info_unclone(skb);
if (unlikely(!unclone))
goto tx_error;
- src = remote_ip.sin6.sin6_addr;
- dst = local_ip.sin6.sin6_addr;
- unclone->key.u.ipv6.src = src;
- unclone->key.u.ipv6.dst = dst;
+ unclone->key.u.ipv6.src = pkey->u.ipv6.dst;
+ unclone->key.u.ipv6.dst = saddr;
}
vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
@@ -2614,9 +2535,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
- &local_ip.sin6.sin6_addr,
- &dst->sin6.sin6_addr, tos, ttl,
- label, src_port, dst_port, !udp_sum);
+ &saddr, &pkey->u.ipv6.dst, tos, ttl,
+ pkey->label, src_port, dst_port, !udp_sum);
#endif
}
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len);
@@ -3267,10 +3187,14 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
struct dst_entry *ndst;
- ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
- info->key.label, &info->key.u.ipv6.dst,
- &info->key.u.ipv6.src, dport, sport,
- &info->dst_cache, info);
+ if (!sock6)
+ return -EIO;
+
+ ndst = udp_tunnel6_dst_lookup(skb, dev, vxlan->net, sock6->sock,
+ 0, &info->key.u.ipv6.src,
+ &info->key,
+ sport, dport, info->key.tos,
+ &info->dst_cache);
if (IS_ERR(ndst))
return PTR_ERR(ndst);
dst_release(ndst);
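Both the geneve and vxlan hunks above drop their private IPv6 route-lookup helpers in favour of a shared UDP tunnel routine. Its shape, as inferred from the call sites in this diff (the actual declaration lives in include/net/udp_tunnel.h, outside this excerpt, so the parameter names here are illustrative), is roughly:

struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
					 struct net_device *dev,
					 struct net *net, struct socket *sock,
					 int oif, struct in6_addr *saddr,
					 const struct ip_tunnel_key *key,
					 __be16 sport, __be16 dport, u8 dsfield,
					 struct dst_cache *dst_cache);

The helper fills in *saddr and takes an optional dst_cache, which is why the per-driver flowi6 construction and cache handling could be deleted.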
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index cb76053973aa..51a767121b0d 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -570,7 +570,7 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
struct page_pool_params pp_params = {
.order = 0,
- .flags = PP_FLAG_PAGE_FRAG,
+ .flags = 0,
.nid = NUMA_NO_NODE,
.dev = dev->dma_dev,
};
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 6faf27136024..ac15d7c2b200 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -200,13 +200,13 @@ static void channel_free(struct channel *ch)
static void channel_remove(struct channel *ch)
{
struct channel **c = &channels;
- char chid[CTCM_ID_SIZE+1];
+ char chid[CTCM_ID_SIZE];
int ok = 0;
if (ch == NULL)
return;
else
- strncpy(chid, ch->id, CTCM_ID_SIZE);
+ strscpy(chid, ch->id, sizeof(chid));
channel_free(ch);
while (*c) {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index cd783290bde5..6af2511e070c 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -6226,7 +6226,7 @@ static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
if (!new_entry)
goto err_dbg;
- strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
+ strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
new_entry->dbf_info = card->debug;
mutex_lock(&qeth_dbf_list_mutex);
list_add(&new_entry->dbf_list, &qeth_dbf_list);
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index c177322f793d..b9dd35d4b8f5 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -28,7 +28,7 @@
/* Source and Destination MAC of follow-up meta frames.
* Whereas the choice of SMAC only affects the unique identification of the
* switch as sender of meta frames, the DMAC must be an address that is present
- * in the DSA master port's multicast MAC filter.
+ * in the DSA conduit port's multicast MAC filter.
* 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588
* over L2 use this address for some purpose already.
*/
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index e15452df9804..6df715b6e51d 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -152,6 +152,7 @@ struct tcp_request_sock {
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
bool is_mptcp;
+ s8 req_usec_ts;
#if IS_ENABLED(CONFIG_MPTCP)
bool drop_req;
#endif
@@ -257,7 +258,8 @@ struct tcp_sock {
u8 compressed_ack;
u8 dup_ack_counter:2,
tlp_retrans:1, /* TLP is a retransmission */
- unused:5;
+ tcp_usec_ts:1, /* TSval values in usec */
+ unused:4;
u32 chrono_start; /* Start time in jiffies of a TCP chrono */
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u8 chrono_type:2, /* current chronograph type */
@@ -576,4 +578,9 @@ void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
int tcp_sock_set_user_timeout(struct sock *sk, int val);
+static inline bool dst_tcp_usec_ts(const struct dst_entry *dst)
+{
+ return dst_feature(dst, RTAX_FEATURE_TCP_USEC_TS);
+}
+
#endif /* _LINUX_TCP_H */
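dst_tcp_usec_ts() exposes the per-route RTAX_FEATURE_TCP_USEC_TS feature, while the new tcp_usec_ts bit records on the socket whether TSval values use microsecond granularity. A purely hypothetical sketch of wiring the two together at connection setup; the helper below does not exist in this series and only shows how the pieces fit:

/* Hypothetical glue, for illustration only. */
static inline void tcp_adopt_usec_ts(struct tcp_sock *tp,
				     const struct dst_entry *dst)
{
	if (dst_tcp_usec_ts(dst))
		tp->tcp_usec_ts = 1;	/* TSval values in usec */
}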
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 87d92accc26e..bdee5d649cc6 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1,6 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright 2023 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -673,6 +674,8 @@ enum {
#define HCI_TX_POWER_INVALID 127
#define HCI_RSSI_INVALID 127
+#define HCI_SYNC_HANDLE_INVALID 0xffff
+
#define HCI_ROLE_MASTER 0x00
#define HCI_ROLE_SLAVE 0x01
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index c33348ba1657..20988623c5cc 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -350,6 +350,8 @@ struct hci_dev {
struct list_head list;
struct mutex lock;
+ struct ida unset_handle_ida;
+
const char *name;
unsigned long flags;
__u16 id;
@@ -1290,8 +1292,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
return NULL;
}
-static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *hdev,
- __u8 handle)
+static inline struct hci_conn *
+hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1299,22 +1301,22 @@ static inline struct hci_conn *hci_conn_hash_lookup_big_any_dst(struct hci_dev *
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK)
+ if (c->type != ISO_LINK ||
+ !test_bit(HCI_CONN_PA_SYNC, &c->flags))
continue;
- if (handle != BT_ISO_QOS_BIG_UNSET && handle == c->iso_qos.bcast.big) {
+ if (c->iso_qos.bcast.big == big) {
rcu_read_unlock();
return c;
}
}
-
rcu_read_unlock();
return NULL;
}
static inline struct hci_conn *
-hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big)
+hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1326,7 +1328,7 @@ hci_conn_hash_lookup_pa_sync(struct hci_dev *hdev, __u8 big)
!test_bit(HCI_CONN_PA_SYNC, &c->flags))
continue;
- if (c->iso_qos.bcast.big == big) {
+ if (c->sync_handle == sync_handle) {
rcu_read_unlock();
return c;
}
@@ -1377,6 +1379,26 @@ static inline void hci_conn_hash_list_state(struct hci_dev *hdev,
rcu_read_unlock();
}
+static inline void hci_conn_hash_list_flag(struct hci_dev *hdev,
+ hci_conn_func_t func, __u8 type,
+ __u8 flag, void *data)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ if (!func)
+ return;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && test_bit(flag, &c->flags))
+ func(c, data);
+ }
+
+ rcu_read_unlock();
+}
+
static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
@@ -1426,7 +1448,9 @@ int hci_le_create_cis_pending(struct hci_dev *hdev);
int hci_conn_check_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
- u8 role);
+ u8 role, u16 handle);
+struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
+ bdaddr_t *dst, u8 role);
void hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 57eeb07aeb25..6efbc2152146 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -80,6 +80,8 @@ int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
u8 *data, u32 flags, u16 min_interval,
u16 max_interval, u16 sync_interval);
+int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance);
+
int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
u8 instance, bool force);
int hci_disable_advertising_sync(struct hci_dev *hdev);
diff --git a/include/net/dsa.h b/include/net/dsa.h
index d98439ea6146..82135fbdb1e6 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -102,11 +102,11 @@ struct dsa_device_ops {
const char *name;
enum dsa_tag_protocol proto;
/* Some tagging protocols either mangle or shift the destination MAC
- * address, in which case the DSA master would drop packets on ingress
+ * address, in which case the DSA conduit would drop packets on ingress
* if what it understands out of the destination MAC address is not in
* its RX filter.
*/
- bool promisc_on_master;
+ bool promisc_on_conduit;
};
struct dsa_lag {
@@ -236,12 +236,12 @@ struct dsa_bridge {
};
struct dsa_port {
- /* A CPU port is physically connected to a master device.
- * A user port exposed to userspace has a slave device.
+ /* A CPU port is physically connected to a conduit device. A user port
+ * exposes a network device to user-space, called 'user' here.
*/
union {
- struct net_device *master;
- struct net_device *slave;
+ struct net_device *conduit;
+ struct net_device *user;
};
/* Copy of the tagging protocol operations, for quicker access
@@ -249,7 +249,7 @@ struct dsa_port {
*/
const struct dsa_device_ops *tag_ops;
- /* Copies for faster access in master receive hot path */
+ /* Copies for faster access in conduit receive hot path */
struct dsa_switch_tree *dst;
struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
@@ -281,9 +281,9 @@ struct dsa_port {
u8 lag_tx_enabled:1;
- /* Master state bits, valid only on CPU ports */
- u8 master_admin_up:1;
- u8 master_oper_up:1;
+ /* conduit state bits, valid only on CPU ports */
+ u8 conduit_admin_up:1;
+ u8 conduit_oper_up:1;
/* Valid only on user ports */
u8 cpu_port_in_lag:1;
@@ -303,7 +303,7 @@ struct dsa_port {
struct list_head list;
/*
- * Original copy of the master netdev ethtool_ops
+ * Original copy of the conduit netdev ethtool_ops
*/
const struct ethtool_ops *orig_ethtool_ops;
@@ -452,10 +452,10 @@ struct dsa_switch {
const struct dsa_switch_ops *ops;
/*
- * Slave mii_bus and devices for the individual ports.
+ * User mii_bus and devices for the individual ports.
*/
u32 phys_mii_mask;
- struct mii_bus *slave_mii_bus;
+ struct mii_bus *user_mii_bus;
/* Ageing Time limits in msecs */
unsigned int ageing_time_min;
@@ -520,10 +520,10 @@ static inline bool dsa_port_is_unused(struct dsa_port *dp)
return dp->type == DSA_PORT_TYPE_UNUSED;
}
-static inline bool dsa_port_master_is_operational(struct dsa_port *dp)
+static inline bool dsa_port_conduit_is_operational(struct dsa_port *dp)
{
- return dsa_port_is_cpu(dp) && dp->master_admin_up &&
- dp->master_oper_up;
+ return dsa_port_is_cpu(dp) && dp->conduit_admin_up &&
+ dp->conduit_oper_up;
}
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
@@ -713,12 +713,12 @@ static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
return dsa_port_lag_dev_get(dp) == lag->dev;
}
-static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp)
+static inline struct net_device *dsa_port_to_conduit(const struct dsa_port *dp)
{
if (dp->cpu_port_in_lag)
return dsa_port_lag_dev_get(dp->cpu_dp);
- return dp->cpu_dp->master;
+ return dp->cpu_dp->conduit;
}
static inline
@@ -732,7 +732,7 @@ struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
else if (dp->hsr_dev)
return dp->hsr_dev;
- return dp->slave;
+ return dp->user;
}
static inline struct net_device *
@@ -834,9 +834,9 @@ struct dsa_switch_ops {
int (*connect_tag_protocol)(struct dsa_switch *ds,
enum dsa_tag_protocol proto);
- int (*port_change_master)(struct dsa_switch *ds, int port,
- struct net_device *master,
- struct netlink_ext_ack *extack);
+ int (*port_change_conduit)(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack);
/* Optional switch-wide initialization and destruction methods */
int (*setup)(struct dsa_switch *ds);
@@ -1233,11 +1233,11 @@ struct dsa_switch_ops {
int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
/*
- * DSA master tracking operations
+ * DSA conduit tracking operations
*/
- void (*master_state_change)(struct dsa_switch *ds,
- const struct net_device *master,
- bool operational);
+ void (*conduit_state_change)(struct dsa_switch *ds,
+ const struct net_device *conduit,
+ bool operational);
};
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
@@ -1374,9 +1374,9 @@ static inline int dsa_switch_resume(struct dsa_switch *ds)
#endif /* CONFIG_PM_SLEEP */
#if IS_ENABLED(CONFIG_NET_DSA)
-bool dsa_slave_dev_check(const struct net_device *dev);
+bool dsa_user_dev_check(const struct net_device *dev);
#else
-static inline bool dsa_slave_dev_check(const struct net_device *dev)
+static inline bool dsa_user_dev_check(const struct net_device *dev)
{
return false;
}
diff --git a/include/net/dsa_stubs.h b/include/net/dsa_stubs.h
index 361811750a54..6f384897f287 100644
--- a/include/net/dsa_stubs.h
+++ b/include/net/dsa_stubs.h
@@ -13,14 +13,14 @@
extern const struct dsa_stubs *dsa_stubs;
struct dsa_stubs {
- int (*master_hwtstamp_validate)(struct net_device *dev,
- const struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack);
+ int (*conduit_hwtstamp_validate)(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
};
-static inline int dsa_master_hwtstamp_validate(struct net_device *dev,
- const struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack)
+static inline int dsa_conduit_hwtstamp_validate(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
if (!netdev_uses_dsa(dev))
return 0;
@@ -29,18 +29,18 @@ static inline int dsa_master_hwtstamp_validate(struct net_device *dev,
* netdev_uses_dsa() returns true, the dsa_core module is still
* registered, and so, dsa_unregister_stubs() couldn't have run.
* For netdev_uses_dsa() to start returning false, it would imply that
- * dsa_master_teardown() has executed, which requires rtnl_lock().
+ * dsa_conduit_teardown() has executed, which requires rtnl_lock().
*/
ASSERT_RTNL();
- return dsa_stubs->master_hwtstamp_validate(dev, config, extack);
+ return dsa_stubs->conduit_hwtstamp_validate(dev, config, extack);
}
#else
-static inline int dsa_master_hwtstamp_validate(struct net_device *dev,
- const struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack)
+static inline int dsa_conduit_hwtstamp_validate(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return 0;
}
diff --git a/include/net/dst.h b/include/net/dst.h
index f8b8599a0600..f5dfc8fb7b37 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -222,13 +222,6 @@ static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metr
return msecs_to_jiffies(dst_metric(dst, metric));
}
-static inline u32
-dst_allfrag(const struct dst_entry *dst)
-{
- int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
- return ret;
-}
-
static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 086d1193c9ef..d0a2f827d5f2 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -44,7 +44,6 @@ struct inet_connection_sock_af_ops {
struct request_sock *req_unhash,
bool *own_req);
u16 net_header_len;
- u16 net_frag_header_len;
u16 sockaddr_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 98e11958cdff..74db6d97cae1 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -244,7 +244,6 @@ struct inet_sock {
};
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
-#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
enum {
INET_FLAGS_PKTINFO = 0,
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 4a8e578405cb..b14999ff55db 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -67,7 +67,8 @@ struct inet_timewait_sock {
/* And these are ours. */
unsigned int tw_transparent : 1,
tw_flowlabel : 20,
- tw_pad : 3, /* 3 bits hole */
+ tw_usec_ts : 1,
+ tw_pad : 2, /* 2 bits hole */
tw_tos : 8;
u32 tw_txhash;
u32 tw_priority;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index b3444c8a6f74..78d38dd88aba 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1133,12 +1133,6 @@ struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, st
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst,
bool connected);
-struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
- struct net_device *dev,
- struct net *net, struct socket *sock,
- struct in6_addr *saddr,
- const struct ip_tunnel_info *info,
- u8 protocol, bool use_cache);
struct dst_entry *ip6_blackhole_route(struct net *net,
struct dst_entry *orig_dst);
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 8f64adf86f5b..4ebd544ae977 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -8,23 +8,46 @@
/**
* DOC: page_pool allocator
*
- * The page_pool allocator is optimized for the XDP mode that
- * uses one frame per-page, but it can fallback on the
- * regular page allocator APIs.
- *
- * Basic use involves replacing alloc_pages() calls with the
- * page_pool_alloc_pages() call. Drivers should use
- * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
- *
- * The API keeps track of in-flight pages, in order to let API users know
- * when it is safe to free a page_pool object. Thus, API users
- * must call page_pool_put_page() to free the page, or attach
- * the page to a page_pool-aware object like skbs marked with
+ * The page_pool allocator is optimized for recycling the pages or page
+ * fragments used by skb packets and xdp frames.
+ *
+ * Basic use involves replacing alloc_pages() calls with page_pool_alloc(),
+ * which allocates memory with or without page splitting depending on the
+ * requested memory size.
+ *
+ * If the driver knows that it always requires full pages or its allocations are
+ * always smaller than half a page, it can use one of the more specific API
+ * calls:
+ *
+ * 1. page_pool_alloc_pages(): allocate memory without page splitting when the
+ * driver knows that the memory it needs is always bigger than half of the page
+ * allocated from the page pool. There is no cache line dirtying for
+ * 'struct page' when a page is recycled back to the page pool.
+ *
+ * 2. page_pool_alloc_frag(): allocate memory with page splitting when the
+ * driver knows that the memory it needs is always smaller than or equal to
+ * half of the page allocated from the page pool. Page splitting enables memory
+ * saving and thus avoids TLB/cache misses for data access, but there is also
+ * some cost to implementing page splitting, mainly some cache line
+ * dirtying/bouncing for 'struct page' and an atomic operation on
+ * page->pp_frag_count.
+ *
+ * The API keeps track of in-flight pages in order to let API users know when
+ * it is safe to free a page_pool object. Thus, API users must call
+ * page_pool_put_page() or page_pool_free_va() to free the page, or attach
+ * the page to a page_pool-aware object like skbs marked with
* skb_mark_for_recycle().
*
- * API users must call page_pool_put_page() once on a page, as it
- * will either recycle the page, or in case of refcnt > 1, it will
- * release the DMA mapping and in-flight state accounting.
+ * page_pool_put_page() may be called multiple times on the same page if a page
+ * is split into multiple fragments. For the last fragment, it will either
+ * recycle the page, or in case of page->_refcount > 1, it will release the DMA
+ * mapping and in-flight state accounting.
+ *
+ * dma_sync_single_range_for_device() is only called for the last fragment when
+ * page_pool is created with the PP_FLAG_DMA_SYNC_DEV flag, so it depends on the
+ * last freed fragment to do the sync_for_device operation for all fragments in
+ * the same page when a page is split. The API user must set up pool->p.max_len
+ * and pool->p.offset correctly and ensure that page_pool_put_page() is called
+ * with dma_sync_size being -1 for the fragment API.
*/
#ifndef _NET_PAGE_POOL_HELPERS_H
#define _NET_PAGE_POOL_HELPERS_H
@@ -73,6 +96,17 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
return page_pool_alloc_pages(pool, gfp);
}
+/**
+ * page_pool_dev_alloc_frag() - allocate a page fragment.
+ * @pool: pool from which to allocate
+ * @offset: offset to the allocated page
+ * @size: requested size
+ *
+ * Get a page fragment from the page allocator or page_pool caches.
+ *
+ * Return:
+ * Return allocated page fragment, otherwise return NULL.
+ */
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
unsigned int *offset,
unsigned int size)
@@ -82,6 +116,91 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
return page_pool_alloc_frag(pool, offset, size, gfp);
}
+static inline struct page *page_pool_alloc(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size, gfp_t gfp)
+{
+ unsigned int max_size = PAGE_SIZE << pool->p.order;
+ struct page *page;
+
+ if ((*size << 1) > max_size) {
+ *size = max_size;
+ *offset = 0;
+ return page_pool_alloc_pages(pool, gfp);
+ }
+
+ page = page_pool_alloc_frag(pool, offset, *size, gfp);
+ if (unlikely(!page))
+ return NULL;
+
+ /* There is very likely not enough space for another fragment, so append
+ * the remaining size to the current fragment to avoid the truesize
+ * underestimation problem.
+ */
+ if (pool->frag_offset + *size > max_size) {
+ *size = max_size - *offset;
+ pool->frag_offset = max_size;
+ }
+
+ return page;
+}
+
+/**
+ * page_pool_dev_alloc() - allocate a page or a page fragment.
+ * @pool: pool from which to allocate
+ * @offset: offset to the allocated page
+ * @size: in as the requested size, out as the allocated size
+ *
+ * Get a page or a page fragment from the page allocator or page_pool caches
+ * depending on the requested size, in order to allocate memory with the least
+ * memory utilization and performance penalty.
+ *
+ * Return:
+ * Return allocated page or page fragment, otherwise return NULL.
+ */
+static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc(pool, offset, size, gfp);
+}
+
+static inline void *page_pool_alloc_va(struct page_pool *pool,
+ unsigned int *size, gfp_t gfp)
+{
+ unsigned int offset;
+ struct page *page;
+
+ /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
+ page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
+ if (unlikely(!page))
+ return NULL;
+
+ return page_address(page) + offset;
+}
+
+/**
+ * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its
+ * va.
+ * @pool: pool from which to allocate
+ * @size: in as the requested size, out as the allocated size
+ *
+ * This is just a thin wrapper around the page_pool_alloc() API, and
+ * it returns the va of the allocated page or page fragment.
+ *
+ * Return:
+ * Return the va for the allocated page or page fragment, otherwise return NULL.
+ */
+static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
+ unsigned int *size)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_va(pool, size, gfp);
+}
+
/**
* page_pool_get_dma_dir() - Retrieve the stored DMA direction.
* @pool: pool from which page was allocated
@@ -115,28 +234,49 @@ static inline long page_pool_defrag_page(struct page *page, long nr)
long ret;
/* If nr == pp_frag_count then we have cleared all remaining
- * references to the page. No need to actually overwrite it, instead
- * we can leave this to be overwritten by the calling function.
+ * references to the page:
+ * 1. 'nr == 1': no need to actually overwrite it.
+ * 2. 'nr != 1': overwrite it with one, which is the rare case
+ * for pp_frag_count draining.
*
- * The main advantage to doing this is that an atomic_read is
- * generally a much cheaper operation than an atomic update,
- * especially when dealing with a page that may be partitioned
- * into only 2 or 3 pieces.
+ * The main advantage to doing this is that not only do we avoid an atomic
+ * update, as an atomic_read is generally a much cheaper operation than
+ * an atomic update, especially when dealing with a page that may be
+ * partitioned into only 2 or 3 pieces; we also unify the pp_frag_count
+ * handling by ensuring that all pages have been partitioned into only 1
+ * piece initially, and only overwrite it when the page is partitioned
+ * into more than one piece.
*/
- if (atomic_long_read(&page->pp_frag_count) == nr)
+ if (atomic_long_read(&page->pp_frag_count) == nr) {
+ /* As the BUILD_BUG_ON() below ensures nr is always one in the
+ * constant case, we only need to handle the non-constant case
+ * here for pp_frag_count draining, which is a rare case.
+ */
+ BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
+ if (!__builtin_constant_p(nr))
+ atomic_long_set(&page->pp_frag_count, 1);
+
return 0;
+ }
ret = atomic_long_sub_return(nr, &page->pp_frag_count);
WARN_ON(ret < 0);
+
+ /* We are the last user here too; reset pp_frag_count back to 1 to
+ * preserve the invariant that all pages start out partitioned into
+ * 1 piece. Currently this should only happen in the rare case where
+ * the last two fragment users call page_pool_defrag_page().
+ */
+ if (unlikely(!ret))
+ atomic_long_set(&page->pp_frag_count, 1);
+
return ret;
}
-static inline bool page_pool_is_last_frag(struct page_pool *pool,
- struct page *page)
+static inline bool page_pool_is_last_frag(struct page *page)
{
- /* If fragments aren't enabled or count is 0 we were the last user */
- return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
- (page_pool_defrag_page(page, 1) == 0);
+ /* If page_pool_defrag_page() returns 0, we were the last user */
+ return page_pool_defrag_page(page, 1) == 0;
}
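/*
 * Illustrative walkthrough (editorial, not part of the patch) of the invariant
 * described above, assuming no concurrent users of the page:
 */
static void my_defrag_walkthrough(struct page *page)
{
	page_pool_fragment_page(page, 2);	/* pp_frag_count = 2 */

	page_pool_defrag_page(page, 1);		/* returns 1: not the last user */
	page_pool_defrag_page(page, 1);		/* returns 0: last user; the count
						 * is already 1, i.e. the page is
						 * back in its initial single-piece
						 * state
						 */
}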
/**
@@ -161,7 +301,7 @@ static inline void page_pool_put_page(struct page_pool *pool,
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
*/
#ifdef CONFIG_PAGE_POOL
- if (!page_pool_is_last_frag(pool, page))
+ if (!page_pool_is_last_frag(page))
return;
page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
@@ -201,6 +341,20 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
(sizeof(dma_addr_t) > sizeof(unsigned long))
/**
+ * page_pool_free_va() - free a va into the page_pool
+ * @pool: pool from which va was allocated
+ * @va: va to be freed
+ * @allow_direct: freed by the consumer, allow lockless caching
+ *
+ * Free a va allocated from page_pool_alloc_va().
+ */
+static inline void page_pool_free_va(struct page_pool *pool, void *va,
+ bool allow_direct)
+{
+ page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
+}
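/*
 * Illustrative sketch (editorial, not part of the patch): releasing a buffer
 * obtained via page_pool_dev_alloc_va(). Pass allow_direct as true only when
 * called from the pool's NAPI/softirq context.
 */
static void my_free_hdr(struct page_pool *pool, void *va, bool in_napi)
{
	page_pool_free_va(pool, va, in_napi);
}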
+
+/**
* page_pool_get_dma_addr() - Retrieve the stored DMA address.
* @page: page allocated from a page pool
*
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 887e7946a597..6fc5134095ed 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -17,10 +17,8 @@
* Please note DMA-sync-for-CPU is still
* device driver responsibility
*/
-#define PP_FLAG_PAGE_FRAG BIT(2) /* for page frag feature */
#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\
- PP_FLAG_DMA_SYNC_DEV |\
- PP_FLAG_PAGE_FRAG)
+ PP_FLAG_DMA_SYNC_DEV)
/*
* Fast allocation side cache array/stack
@@ -45,7 +43,7 @@ struct pp_alloc_cache {
/**
* struct page_pool_params - page pool parameters
- * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
* @order: 2^order pages on allocation
* @pool_size: size of the ptr_ring
* @nid: NUMA node id to allocate pages from
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index b24ea2d9400b..8a6dbfb23336 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -22,6 +22,7 @@ struct tcf_ct_params {
struct nf_nat_range2 range;
bool ipv4_range;
+ bool put_labels;
u16 ct_action;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index bad304d173a5..993b7fcd4e46 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -166,7 +166,12 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
#define MAX_TCP_KEEPCNT 127
#define MAX_TCP_SYNCNT 127
-#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
+/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
+ * to avoid overflows. This assumes a clock rate of at most 1 MHz.
+ * The default clock is 1 kHz; tcp_usec_ts uses 1 MHz.
+ */
+#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)
+
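/* Editorial note (not part of the patch): with usec timestamps the signed
 * 31-bit comparison window wraps after
 * INT_MAX / USEC_PER_SEC = 2147483647 / 1000000 = 2147 seconds (~36 minutes),
 * hence the replacement of the old TCP_PAWS_24DAYS constant, which only made
 * sense for the 1 kHz clock.
 */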
#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
* after this time. It should be equal
* (or greater than) TCP_TIMEWAIT_LEN
@@ -798,22 +803,31 @@ static inline u64 tcp_clock_us(void)
return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}
-/* This should only be used in contexts where tp->tcp_mstamp is up to date */
-static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+static inline u64 tcp_clock_ms(void)
+{
+ return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
+}
+
+/* The TCP timestamp included in the TS option (RFC 1323) can use either ms
+ * or usec resolution. Each socket carries a flag to select one or the other
+ * resolution, as the route attribute could change at any time.
+ * Each flow must stick to its initial resolution.
+ */
+static inline u32 tcp_clock_ts(bool usec_ts)
{
- return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+ return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}
-/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
-static inline u32 tcp_ns_to_ts(u64 ns)
+static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
- return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+ return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}
-/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
-static inline u32 tcp_time_stamp_raw(void)
+static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
- return tcp_ns_to_ts(tcp_clock_ns());
+ if (tp->tcp_usec_ts)
+ return tp->tcp_mstamp;
+ return tcp_time_stamp_ms(tp);
}
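/*
 * Illustrative sketch (editorial, not part of the patch): composing an
 * outgoing TSval from the socket's chosen resolution. my_tsval() is a
 * hypothetical helper; it assumes the existing tp->tsoffset per-connection
 * timestamp offset field.
 */
static inline u32 my_tsval(const struct tcp_sock *tp)
{
	/* usec or msec resolution, fixed once per flow by tp->tcp_usec_ts */
	return tcp_time_stamp_ts(tp) + tp->tsoffset;
}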
void tcp_mstamp_refresh(struct tcp_sock *tp);
@@ -823,17 +837,30 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
return max_t(s64, t1 - t0, 0);
}
-static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
-{
- return tcp_ns_to_ts(skb->skb_mstamp_ns);
-}
-
/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}
+/* Provide skb TSval in usec or ms unit */
+static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
+{
+ if (usec_ts)
+ return tcp_skb_timestamp_us(skb);
+
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
+}
+
+static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
+{
+ return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
+}
+
+static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
+{
+ return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
+}
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
@@ -1462,13 +1489,15 @@ static inline int tcp_space_from_win(const struct sock *sk, int win)
return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
}
+/* Assume a conservative default of 1200 bytes of payload per 4K page.
+ * This may be adjusted later in tcp_measure_rcv_mss().
+ */
+#define TCP_DEFAULT_SCALING_RATIO ((1200 << TCP_RMEM_TO_WIN_SCALE) / \
+ SKB_TRUESIZE(4096))
+
static inline void tcp_scaling_ratio_init(struct sock *sk)
{
- /* Assume a conservative default of 1200 bytes of payload per 4K page.
- * This may be adjusted later in tcp_measure_rcv_mss().
- */
- tcp_sk(sk)->scaling_ratio = (1200 << TCP_RMEM_TO_WIN_SCALE) /
- SKB_TRUESIZE(4096);
+ tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
}
/* Note: caller must be prepared to deal with negative returns */
@@ -1599,7 +1628,7 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
return true;
if (unlikely(!time_before32(ktime_get_seconds(),
- rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
+ rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
return true;
/*
* Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 4d0578fab01a..d716214fe03d 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -169,6 +169,14 @@ struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
const struct ip_tunnel_key *key,
__be16 sport, __be16 dport, u8 tos,
struct dst_cache *dst_cache);
+struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net,
+ struct socket *sock, int oif,
+ struct in6_addr *saddr,
+ const struct ip_tunnel_key *key,
+ __be16 sport, __be16 dport, u8 dsfield,
+ struct dst_cache *dst_cache);
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
__be16 flags, __be64 tunnel_id,
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index cd4b82458d1b..b3c8383d342d 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -265,7 +265,7 @@ enum {
* Documentation/networking/devlink/devlink-flash.rst
*
*/
-enum {
+enum devlink_flash_overwrite {
DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT,
DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT,
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 9f8a3da0f14f..f4191be137a4 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -1394,7 +1394,9 @@ enum {
enum {
IFLA_DSA_UNSPEC,
- IFLA_DSA_MASTER,
+ IFLA_DSA_CONDUIT,
+ /* Deprecated, use IFLA_DSA_CONDUIT instead */
+ IFLA_DSA_MASTER = IFLA_DSA_CONDUIT,
__IFLA_DSA_MAX,
};
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index ee9c49f949a2..64ecc8a3f9f2 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -23,91 +23,24 @@
#define MPTCP_SUBFLOW_FLAG_CONNECTED _BITUL(7)
#define MPTCP_SUBFLOW_FLAG_MAPVALID _BITUL(8)
-enum {
- MPTCP_SUBFLOW_ATTR_UNSPEC,
- MPTCP_SUBFLOW_ATTR_TOKEN_REM,
- MPTCP_SUBFLOW_ATTR_TOKEN_LOC,
- MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
- MPTCP_SUBFLOW_ATTR_MAP_SEQ,
- MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
- MPTCP_SUBFLOW_ATTR_SSN_OFFSET,
- MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
- MPTCP_SUBFLOW_ATTR_FLAGS,
- MPTCP_SUBFLOW_ATTR_ID_REM,
- MPTCP_SUBFLOW_ATTR_ID_LOC,
- MPTCP_SUBFLOW_ATTR_PAD,
- __MPTCP_SUBFLOW_ATTR_MAX
-};
-
-#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1)
-
-/* netlink interface */
-#define MPTCP_PM_NAME "mptcp_pm"
#define MPTCP_PM_CMD_GRP_NAME "mptcp_pm_cmds"
#define MPTCP_PM_EV_GRP_NAME "mptcp_pm_events"
-#define MPTCP_PM_VER 0x1
-
-/*
- * ATTR types defined for MPTCP
- */
-enum {
- MPTCP_PM_ATTR_UNSPEC,
-
- MPTCP_PM_ATTR_ADDR, /* nested address */
- MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */
- MPTCP_PM_ATTR_SUBFLOWS, /* u32 */
- MPTCP_PM_ATTR_TOKEN, /* u32 */
- MPTCP_PM_ATTR_LOC_ID, /* u8 */
- MPTCP_PM_ATTR_ADDR_REMOTE, /* nested address */
-
- __MPTCP_PM_ATTR_MAX
-};
-
-#define MPTCP_PM_ATTR_MAX (__MPTCP_PM_ATTR_MAX - 1)
-
-enum {
- MPTCP_PM_ADDR_ATTR_UNSPEC,
-
- MPTCP_PM_ADDR_ATTR_FAMILY, /* u16 */
- MPTCP_PM_ADDR_ATTR_ID, /* u8 */
- MPTCP_PM_ADDR_ATTR_ADDR4, /* struct in_addr */
- MPTCP_PM_ADDR_ATTR_ADDR6, /* struct in6_addr */
- MPTCP_PM_ADDR_ATTR_PORT, /* u16 */
- MPTCP_PM_ADDR_ATTR_FLAGS, /* u32 */
- MPTCP_PM_ADDR_ATTR_IF_IDX, /* s32 */
-
- __MPTCP_PM_ADDR_ATTR_MAX
-};
-
-#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
-
-#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
-#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
-#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
-#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3)
-#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4)
-enum {
- MPTCP_PM_CMD_UNSPEC,
+#include <linux/mptcp_pm.h>
- MPTCP_PM_CMD_ADD_ADDR,
- MPTCP_PM_CMD_DEL_ADDR,
- MPTCP_PM_CMD_GET_ADDR,
- MPTCP_PM_CMD_FLUSH_ADDRS,
- MPTCP_PM_CMD_SET_LIMITS,
- MPTCP_PM_CMD_GET_LIMITS,
- MPTCP_PM_CMD_SET_FLAGS,
- MPTCP_PM_CMD_ANNOUNCE,
- MPTCP_PM_CMD_REMOVE,
- MPTCP_PM_CMD_SUBFLOW_CREATE,
- MPTCP_PM_CMD_SUBFLOW_DESTROY,
-
- __MPTCP_PM_CMD_AFTER_LAST
-};
+/* for backward compatibility */
+#define __MPTCP_PM_CMD_AFTER_LAST __MPTCP_PM_CMD_MAX
+#define __MPTCP_ATTR_AFTER_LAST __MPTCP_ATTR_MAX
#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0)
#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1)
+#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
+#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
+#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
+#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3)
+#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4)
+
struct mptcp_info {
__u8 mptcpi_subflows;
__u8 mptcpi_add_addr_signal;
@@ -130,93 +63,6 @@ struct mptcp_info {
__u64 mptcpi_bytes_acked;
};
-/*
- * MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport
- * A new MPTCP connection has been created. It is the good time to allocate
- * memory and send ADD_ADDR if needed. Depending on the traffic-patterns
- * it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent.
- *
- * MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport
- * A MPTCP connection is established (can start new subflows).
- *
- * MPTCP_EVENT_CLOSED: token
- * A MPTCP connection has stopped.
- *
- * MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport]
- * A new address has been announced by the peer.
- *
- * MPTCP_EVENT_REMOVED: token, rem_id
- * An address has been lost by the peer.
- *
- * MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id,
- * saddr4 | saddr6, daddr4 | daddr6, sport,
- * dport, backup, if_idx [, error]
- * A new subflow has been established. 'error' should not be set.
- *
- * MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
- * daddr4 | daddr6, sport, dport, backup, if_idx
- * [, error]
- * A subflow has been closed. An error (copy of sk_err) could be set if an
- * error has been detected for this subflow.
- *
- * MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
- * daddr4 | daddr6, sport, dport, backup, if_idx
- * [, error]
- * The priority of a subflow has changed. 'error' should not be set.
- *
- * MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6
- * A new PM listener is created.
- *
- * MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6
- * A PM listener is closed.
- */
-enum mptcp_event_type {
- MPTCP_EVENT_UNSPEC = 0,
- MPTCP_EVENT_CREATED = 1,
- MPTCP_EVENT_ESTABLISHED = 2,
- MPTCP_EVENT_CLOSED = 3,
-
- MPTCP_EVENT_ANNOUNCED = 6,
- MPTCP_EVENT_REMOVED = 7,
-
- MPTCP_EVENT_SUB_ESTABLISHED = 10,
- MPTCP_EVENT_SUB_CLOSED = 11,
-
- MPTCP_EVENT_SUB_PRIORITY = 13,
-
- MPTCP_EVENT_LISTENER_CREATED = 15,
- MPTCP_EVENT_LISTENER_CLOSED = 16,
-};
-
-enum mptcp_event_attr {
- MPTCP_ATTR_UNSPEC = 0,
-
- MPTCP_ATTR_TOKEN, /* u32 */
- MPTCP_ATTR_FAMILY, /* u16 */
- MPTCP_ATTR_LOC_ID, /* u8 */
- MPTCP_ATTR_REM_ID, /* u8 */
- MPTCP_ATTR_SADDR4, /* be32 */
- MPTCP_ATTR_SADDR6, /* struct in6_addr */
- MPTCP_ATTR_DADDR4, /* be32 */
- MPTCP_ATTR_DADDR6, /* struct in6_addr */
- MPTCP_ATTR_SPORT, /* be16 */
- MPTCP_ATTR_DPORT, /* be16 */
- MPTCP_ATTR_BACKUP, /* u8 */
- MPTCP_ATTR_ERROR, /* u8 */
- MPTCP_ATTR_FLAGS, /* u16 */
- MPTCP_ATTR_TIMEOUT, /* u32 */
- MPTCP_ATTR_IF_IDX, /* s32 */
- MPTCP_ATTR_RESET_REASON,/* u32 */
- MPTCP_ATTR_RESET_FLAGS, /* u32 */
- MPTCP_ATTR_SERVER_SIDE, /* u8 */
-
- __MPTCP_ATTR_AFTER_LAST
-};
-
-#define MPTCP_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1)
-
/* MPTCP Reset reason codes, rfc8684 */
#define MPTCP_RST_EUNSPEC 0
#define MPTCP_RST_EMPTCP 1
diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
new file mode 100644
index 000000000000..0ad598fe940b
--- /dev/null
+++ b/include/uapi/linux/mptcp_pm.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/mptcp.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_MPTCP_PM_H
+#define _UAPI_LINUX_MPTCP_PM_H
+
+#define MPTCP_PM_NAME "mptcp_pm"
+#define MPTCP_PM_VER 1
+
+/**
+ * enum mptcp_event_type
+ * @MPTCP_EVENT_UNSPEC: unused event
+ * @MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ * sport, dport A new MPTCP connection has been created. It is a good time
+ * to allocate memory and send ADD_ADDR if needed. Depending on the
+ * traffic patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED
+ * is sent.
+ * @MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ * sport, dport A MPTCP connection is established (can start new subflows).
+ * @MPTCP_EVENT_CLOSED: token A MPTCP connection has stopped.
+ * @MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] A
+ * new address has been announced by the peer.
+ * @MPTCP_EVENT_REMOVED: token, rem_id An address has been lost by the peer.
+ * @MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id, saddr4 |
+ * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error] A new
+ * subflow has been established. 'error' should not be set.
+ * @MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx [, error] A subflow has been
+ * closed. An error (copy of sk_err) could be set if an error has been
+ * detected for this subflow.
+ * @MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx [, error] The priority of a
+ * subflow has changed. 'error' should not be set.
+ * @MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6 A new PM
+ * listener is created.
+ * @MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6 A PM listener
+ * is closed.
+ */
+enum mptcp_event_type {
+ MPTCP_EVENT_UNSPEC,
+ MPTCP_EVENT_CREATED,
+ MPTCP_EVENT_ESTABLISHED,
+ MPTCP_EVENT_CLOSED,
+ MPTCP_EVENT_ANNOUNCED = 6,
+ MPTCP_EVENT_REMOVED,
+ MPTCP_EVENT_SUB_ESTABLISHED = 10,
+ MPTCP_EVENT_SUB_CLOSED,
+ MPTCP_EVENT_SUB_PRIORITY = 13,
+ MPTCP_EVENT_LISTENER_CREATED = 15,
+ MPTCP_EVENT_LISTENER_CLOSED,
+};
+
+enum {
+ MPTCP_PM_ADDR_ATTR_UNSPEC,
+ MPTCP_PM_ADDR_ATTR_FAMILY,
+ MPTCP_PM_ADDR_ATTR_ID,
+ MPTCP_PM_ADDR_ATTR_ADDR4,
+ MPTCP_PM_ADDR_ATTR_ADDR6,
+ MPTCP_PM_ADDR_ATTR_PORT,
+ MPTCP_PM_ADDR_ATTR_FLAGS,
+ MPTCP_PM_ADDR_ATTR_IF_IDX,
+
+ __MPTCP_PM_ADDR_ATTR_MAX
+};
+#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
+
+enum {
+ MPTCP_SUBFLOW_ATTR_UNSPEC,
+ MPTCP_SUBFLOW_ATTR_TOKEN_REM,
+ MPTCP_SUBFLOW_ATTR_TOKEN_LOC,
+ MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
+ MPTCP_SUBFLOW_ATTR_MAP_SEQ,
+ MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
+ MPTCP_SUBFLOW_ATTR_SSN_OFFSET,
+ MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
+ MPTCP_SUBFLOW_ATTR_FLAGS,
+ MPTCP_SUBFLOW_ATTR_ID_REM,
+ MPTCP_SUBFLOW_ATTR_ID_LOC,
+ MPTCP_SUBFLOW_ATTR_PAD,
+
+ __MPTCP_SUBFLOW_ATTR_MAX
+};
+#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1)
+
+enum {
+ MPTCP_PM_ENDPOINT_ADDR = 1,
+
+ __MPTCP_PM_ENDPOINT_MAX
+};
+#define MPTCP_PM_ENDPOINT_MAX (__MPTCP_PM_ENDPOINT_MAX - 1)
+
+enum {
+ MPTCP_PM_ATTR_UNSPEC,
+ MPTCP_PM_ATTR_ADDR,
+ MPTCP_PM_ATTR_RCV_ADD_ADDRS,
+ MPTCP_PM_ATTR_SUBFLOWS,
+ MPTCP_PM_ATTR_TOKEN,
+ MPTCP_PM_ATTR_LOC_ID,
+ MPTCP_PM_ATTR_ADDR_REMOTE,
+
+ __MPTCP_PM_ATTR_MAX
+};
+#define MPTCP_PM_ATTR_MAX (__MPTCP_PM_ATTR_MAX - 1)
+
+enum mptcp_event_attr {
+ MPTCP_ATTR_UNSPEC,
+ MPTCP_ATTR_TOKEN,
+ MPTCP_ATTR_FAMILY,
+ MPTCP_ATTR_LOC_ID,
+ MPTCP_ATTR_REM_ID,
+ MPTCP_ATTR_SADDR4,
+ MPTCP_ATTR_SADDR6,
+ MPTCP_ATTR_DADDR4,
+ MPTCP_ATTR_DADDR6,
+ MPTCP_ATTR_SPORT,
+ MPTCP_ATTR_DPORT,
+ MPTCP_ATTR_BACKUP,
+ MPTCP_ATTR_ERROR,
+ MPTCP_ATTR_FLAGS,
+ MPTCP_ATTR_TIMEOUT,
+ MPTCP_ATTR_IF_IDX,
+ MPTCP_ATTR_RESET_REASON,
+ MPTCP_ATTR_RESET_FLAGS,
+ MPTCP_ATTR_SERVER_SIDE,
+
+ __MPTCP_ATTR_MAX
+};
+#define MPTCP_ATTR_MAX (__MPTCP_ATTR_MAX - 1)
+
+enum {
+ MPTCP_PM_CMD_UNSPEC,
+ MPTCP_PM_CMD_ADD_ADDR,
+ MPTCP_PM_CMD_DEL_ADDR,
+ MPTCP_PM_CMD_GET_ADDR,
+ MPTCP_PM_CMD_FLUSH_ADDRS,
+ MPTCP_PM_CMD_SET_LIMITS,
+ MPTCP_PM_CMD_GET_LIMITS,
+ MPTCP_PM_CMD_SET_FLAGS,
+ MPTCP_PM_CMD_ANNOUNCE,
+ MPTCP_PM_CMD_REMOVE,
+ MPTCP_PM_CMD_SUBFLOW_CREATE,
+ MPTCP_PM_CMD_SUBFLOW_DESTROY,
+
+ __MPTCP_PM_CMD_MAX
+};
+#define MPTCP_PM_CMD_MAX (__MPTCP_PM_CMD_MAX - 1)
+
+#endif /* _UAPI_LINUX_MPTCP_PM_H */
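/*
 * Illustrative sketch (editorial, not part of the patch): userspace keeps
 * including <linux/mptcp.h>, which now pulls in this generated header. A
 * hypothetical helper decoding the endpoint flags:
 */
#include <stdio.h>
#include <linux/mptcp.h>

static void print_endpoint_flags(unsigned int flags)
{
	if (flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
		printf("signal ");
	if (flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
		printf("subflow ");
	if (flags & MPTCP_PM_ADDR_FLAG_BACKUP)
		printf("backup ");
	if (flags & MPTCP_PM_ADDR_FLAG_FULLMESH)
		printf("fullmesh ");
	if (flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)
		printf("implicit ");
	printf("\n");
}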
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 51c13cf9c5ae..3b687d20c9ed 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -502,13 +502,17 @@ enum {
#define RTAX_MAX (__RTAX_MAX - 1)
-#define RTAX_FEATURE_ECN (1 << 0)
-#define RTAX_FEATURE_SACK (1 << 1)
-#define RTAX_FEATURE_TIMESTAMP (1 << 2)
-#define RTAX_FEATURE_ALLFRAG (1 << 3)
-
-#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \
- RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG)
+#define RTAX_FEATURE_ECN (1 << 0)
+#define RTAX_FEATURE_SACK (1 << 1) /* unused */
+#define RTAX_FEATURE_TIMESTAMP (1 << 2) /* unused */
+#define RTAX_FEATURE_ALLFRAG (1 << 3) /* unused */
+#define RTAX_FEATURE_TCP_USEC_TS (1 << 4)
+
+#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | \
+ RTAX_FEATURE_SACK | \
+ RTAX_FEATURE_TIMESTAMP | \
+ RTAX_FEATURE_ALLFRAG | \
+ RTAX_FEATURE_TCP_USEC_TS)
struct rta_session {
__u8 proto;
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index d1d08da6331a..8aa3916e14f6 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -170,6 +170,7 @@ enum tcp_fastopen_client_fail {
#define TCPI_OPT_ECN 8 /* ECN was negotiated at TCP session init */
#define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */
#define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */
+#define TCPI_OPT_USEC_TS 64 /* usec timestamps */
/*
* Sender's congestion state indicating normal or abnormal situations
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index 466353b3dde4..54e7fb1a4ee5 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -116,8 +116,6 @@ static int atm_uevent(const struct device *cdev, struct kobj_uevent_env *env)
return -ENODEV;
adev = to_atm_dev(cdev);
- if (!adev)
- return -ENODEV;
if (add_uevent_var(env, "NAME=%s%d", adev->type, adev->number))
return -ENOMEM;
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index 2134f92bd7ac..5d698f19868c 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -109,7 +109,7 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
struct hci_conn *hcon;
u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
- hcon = hci_conn_add(hdev, AMP_LINK, dst, role);
+ hcon = hci_conn_add(hdev, AMP_LINK, dst, role, __next_handle(mgr));
if (!hcon)
return NULL;
@@ -117,7 +117,6 @@ struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
hcon->state = BT_CONNECT;
hcon->attempt++;
- hcon->handle = __next_handle(mgr);
hcon->remote_id = remote_id;
hcon->amp_mgr = amp_mgr_get(mgr);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 73470cc3518a..2cee330188ce 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -153,6 +153,9 @@ static void hci_conn_cleanup(struct hci_conn *conn)
hci_conn_hash_del(hdev, conn);
+ if (HCI_CONN_HANDLE_UNSET(conn->handle))
+ ida_free(&hdev->unset_handle_ida, conn->handle);
+
if (conn->cleanup)
conn->cleanup(conn);
@@ -169,13 +172,11 @@ static void hci_conn_cleanup(struct hci_conn *conn)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
}
- hci_conn_del_sysfs(conn);
-
debugfs_remove_recursive(conn->debugfs);
- hci_dev_put(hdev);
+ hci_conn_del_sysfs(conn);
- hci_conn_put(conn);
+ hci_dev_put(hdev);
}
static void hci_acl_create_connection(struct hci_conn *conn)
@@ -759,6 +760,7 @@ static int terminate_big_sync(struct hci_dev *hdev, void *data)
bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
+ hci_disable_per_advertising_sync(hdev, d->bis);
hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
/* Only terminate BIG if it has been created */
@@ -814,6 +816,17 @@ static int big_terminate_sync(struct hci_dev *hdev, void *data)
return 0;
}
+static void find_bis(struct hci_conn *conn, void *data)
+{
+ struct iso_list_data *d = data;
+
+ /* Ignore if BIG doesn't match */
+ if (d->big != conn->iso_qos.bcast.big)
+ return;
+
+ d->count++;
+}
+
static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
{
struct iso_list_data *d;
@@ -825,10 +838,27 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c
if (!d)
return -ENOMEM;
+ memset(d, 0, sizeof(*d));
d->big = big;
d->sync_handle = conn->sync_handle;
- d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
- d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);
+
+ if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
+ hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
+ HCI_CONN_PA_SYNC, d);
+
+ if (!d->count)
+ d->pa_sync_term = true;
+
+ d->count = 0;
+ }
+
+ if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
+ hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
+ HCI_CONN_BIG_SYNC, d);
+
+ if (!d->count)
+ d->big_sync_term = true;
+ }
ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
terminate_big_destroy);
@@ -864,12 +894,6 @@ static void bis_cleanup(struct hci_conn *conn)
hci_le_terminate_big(hdev, conn);
} else {
- bis = hci_conn_hash_lookup_big_any_dst(hdev,
- conn->iso_qos.bcast.big);
-
- if (bis)
- return;
-
hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
conn);
}
@@ -928,31 +952,18 @@ static void cis_cleanup(struct hci_conn *conn)
hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}
-static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev)
+static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
{
- struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *c;
- u16 handle = HCI_CONN_HANDLE_MAX + 1;
-
- rcu_read_lock();
-
- list_for_each_entry_rcu(c, &h->list, list) {
- /* Find the first unused handle */
- if (handle == 0xffff || c->handle != handle)
- break;
- handle++;
- }
- rcu_read_unlock();
-
- return handle;
+ return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
+ U16_MAX, GFP_ATOMIC);
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
- u8 role)
+ u8 role, u16 handle)
{
struct hci_conn *conn;
- BT_DBG("%s dst %pMR", hdev->name, dst);
+ bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
conn = kzalloc(sizeof(*conn), GFP_KERNEL);
if (!conn)
@@ -960,7 +971,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
bacpy(&conn->dst, dst);
bacpy(&conn->src, &hdev->bdaddr);
- conn->handle = hci_conn_hash_alloc_unset(hdev);
+ conn->handle = handle;
conn->hdev = hdev;
conn->type = type;
conn->role = role;
@@ -973,6 +984,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
conn->rssi = HCI_RSSI_INVALID;
conn->tx_power = HCI_TX_POWER_INVALID;
conn->max_tx_power = HCI_TX_POWER_INVALID;
+ conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -1044,6 +1056,20 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
return conn;
}
+struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
+ bdaddr_t *dst, u8 role)
+{
+ int handle;
+
+ bt_dev_dbg(hdev, "dst %pMR", dst);
+
+ handle = hci_conn_hash_alloc_unset(hdev);
+ if (unlikely(handle < 0))
+ return NULL;
+
+ return hci_conn_add(hdev, type, dst, role, handle);
+}
+
static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
{
if (!reason)
@@ -1247,6 +1273,12 @@ void hci_conn_failed(struct hci_conn *conn, u8 status)
break;
}
+ /* In case BIG/PA sync failed, clear the conn flags so that
+ * the conns will be correctly cleaned up by the ISO layer
+ */
+ test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
+ test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
+
conn->state = BT_CLOSED;
hci_connect_cfm(conn, status);
hci_conn_del(conn);
@@ -1274,6 +1306,9 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
if (conn->abort_reason)
return conn->abort_reason;
+ if (HCI_CONN_HANDLE_UNSET(conn->handle))
+ ida_free(&hdev->unset_handle_ida, conn->handle);
+
conn->handle = handle;
return 0;
@@ -1381,7 +1416,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
if (conn) {
bacpy(&conn->dst, dst);
} else {
- conn = hci_conn_add(hdev, LE_LINK, dst, role);
+ conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
if (!conn)
return ERR_PTR(-ENOMEM);
hci_conn_hold(conn);
@@ -1486,6 +1521,18 @@ static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
/* Allocate BIS if not set */
if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
+ if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) {
+ conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
+
+ if (conn) {
+ /* If the BIG handle is already matched to an advertising
+ * handle, do not allocate a new one.
+ */
+ qos->bcast.bis = conn->iso_qos.bcast.bis;
+ return 0;
+ }
+ }
+
/* Find an unused adv set to advertise BIS, skip instance 0x00
* since it is reserved as general purpose set.
*/
@@ -1546,7 +1593,7 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
memcmp(conn->le_per_adv_data, base, base_len)))
return ERR_PTR(-EADDRINUSE);
- conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+ conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
if (!conn)
return ERR_PTR(-ENOMEM);
@@ -1590,7 +1637,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
BT_DBG("requesting refresh of dst_addr");
- conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
+ conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
if (!conn)
return ERR_PTR(-ENOMEM);
@@ -1638,7 +1685,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
- acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
+ acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
if (!acl)
return ERR_PTR(-ENOMEM);
}
@@ -1698,7 +1745,7 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
sco = hci_conn_hash_lookup_ba(hdev, type, dst);
if (!sco) {
- sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
+ sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
if (!sco) {
hci_conn_drop(acl);
return ERR_PTR(-ENOMEM);
@@ -1890,7 +1937,7 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
qos->ucast.cis);
if (!cis) {
- cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
+ cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
if (!cis)
return ERR_PTR(-ENOMEM);
cis->cleanup = cis_cleanup;
@@ -2139,7 +2186,7 @@ int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
} pdu;
int err;
- if (num_bis > sizeof(pdu.bis))
+ if (num_bis < 0x01 || num_bis > sizeof(pdu.bis))
return -EINVAL;
err = qos_set_big(hdev, qos);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 195aea2198a9..65601aa52e0d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2535,6 +2535,8 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
mutex_init(&hdev->lock);
mutex_init(&hdev->req_lock);
+ ida_init(&hdev->unset_handle_ida);
+
INIT_LIST_HEAD(&hdev->mesh_pending);
INIT_LIST_HEAD(&hdev->mgmt_pending);
INIT_LIST_HEAD(&hdev->reject_list);
@@ -2789,6 +2791,7 @@ void hci_release_dev(struct hci_dev *hdev)
hci_codec_list_clear(&hdev->local_codecs);
hci_dev_unlock(hdev);
+ ida_destroy(&hdev->unset_handle_ida);
ida_simple_remove(&hci_index_ida, hdev->id);
kfree_skb(hdev->sent_cmd);
kfree_skb(hdev->recv_event);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 1e1c9147356c..0849e0dafa95 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2335,8 +2335,8 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
}
} else {
if (!conn) {
- conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
- HCI_ROLE_MASTER);
+ conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
+ HCI_ROLE_MASTER);
if (!conn)
bt_dev_err(hdev, "no memory for new connection");
}
@@ -3151,8 +3151,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
&ev->bdaddr,
BDADDR_BREDR)) {
- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
- HCI_ROLE_SLAVE);
+ conn = hci_conn_add_unset(hdev, ev->link_type,
+ &ev->bdaddr, HCI_ROLE_SLAVE);
if (!conn) {
bt_dev_err(hdev, "no memory for new conn");
goto unlock;
@@ -3317,8 +3317,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
&ev->bdaddr);
if (!conn) {
- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
- HCI_ROLE_SLAVE);
+ conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
+ HCI_ROLE_SLAVE);
if (!conn) {
bt_dev_err(hdev, "no memory for new connection");
goto unlock;
@@ -5890,7 +5890,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
if (status)
goto unlock;
- conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
+ conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
if (!conn) {
bt_dev_err(hdev, "no memory for new connection");
goto unlock;
@@ -5952,17 +5952,11 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
- if (handle > HCI_CONN_HANDLE_MAX) {
- bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
- HCI_CONN_HANDLE_MAX);
- status = HCI_ERROR_INVALID_PARAMETERS;
- }
-
/* All connection failure handling is taken care of by the
* hci_conn_failed function which is triggered by the HCI
* request completion callbacks used for connecting.
*/
- if (status)
+ if (status || hci_conn_set_handle(conn, handle))
goto unlock;
/* Drop the connection if it has been aborted */
@@ -5986,7 +5980,6 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
mgmt_device_connected(hdev, conn, NULL, 0);
conn->sec_level = BT_SECURITY_LOW;
- conn->handle = handle;
conn->state = BT_CONFIG;
/* Store current advertising instance as connection advertising instance
@@ -6603,7 +6596,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
struct hci_ev_le_pa_sync_established *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
- struct hci_conn *bis;
+ struct hci_conn *pa_sync;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
@@ -6620,20 +6613,19 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
if (!(flags & HCI_PROTO_DEFER))
goto unlock;
- /* Add connection to indicate the PA sync event */
- bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
- HCI_ROLE_SLAVE);
+ if (ev->status) {
+ /* Add connection to indicate the failed PA sync event */
+ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
+ HCI_ROLE_SLAVE);
- if (!bis)
- goto unlock;
+ if (!pa_sync)
+ goto unlock;
- if (ev->status)
- set_bit(HCI_CONN_PA_SYNC_FAILED, &bis->flags);
- else
- set_bit(HCI_CONN_PA_SYNC, &bis->flags);
+ set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
- /* Notify connection to iso layer */
- hci_connect_cfm(bis, ev->status);
+ /* Notify iso layer */
+ hci_connect_cfm(pa_sync, ev->status);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -7020,12 +7012,12 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
if (!cis) {
- cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
+ cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
+ cis_handle);
if (!cis) {
hci_le_reject_cis(hdev, ev->cis_handle);
goto unlock;
}
- cis->handle = cis_handle;
}
cis->iso_qos.ucast.cig = ev->cig_id;
@@ -7113,7 +7105,6 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
{
struct hci_evt_le_big_sync_estabilished *ev = data;
struct hci_conn *bis;
- struct hci_conn *pa_sync;
int i;
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
@@ -7124,15 +7115,6 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
- if (!ev->status) {
- pa_sync = hci_conn_hash_lookup_pa_sync(hdev, ev->handle);
- if (pa_sync)
- /* Also mark the BIG sync established event on the
- * associated PA sync hcon
- */
- set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags);
- }
-
for (i = 0; i < ev->num_bis; i++) {
u16 handle = le16_to_cpu(ev->bis[i]);
__le32 interval;
@@ -7140,10 +7122,9 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
bis = hci_conn_hash_lookup_handle(hdev, handle);
if (!bis) {
bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
- HCI_ROLE_SLAVE);
+ HCI_ROLE_SLAVE, handle);
if (!bis)
continue;
- bis->handle = handle;
}
if (ev->status != 0x42)
@@ -7186,15 +7167,42 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
struct hci_evt_le_big_info_adv_report *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
+ struct hci_conn *pa_sync;
bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
hci_dev_lock(hdev);
mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
- if (!(mask & HCI_LM_ACCEPT))
+ if (!(mask & HCI_LM_ACCEPT)) {
hci_le_pa_term_sync(hdev, ev->sync_handle);
+ goto unlock;
+ }
+ if (!(flags & HCI_PROTO_DEFER))
+ goto unlock;
+
+ pa_sync = hci_conn_hash_lookup_pa_sync_handle
+ (hdev,
+ le16_to_cpu(ev->sync_handle));
+
+ if (pa_sync)
+ goto unlock;
+
+ /* Add connection to indicate the PA sync event */
+ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
+ HCI_ROLE_SLAVE);
+
+ if (!pa_sync)
+ goto unlock;
+
+ pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
+ set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
+
+ /* Notify iso layer */
+ hci_connect_cfm(pa_sync, 0x00);
+
+unlock:
hci_dev_unlock(hdev);
}
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index a15ab0b874a9..d85a7091a116 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -152,7 +152,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
struct sk_buff *skb;
int err = 0;
- bt_dev_dbg(hdev, "Opcode 0x%4x", opcode);
+ bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
hci_req_init(&req, hdev);
@@ -248,7 +248,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
if (IS_ERR(skb)) {
if (!event)
- bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
+ bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
PTR_ERR(skb));
return PTR_ERR(skb);
}
@@ -1312,7 +1312,7 @@ int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
return hci_enable_ext_advertising_sync(hdev, instance);
}
-static int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
+int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
struct hci_cp_le_set_per_adv_enable cp;
struct adv_info *adv = NULL;
@@ -4264,12 +4264,12 @@ static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
{
struct hci_cp_le_set_host_feature cp;
- if (!iso_capable(hdev))
+ if (!cis_capable(hdev))
return 0;
memset(&cp, 0, sizeof(cp));
- /* Isochronous Channels (Host Support) */
+ /* Connected Isochronous Channels (Host Support) */
cp.bit_number = 32;
cp.bit_value = 1;
@@ -5232,6 +5232,17 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
if (conn->type == AMP_LINK)
return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
+ if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
+ /* This is a BIS connection; hci_conn_del() will
+ * do the necessary cleanup.
+ */
+ hci_dev_lock(hdev);
+ hci_conn_failed(conn, reason);
+ hci_dev_unlock(hdev);
+
+ return 0;
+ }
+
memset(&cp, 0, sizeof(cp));
cp.handle = cpu_to_le16(conn->handle);
cp.reason = reason;
@@ -5384,21 +5395,6 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
err = hci_reject_conn_sync(hdev, conn, reason);
break;
case BT_OPEN:
- hci_dev_lock(hdev);
-
- /* Cleanup bis or pa sync connections */
- if (test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags) ||
- test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags)) {
- hci_conn_failed(conn, reason);
- } else if (test_bit(HCI_CONN_PA_SYNC, &conn->flags) ||
- test_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
- conn->state = BT_CLOSED;
- hci_disconn_cfm(conn, reason);
- hci_conn_del(conn);
- }
-
- hci_dev_unlock(hdev);
- return 0;
case BT_BOUND:
break;
default:
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 15b33579007c..367e32fe30eb 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -35,7 +35,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p", conn);
+ bt_dev_dbg(hdev, "conn %p", conn);
conn->dev.type = &bt_link;
conn->dev.class = &bt_class;
@@ -48,27 +48,30 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p", conn);
+ bt_dev_dbg(hdev, "conn %p", conn);
if (device_is_registered(&conn->dev))
return;
dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
- if (device_add(&conn->dev) < 0) {
+ if (device_add(&conn->dev) < 0)
bt_dev_err(hdev, "failed to register connection device");
- return;
- }
-
- hci_dev_hold(hdev);
}
void hci_conn_del_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- if (!device_is_registered(&conn->dev))
+ bt_dev_dbg(hdev, "conn %p", conn);
+
+ if (!device_is_registered(&conn->dev)) {
+ /* If device_add() has *not* succeeded, use *only* put_device()
+ * to drop the reference count.
+ */
+ put_device(&conn->dev);
return;
+ }
while (1) {
struct device *dev;
@@ -80,9 +83,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
put_device(dev);
}
- device_del(&conn->dev);
-
- hci_dev_put(hdev);
+ device_unregister(&conn->dev);
}
static void bt_host_release(struct device *dev)
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 71248163ce9a..07b80e97aead 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -14,6 +14,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/iso.h>
+#include "eir.h"
static const struct proto_ops iso_sock_ops;
@@ -47,6 +48,7 @@ static void iso_sock_kill(struct sock *sk);
#define EIR_SERVICE_DATA_LENGTH 4
#define BASE_MAX_LENGTH (HCI_MAX_PER_AD_LENGTH - EIR_SERVICE_DATA_LENGTH)
+#define EIR_BAA_SERVICE_UUID 0x1851
/* iso_pinfo flags values */
enum {
@@ -77,6 +79,7 @@ static struct bt_iso_qos default_qos;
static bool check_ucast_qos(struct bt_iso_qos *qos);
static bool check_bcast_qos(struct bt_iso_qos *qos);
static bool iso_match_sid(struct sock *sk, void *data);
+static bool iso_match_sync_handle(struct sock *sk, void *data);
static void iso_sock_disconn(struct sock *sk);
/* ---- ISO timers ---- */
@@ -789,8 +792,7 @@ static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr,
BT_DBG("sk %p bc_sid %u bc_num_bis %u", sk, sa->iso_bc->bc_sid,
sa->iso_bc->bc_num_bis);
- if (addr_len > sizeof(*sa) + sizeof(*sa->iso_bc) ||
- sa->iso_bc->bc_num_bis < 0x01 || sa->iso_bc->bc_num_bis > 0x1f)
+ if (addr_len > sizeof(*sa) + sizeof(*sa->iso_bc))
return -EINVAL;
bacpy(&iso_pi(sk)->dst, &sa->iso_bc->bc_bdaddr);
@@ -1202,7 +1204,6 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
test_bit(HCI_CONN_PA_SYNC, &pi->conn->hcon->flags)) {
iso_conn_big_sync(sk);
sk->sk_state = BT_LISTEN;
- set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
} else {
iso_conn_defer_accept(pi->conn->hcon);
sk->sk_state = BT_CONFIG;
@@ -1461,6 +1462,8 @@ static int iso_sock_getsockopt(struct socket *sock, int level, int optname,
len = min_t(unsigned int, len, base_len);
if (copy_to_user(optval, base, len))
err = -EFAULT;
+ if (put_user(len, optlen))
+ err = -EFAULT;
break;
@@ -1579,6 +1582,7 @@ static void iso_conn_ready(struct iso_conn *conn)
struct sock *sk = conn->sk;
struct hci_ev_le_big_sync_estabilished *ev = NULL;
struct hci_ev_le_pa_sync_established *ev2 = NULL;
+ struct hci_evt_le_big_info_adv_report *ev3 = NULL;
struct hci_conn *hcon;
BT_DBG("conn %p", conn);
@@ -1603,14 +1607,20 @@ static void iso_conn_ready(struct iso_conn *conn)
parent = iso_get_sock_listen(&hcon->src,
&hcon->dst,
iso_match_big, ev);
- } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags) ||
- test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
+ } else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
ev2 = hci_recv_event_data(hcon->hdev,
HCI_EV_LE_PA_SYNC_ESTABLISHED);
if (ev2)
parent = iso_get_sock_listen(&hcon->src,
&hcon->dst,
iso_match_sid, ev2);
+ } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
+ ev3 = hci_recv_event_data(hcon->hdev,
+ HCI_EVT_LE_BIG_INFO_ADV_REPORT);
+ if (ev3)
+ parent = iso_get_sock_listen(&hcon->src,
+ &hcon->dst,
+ iso_match_sync_handle, ev3);
}
if (!parent)
@@ -1650,11 +1660,13 @@ static void iso_conn_ready(struct iso_conn *conn)
hcon->sync_handle = iso_pi(parent)->sync_handle;
}
- if (ev2 && !ev2->status) {
- iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle;
+ if (ev3) {
iso_pi(sk)->qos = iso_pi(parent)->qos;
+ iso_pi(sk)->qos.bcast.encryption = ev3->encryption;
+ hcon->iso_qos = iso_pi(sk)->qos;
iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis;
memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS);
+ set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags);
}
bacpy(&iso_pi(sk)->dst, &hcon->dst);
@@ -1774,12 +1786,16 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
ev3 = hci_recv_event_data(hdev, HCI_EV_LE_PER_ADV_REPORT);
if (ev3) {
+ size_t base_len = ev3->length;
+ u8 *base;
+
sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
iso_match_sync_handle_pa_report, ev3);
-
- if (sk) {
- memcpy(iso_pi(sk)->base, ev3->data, ev3->length);
- iso_pi(sk)->base_len = ev3->length;
+ base = eir_get_service_data(ev3->data, ev3->length,
+ EIR_BAA_SERVICE_UUID, &base_len);
+ if (base && sk && base_len <= sizeof(iso_pi(sk)->base)) {
+ memcpy(iso_pi(sk)->base, base, base_len);
+ iso_pi(sk)->base_len = base_len;
}
} else {
sk = iso_get_sock_listen(&hdev->bdaddr, BDADDR_ANY, NULL, NULL);
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index abbafa6194ca..630e3023273b 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -150,10 +150,7 @@ static bool read_supported_features(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
- if (IS_ERR_OR_NULL(skb)) {
- if (!skb)
- skb = ERR_PTR(-EIO);
-
+ if (IS_ERR(skb)) {
bt_dev_err(hdev, "Failed to read MSFT supported features (%ld)",
PTR_ERR(skb));
return false;
@@ -353,7 +350,7 @@ static void msft_remove_addr_filters_sync(struct hci_dev *hdev, u8 handle)
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
- if (IS_ERR_OR_NULL(skb)) {
+ if (IS_ERR(skb)) {
kfree(address_filter);
continue;
}
@@ -442,11 +439,8 @@ static int msft_remove_monitor_sync(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
- if (IS_ERR_OR_NULL(skb)) {
- if (!skb)
- return -EIO;
+ if (IS_ERR(skb))
return PTR_ERR(skb);
- }
return msft_le_cancel_monitor_advertisement_cb(hdev, hdev->msft_opcode,
monitor, skb);
@@ -559,7 +553,7 @@ static int msft_add_monitor_sync(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp,
HCI_CMD_TIMEOUT);
- if (IS_ERR_OR_NULL(skb)) {
+ if (IS_ERR(skb)) {
err = PTR_ERR(skb);
goto out_free;
}
@@ -740,10 +734,10 @@ static int msft_cancel_address_filter_sync(struct hci_dev *hdev, void *data)
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
- if (IS_ERR_OR_NULL(skb)) {
+ if (IS_ERR(skb)) {
bt_dev_err(hdev, "MSFT: Failed to cancel address (%pMR) filter",
&address_filter->bdaddr);
- err = -EIO;
+ err = PTR_ERR(skb);
goto done;
}
kfree_skb(skb);
@@ -893,7 +887,7 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp,
HCI_CMD_TIMEOUT);
- if (IS_ERR_OR_NULL(skb)) {
+ if (IS_ERR(skb)) {
bt_dev_err(hdev, "Failed to enable address %pMR filter",
&address_filter->bdaddr);
skb = NULL;
diff --git a/net/core/dev.c b/net/core/dev.c
index 1025dc79bc49..a37a932a3e14 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1057,7 +1057,7 @@ EXPORT_SYMBOL(dev_valid_name);
* __dev_alloc_name - allocate a name for a device
* @net: network namespace to allocate the device name in
* @name: name format string
- * @buf: scratch buffer and result name string
+ * @res: result name string
*
* Passed a format string - eg "lt%d" - it will try to find a suitable
* id. It scans the list of devices to build up a free map, then chooses
@@ -1068,106 +1068,79 @@ EXPORT_SYMBOL(dev_valid_name);
* Returns the number of the unit assigned or a negative errno code.
*/
-static int __dev_alloc_name(struct net *net, const char *name, char *buf)
+static int __dev_alloc_name(struct net *net, const char *name, char *res)
{
int i = 0;
const char *p;
const int max_netdevices = 8*PAGE_SIZE;
unsigned long *inuse;
struct net_device *d;
+ char buf[IFNAMSIZ];
- if (!dev_valid_name(name))
- return -EINVAL;
-
+ /* Verify the string as this thing may have come from the user.
+ * There must be one "%d" and no other "%" characters.
+ */
p = strchr(name, '%');
- if (p) {
- /*
- * Verify the string as this thing may have come from
- * the user. There must be either one "%d" and no other "%"
- * characters.
- */
- if (p[1] != 'd' || strchr(p + 2, '%'))
- return -EINVAL;
-
- /* Use one page as a bit array of possible slots */
- inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
- if (!inuse)
- return -ENOMEM;
+ if (!p || p[1] != 'd' || strchr(p + 2, '%'))
+ return -EINVAL;
- for_each_netdev(net, d) {
- struct netdev_name_node *name_node;
+ /* Use one page as a bit array of possible slots */
+ inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
+ if (!inuse)
+ return -ENOMEM;
- netdev_for_each_altname(d, name_node) {
- if (!sscanf(name_node->name, name, &i))
- continue;
- if (i < 0 || i >= max_netdevices)
- continue;
+ for_each_netdev(net, d) {
+ struct netdev_name_node *name_node;
- /* avoid cases where sscanf is not exact inverse of printf */
- snprintf(buf, IFNAMSIZ, name, i);
- if (!strncmp(buf, name_node->name, IFNAMSIZ))
- __set_bit(i, inuse);
- }
- if (!sscanf(d->name, name, &i))
+ netdev_for_each_altname(d, name_node) {
+ if (!sscanf(name_node->name, name, &i))
continue;
if (i < 0 || i >= max_netdevices)
continue;
- /* avoid cases where sscanf is not exact inverse of printf */
+ /* avoid cases where sscanf is not exact inverse of printf */
snprintf(buf, IFNAMSIZ, name, i);
- if (!strncmp(buf, d->name, IFNAMSIZ))
+ if (!strncmp(buf, name_node->name, IFNAMSIZ))
__set_bit(i, inuse);
}
+ if (!sscanf(d->name, name, &i))
+ continue;
+ if (i < 0 || i >= max_netdevices)
+ continue;
- i = find_first_zero_bit(inuse, max_netdevices);
- bitmap_free(inuse);
+ /* avoid cases where sscanf is not exact inverse of printf */
+ snprintf(buf, IFNAMSIZ, name, i);
+ if (!strncmp(buf, d->name, IFNAMSIZ))
+ __set_bit(i, inuse);
}
- snprintf(buf, IFNAMSIZ, name, i);
- if (!netdev_name_in_use(net, buf))
- return i;
+ i = find_first_zero_bit(inuse, max_netdevices);
+ bitmap_free(inuse);
+ if (i == max_netdevices)
+ return -ENFILE;
- /* It is possible to run out of possible slots
- * when the name is long and there isn't enough space left
- * for the digits, or if all bits are used.
- */
- return -ENFILE;
+ snprintf(res, IFNAMSIZ, name, i);
+ return i;
}
+/* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
static int dev_prep_valid_name(struct net *net, struct net_device *dev,
- const char *want_name, char *out_name)
+ const char *want_name, char *out_name,
+ int dup_errno)
{
- int ret;
-
if (!dev_valid_name(want_name))
return -EINVAL;
- if (strchr(want_name, '%')) {
- ret = __dev_alloc_name(net, want_name, out_name);
- return ret < 0 ? ret : 0;
- } else if (netdev_name_in_use(net, want_name)) {
- return -EEXIST;
- } else if (out_name != want_name) {
- strscpy(out_name, want_name, IFNAMSIZ);
- }
+ if (strchr(want_name, '%'))
+ return __dev_alloc_name(net, want_name, out_name);
+ if (netdev_name_in_use(net, want_name))
+ return -dup_errno;
+ if (out_name != want_name)
+ strscpy(out_name, want_name, IFNAMSIZ);
return 0;
}
-static int dev_alloc_name_ns(struct net *net,
- struct net_device *dev,
- const char *name)
-{
- char buf[IFNAMSIZ];
- int ret;
-
- BUG_ON(!net);
- ret = __dev_alloc_name(net, name, buf);
- if (ret >= 0)
- strscpy(dev->name, buf, IFNAMSIZ);
- return ret;
-}
-
/**
* dev_alloc_name - allocate a name for a device
* @dev: device
@@ -1184,20 +1157,17 @@ static int dev_alloc_name_ns(struct net *net,
int dev_alloc_name(struct net_device *dev, const char *name)
{
- return dev_alloc_name_ns(dev_net(dev), dev, name);
+ return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE);
}
EXPORT_SYMBOL(dev_alloc_name);
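/*
 * Illustrative sketch (editorial, not part of the patch): a driver asking for
 * a templated name before registration. my_register() is hypothetical.
 */
static int my_register(struct net_device *dev)
{
	int unit;

	unit = dev_alloc_name(dev, "myeth%d");	/* writes e.g. "myeth0" into dev->name */
	if (unit < 0)
		return unit;

	return register_netdev(dev);
}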
static int dev_get_valid_name(struct net *net, struct net_device *dev,
const char *name)
{
- char buf[IFNAMSIZ];
int ret;
- ret = dev_prep_valid_name(net, dev, name, buf);
- if (ret >= 0)
- strscpy(dev->name, buf, IFNAMSIZ);
- return ret;
+ ret = dev_prep_valid_name(net, dev, name, dev->name, EEXIST);
+ return ret < 0 ? ret : 0;
}
/**
@@ -11135,7 +11105,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
/* We get here if we can't use the current device name */
if (!pat)
goto out;
- err = dev_prep_valid_name(net, dev, pat, new_name);
+ err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST);
if (err < 0)
goto out;
}
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b46aedc36939..feeddf95f450 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -382,7 +382,7 @@ static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr)
if (err)
return err;
- err = dsa_master_hwtstamp_validate(dev, &kernel_cfg, &extack);
+ err = dsa_conduit_hwtstamp_validate(dev, &kernel_cfg, &extack);
if (err) {
if (extack._msg)
netdev_err(dev, "%s\n", extack._msg);
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 8a9868ea5067..5e409b98aba0 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -376,6 +376,14 @@ static void page_pool_set_pp_info(struct page_pool *pool,
{
page->pp = pool;
page->pp_magic |= PP_SIGNATURE;
+
+ /* Ensure all pages start out split into one fragment:
+ * page_pool_set_pp_info() is only called once for every page when it
+ * is allocated from the page allocator, and page_pool_fragment_page()
+ * dirties the same cache line as page->pp_magic above, so
+ * the overhead is negligible.
+ */
+ page_pool_fragment_page(page, 1);
if (pool->p.init_callback)
pool->p.init_callback(page, pool->p.init_arg);
}
@@ -672,7 +680,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
struct page *page = virt_to_head_page(data[i]);
/* It is not the last user for the page frag case */
- if (!page_pool_is_last_frag(pool, page))
+ if (!page_pool_is_last_frag(page))
continue;
page = __page_pool_put_page(pool, page, -1, false);
@@ -748,8 +756,7 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
unsigned int max_size = PAGE_SIZE << pool->p.order;
struct page *page = pool->frag_page;
- if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
- size > max_size))
+ if (WARN_ON(size > max_size))
return NULL;
size = ALIGN(size, dma_get_cache_alignment());
@@ -802,7 +809,7 @@ static void page_pool_empty_ring(struct page_pool *pool)
}
}
-static void page_pool_free(struct page_pool *pool)
+static void __page_pool_destroy(struct page_pool *pool)
{
if (pool->disconnect)
pool->disconnect(pool);
@@ -853,7 +860,7 @@ static int page_pool_release(struct page_pool *pool)
page_pool_scrub(pool);
inflight = page_pool_inflight(pool);
if (!inflight)
- page_pool_free(pool);
+ __page_pool_destroy(pool);
return inflight;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 975c9a6ffb4a..c52ddd6891d9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5765,7 +5765,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
/* In general, avoid mixing page_pool and non-page_pool allocated
* pages within the same SKB. Additionally avoid dealing with clones
* with page_pool pages, in case the SKB is using page_pool fragment
- * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+ * references (page_pool_alloc_frag()). Since we only take full page
* references for cloned SKBs at the moment that would result in
* inconsistent reference counts.
* In theory we could take full references if @from is cloned and
diff --git a/net/core/sock.c b/net/core/sock.c
index 290165954379..1d28e3e87970 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3035,21 +3035,29 @@ EXPORT_SYMBOL(sk_wait_data);
* @amt: pages to allocate
* @kind: allocation type
*
- * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
+ * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc.
+ *
+ * Unlike the globally shared limits among sockets of the same protocol,
+ * consuming the budget of one memcg has no direct effect on the others.
+ * So be optimistic about the memcg's tolerance, and leave it to the callers
+ * to decide whether or not to raise allocated, via sk_under_memory_pressure()
+ * or its variants.
*/
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
{
- bool memcg_charge = mem_cgroup_sockets_enabled && sk->sk_memcg;
+ struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL;
struct proto *prot = sk->sk_prot;
- bool charged = true;
+ bool charged = false;
long allocated;
sk_memory_allocated_add(sk, amt);
allocated = sk_memory_allocated(sk);
- if (memcg_charge &&
- !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt,
- gfp_memcg_charge())))
- goto suppress_allocation;
+
+ if (memcg) {
+ if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge()))
+ goto suppress_allocation;
+ charged = true;
+ }
/* Under limit. */
if (allocated <= sk_prot_mem_limits(sk, 0)) {
@@ -3065,7 +3073,14 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
if (allocated > sk_prot_mem_limits(sk, 2))
goto suppress_allocation;
- /* guarantee minimum buffer size under pressure */
+ /* Guarantee minimum buffer size under pressure (either global
+ * or memcg) to make sure features described in RFC 7323 (TCP
+ * Extensions for High Performance) work properly.
+ *
+ * This rule does NOT apply once the global or memcg's hard limit is
+ * exceeded, or else a DoS attack could take place by spawning lots
+ * of sockets whose usage stays below the minimum buffer size.
+ */
if (kind == SK_MEM_RECV) {
if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
return 1;
@@ -3084,8 +3099,17 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
if (sk_has_memory_pressure(sk)) {
u64 alloc;
- if (!sk_under_memory_pressure(sk))
+ /* The following 'average' heuristic is within the
+ * scope of global accounting, so it only makes
+ * sense for global memory pressure.
+ */
+ if (!sk_under_global_memory_pressure(sk))
return 1;
+
+ /* Try to be fair among all the sockets under global
+ * pressure by allowing the ones whose usage is below
+ * average to raise their allocation.
+ */
alloc = sk_sockets_allocated_read_positive(sk);
if (sk_prot_mem_limits(sk, 2) > alloc *
sk_mem_pages(sk->sk_wmem_queued +
@@ -3104,8 +3128,8 @@ suppress_allocation:
*/
if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
/* Force charge with __GFP_NOFAIL */
- if (memcg_charge && !charged) {
- mem_cgroup_charge_skmem(sk->sk_memcg, amt,
+ if (memcg && !charged) {
+ mem_cgroup_charge_skmem(memcg, amt,
gfp_memcg_charge() | __GFP_NOFAIL);
}
return 1;
@@ -3117,8 +3141,8 @@ suppress_allocation:
sk_memory_allocated_sub(sk, amt);
- if (memcg_charge && charged)
- mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
+ if (charged)
+ mem_cgroup_uncharge_skmem(memcg, amt);
return 0;
}
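
Taken together, the sock.c hunks rearrange __sk_mem_raise_allocated() around one rule: charge the memcg first, because its budget is private to the cgroup, then apply the globally shared limits, and uncharge on the suppress path. The sketch below is a condensed userspace model with invented helper names; it deliberately folds the minimum-buffer guarantee and the global-pressure fairness checks into a single placeholder.

#include <stdbool.h>

struct model_sock {
	bool has_memcg;
	long allocated;		/* protocol-wide pages including this request */
	long limit_soft;	/* sk_prot_mem_limits(sk, 0) */
	long limit_hard;	/* sk_prot_mem_limits(sk, 2) */
};

/* Stubs standing in for mem_cgroup_charge_skmem() and friends. */
static bool model_memcg_charge(struct model_sock *sk, int pages)
{
	(void)sk; (void)pages;
	return true;
}

static void model_memcg_uncharge(struct model_sock *sk, int pages)
{
	(void)sk; (void)pages;
}

/* Placeholder for the min-buffer and fairness heuristics above. */
static bool model_pressure_heuristics_allow(struct model_sock *sk)
{
	(void)sk;
	return true;
}

static bool model_raise_allocated(struct model_sock *sk, int pages)
{
	bool charged = false;

	/* The memcg budget is private to the cgroup, so a failed charge is
	 * final regardless of the global state.
	 */
	if (sk->has_memcg) {
		if (!model_memcg_charge(sk, pages))
			goto suppress;
		charged = true;
	}

	if (sk->allocated <= sk->limit_soft)	/* under limit */
		return true;
	if (sk->allocated > sk->limit_hard)	/* over the hard limit */
		goto suppress;

	/* Between the two limits: the minimum-buffer guarantee and the
	 * global-pressure fairness checks from the hunks above decide.
	 */
	if (model_pressure_heuristics_allow(sk))
		return true;

suppress:
	if (charged)
		model_memcg_uncharge(sk, pages);	/* mirrors the uncharge on failure */
	return false;
}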
diff --git a/net/devlink/dev.c b/net/devlink/dev.c
index dc8039ca2b38..4fc7adb32663 100644
--- a/net/devlink/dev.c
+++ b/net/devlink/dev.c
@@ -492,7 +492,7 @@ free_msg:
return -EMSGSIZE;
}
-int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_reload_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
enum devlink_reload_action action;
@@ -658,7 +658,7 @@ nla_put_failure:
return err;
}
-int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct sk_buff *msg;
@@ -679,7 +679,7 @@ int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info)
return genlmsg_reply(msg, info);
}
-int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
const struct devlink_ops *ops = devlink->ops;
@@ -1108,7 +1108,7 @@ static int devlink_flash_component_get(struct devlink *devlink,
return 0;
}
-int devlink_nl_cmd_flash_update(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_flash_update_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *nla_overwrite_mask, *nla_file_name;
struct devlink_flash_update_params params = {};
@@ -1351,7 +1351,7 @@ static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_
[DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG },
};
-int devlink_nl_cmd_selftests_run(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_selftests_run_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1];
struct devlink *devlink = info->user_ptr[0];
diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h
index 741d1bf1bec8..183dbe3807ab 100644
--- a/net/devlink/devl_internal.h
+++ b/net/devlink/devl_internal.h
@@ -227,67 +227,3 @@ int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
/* Linecards */
unsigned int devlink_linecard_index(struct devlink_linecard *linecard);
-
-/* Devlink nl cmds */
-int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_flash_update(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_selftests_run(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_port_del_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_resource_set(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_resource_dump(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb);
-int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_region_del(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb);
-int devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb);
-int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
- struct genl_info *info);
-int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_rate_new_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb, struct genl_info *info);
-int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb,
- struct genl_info *info);
diff --git a/net/devlink/dpipe.c b/net/devlink/dpipe.c
index 431227c412e5..a72a9292efc5 100644
--- a/net/devlink/dpipe.c
+++ b/net/devlink/dpipe.c
@@ -289,7 +289,7 @@ err_table_put:
return err;
}
-int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_dpipe_table_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
const char *table_name = NULL;
@@ -562,8 +562,8 @@ send_done:
return genlmsg_reply(dump_ctx.skb, info);
}
-int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_dpipe_entries_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_dpipe_table *table;
@@ -712,8 +712,8 @@ err_table_put:
return err;
}
-int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_dpipe_headers_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
@@ -746,8 +746,8 @@ static int devlink_dpipe_table_counters_set(struct devlink *devlink,
return 0;
}
-int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_dpipe_table_counters_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
const char *table_name;
diff --git a/net/devlink/health.c b/net/devlink/health.c
index 89405e59f45c..695df61f8ac2 100644
--- a/net/devlink/health.c
+++ b/net/devlink/health.c
@@ -452,8 +452,8 @@ int devlink_nl_health_reporter_get_dumpit(struct sk_buff *skb,
devlink_nl_health_reporter_get_dump_one);
}
-int devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_health_reporter_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_health_reporter *reporter;
@@ -655,8 +655,8 @@ devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
}
EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update);
-int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_health_reporter_recover_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_health_reporter *reporter;
@@ -1108,8 +1108,8 @@ nla_put_failure:
return err;
}
-int devlink_nl_cmd_health_reporter_diagnose_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_health_reporter_diagnose_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_health_reporter *reporter;
@@ -1163,8 +1163,8 @@ devlink_health_reporter_get_from_cb_lock(struct netlink_callback *cb)
return reporter;
}
-int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
+int devlink_nl_health_reporter_dump_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
{
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
struct devlink_health_reporter *reporter;
@@ -1202,8 +1202,8 @@ unlock:
return err;
}
-int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_health_reporter_dump_clear_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_health_reporter *reporter;
@@ -1219,8 +1219,8 @@ int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
return 0;
}
-int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_health_reporter_test_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_health_reporter *reporter;
diff --git a/net/devlink/linecard.c b/net/devlink/linecard.c
index 9ff1813f88c5..2f1c317b64cd 100644
--- a/net/devlink/linecard.c
+++ b/net/devlink/linecard.c
@@ -369,8 +369,7 @@ out:
return err;
}
-int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_linecard_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct netlink_ext_ack *extack = info->extack;
struct devlink *devlink = info->user_ptr[0];
diff --git a/net/devlink/netlink.c b/net/devlink/netlink.c
index 809bfc3ba8c4..d0b90ebc8b15 100644
--- a/net/devlink/netlink.c
+++ b/net/devlink/netlink.c
@@ -13,75 +13,6 @@ static const struct genl_multicast_group devlink_nl_mcgrps[] = {
[DEVLINK_MCGRP_CONFIG] = { .name = DEVLINK_GENL_MCGRP_CONFIG_NAME },
};
-static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
- [DEVLINK_ATTR_UNSPEC] = { .strict_start_type =
- DEVLINK_ATTR_TRAP_POLICER_ID },
- [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
- [DEVLINK_ATTR_PORT_TYPE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_PORT_TYPE_AUTO,
- DEVLINK_PORT_TYPE_IB),
- [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
- [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
- [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
- [DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
- [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
- [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
- [DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_ESWITCH_MODE_LEGACY,
- DEVLINK_ESWITCH_MODE_SWITCHDEV),
- [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
- [DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64},
- [DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64},
- [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
- [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
- [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
- [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
- [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
- [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
- [DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK] =
- NLA_POLICY_BITFIELD32(DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS),
- [DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_TRAP_ACTION] = { .type = NLA_U8 },
- [DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_NETNS_PID] = { .type = NLA_U32 },
- [DEVLINK_ATTR_NETNS_FD] = { .type = NLA_U32 },
- [DEVLINK_ATTR_NETNS_ID] = { .type = NLA_U32 },
- [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .type = NLA_U8 },
- [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 },
- [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 },
- [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 },
- [DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED },
- [DEVLINK_ATTR_RELOAD_ACTION] = NLA_POLICY_RANGE(NLA_U8, DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
- DEVLINK_RELOAD_ACTION_MAX),
- [DEVLINK_ATTR_RELOAD_LIMITS] = NLA_POLICY_BITFIELD32(DEVLINK_RELOAD_LIMITS_VALID_MASK),
- [DEVLINK_ATTR_PORT_FLAVOUR] = { .type = NLA_U16 },
- [DEVLINK_ATTR_PORT_PCI_PF_NUMBER] = { .type = NLA_U16 },
- [DEVLINK_ATTR_PORT_PCI_SF_NUMBER] = { .type = NLA_U32 },
- [DEVLINK_ATTR_PORT_CONTROLLER_NUMBER] = { .type = NLA_U32 },
- [DEVLINK_ATTR_RATE_TYPE] = { .type = NLA_U16 },
- [DEVLINK_ATTR_RATE_TX_SHARE] = { .type = NLA_U64 },
- [DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64 },
- [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32 },
- [DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING },
- [DEVLINK_ATTR_SELFTESTS] = { .type = NLA_NESTED },
- [DEVLINK_ATTR_RATE_TX_PRIORITY] = { .type = NLA_U32 },
- [DEVLINK_ATTR_RATE_TX_WEIGHT] = { .type = NLA_U32 },
- [DEVLINK_ATTR_REGION_DIRECT] = { .type = NLA_FLAG },
-};
-
int devlink_nl_put_nested_handle(struct sk_buff *msg, struct net *net,
struct devlink *devlink, int attrtype)
{
@@ -191,7 +122,7 @@ unlock:
int devlink_nl_pre_doit(const struct genl_split_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
- return __devlink_nl_pre_doit(skb, info, ops->internal_flags);
+ return __devlink_nl_pre_doit(skb, info, 0);
}
int devlink_nl_pre_doit_port(const struct genl_split_ops *ops,
@@ -287,269 +218,12 @@ int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
return devlink_nl_inst_iter_dumpit(msg, cb, flags, dump_one);
}
-static const struct genl_small_ops devlink_nl_small_ops[40] = {
- {
- .cmd = DEVLINK_CMD_PORT_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_set_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_RATE_SET,
- .doit = devlink_nl_cmd_rate_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_RATE_NEW,
- .doit = devlink_nl_cmd_rate_new_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_RATE_DEL,
- .doit = devlink_nl_cmd_rate_del_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_PORT_SPLIT,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_split_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_PORT_UNSPLIT,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_unsplit_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_PORT_NEW,
- .doit = devlink_nl_cmd_port_new_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_PORT_DEL,
- .doit = devlink_nl_cmd_port_del_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
-
- {
- .cmd = DEVLINK_CMD_LINECARD_SET,
- .doit = devlink_nl_cmd_linecard_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_SB_POOL_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_pool_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_port_pool_set_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_occ_snapshot_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_occ_max_clear_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_ESWITCH_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_eswitch_get_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_ESWITCH_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_eswitch_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_dpipe_table_get,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_dpipe_entries_get,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_dpipe_headers_get,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_dpipe_table_counters_set,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_RESOURCE_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_resource_set,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_RESOURCE_DUMP,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_resource_dump,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_RELOAD,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_reload,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_PARAM_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_param_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_PORT_PARAM_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_param_get_doit,
- .dumpit = devlink_nl_cmd_port_param_get_dumpit,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_PORT_PARAM_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_param_set_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_REGION_NEW,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_region_new,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_REGION_DEL,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_region_del,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_REGION_READ,
- .validate = GENL_DONT_VALIDATE_STRICT |
- GENL_DONT_VALIDATE_DUMP_STRICT,
- .dumpit = devlink_nl_cmd_region_read_dumpit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_set_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_recover_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_diagnose_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
- .validate = GENL_DONT_VALIDATE_STRICT |
- GENL_DONT_VALIDATE_DUMP_STRICT,
- .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_test_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- },
- {
- .cmd = DEVLINK_CMD_FLASH_UPDATE,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_flash_update,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_TRAP_SET,
- .doit = devlink_nl_cmd_trap_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_TRAP_GROUP_SET,
- .doit = devlink_nl_cmd_trap_group_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_TRAP_POLICER_SET,
- .doit = devlink_nl_cmd_trap_policer_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_SELFTESTS_RUN,
- .doit = devlink_nl_cmd_selftests_run,
- .flags = GENL_ADMIN_PERM,
- },
- /* -- No new ops here! Use split ops going forward! -- */
-};
-
struct genl_family devlink_nl_family __ro_after_init = {
.name = DEVLINK_GENL_NAME,
.version = DEVLINK_GENL_VERSION,
- .maxattr = DEVLINK_ATTR_MAX,
- .policy = devlink_nl_policy,
.netnsok = true,
.parallel_ops = true,
- .pre_doit = devlink_nl_pre_doit,
- .post_doit = devlink_nl_post_doit,
.module = THIS_MODULE,
- .small_ops = devlink_nl_small_ops,
- .n_small_ops = ARRAY_SIZE(devlink_nl_small_ops),
.split_ops = devlink_nl_ops,
.n_split_ops = ARRAY_SIZE(devlink_nl_ops),
.resv_start_op = DEVLINK_CMD_SELFTESTS_RUN + 1,
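
The net effect of this hunk is that devlink stops registering a family-wide nla_policy, a flat genl_small_ops table and family-level pre/post-doit hooks; every command is now described by generated genl_split_ops entries (see netlink_gen.c below) that carry their own policy, maxattr and per-op hooks. As a rough sketch of that shape for a hypothetical family (all EXAMPLE_* names and handlers are invented; only the genl_split_ops fields and GENL_CMD_CAP_* flags are the upstream ones):

#include <net/genetlink.h>

enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_ID,
};

enum {
	EXAMPLE_CMD_THING_GET = 1,
};

static int example_nl_thing_get_doit(struct sk_buff *skb, struct genl_info *info);
static int example_nl_thing_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);

/* One command exposed as two split ops, one for the "do" path and one for
 * the "dump" path, each with its own per-op policy and maxattr.
 */
static const struct nla_policy example_thing_get_nl_policy[EXAMPLE_ATTR_ID + 1] = {
	[EXAMPLE_ATTR_ID] = { .type = NLA_U32, },
};

static const struct genl_split_ops example_nl_ops[] = {
	{
		.cmd		= EXAMPLE_CMD_THING_GET,
		.validate	= GENL_DONT_VALIDATE_STRICT,
		.doit		= example_nl_thing_get_doit,
		.policy		= example_thing_get_nl_policy,
		.maxattr	= EXAMPLE_ATTR_ID,
		.flags		= GENL_CMD_CAP_DO,
	},
	{
		.cmd		= EXAMPLE_CMD_THING_GET,
		.dumpit		= example_nl_thing_get_dumpit,
		.flags		= GENL_CMD_CAP_DUMP,
	},
};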
diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c
index 467b7a431de1..9cbae0169249 100644
--- a/net/devlink/netlink_gen.c
+++ b/net/devlink/netlink_gen.c
@@ -10,6 +10,18 @@
#include <uapi/linux/devlink.h>
+/* Common nested types */
+const struct nla_policy devlink_dl_port_function_nl_policy[DEVLINK_PORT_FN_ATTR_CAPS + 1] = {
+ [DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY, },
+ [DEVLINK_PORT_FN_ATTR_STATE] = NLA_POLICY_MAX(NLA_U8, 1),
+ [DEVLINK_PORT_FN_ATTR_OPSTATE] = NLA_POLICY_MAX(NLA_U8, 1),
+ [DEVLINK_PORT_FN_ATTR_CAPS] = NLA_POLICY_BITFIELD32(3),
+};
+
+const struct nla_policy devlink_dl_selftest_id_nl_policy[DEVLINK_ATTR_SELFTEST_ID_FLASH + 1] = {
+ [DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG, },
+};
+
/* DEVLINK_CMD_GET - do */
static const struct nla_policy devlink_get_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -29,6 +41,48 @@ static const struct nla_policy devlink_port_get_dump_nl_policy[DEVLINK_ATTR_DEV_
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_PORT_SET - do */
+static const struct nla_policy devlink_port_set_nl_policy[DEVLINK_ATTR_PORT_FUNCTION + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_PORT_TYPE] = NLA_POLICY_MAX(NLA_U16, 3),
+ [DEVLINK_ATTR_PORT_FUNCTION] = NLA_POLICY_NESTED(devlink_dl_port_function_nl_policy),
+};
+
+/* DEVLINK_CMD_PORT_NEW - do */
+static const struct nla_policy devlink_port_new_nl_policy[DEVLINK_ATTR_PORT_PCI_SF_NUMBER + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_PORT_FLAVOUR] = NLA_POLICY_MAX(NLA_U16, 7),
+ [DEVLINK_ATTR_PORT_PCI_PF_NUMBER] = { .type = NLA_U16, },
+ [DEVLINK_ATTR_PORT_PCI_SF_NUMBER] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_PORT_CONTROLLER_NUMBER] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_PORT_DEL - do */
+static const struct nla_policy devlink_port_del_nl_policy[DEVLINK_ATTR_PORT_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_PORT_SPLIT - do */
+static const struct nla_policy devlink_port_split_nl_policy[DEVLINK_ATTR_PORT_SPLIT_COUNT + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_PORT_UNSPLIT - do */
+static const struct nla_policy devlink_port_unsplit_nl_policy[DEVLINK_ATTR_PORT_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+};
+
/* DEVLINK_CMD_SB_GET - do */
static const struct nla_policy devlink_sb_get_do_nl_policy[DEVLINK_ATTR_SB_INDEX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -56,6 +110,16 @@ static const struct nla_policy devlink_sb_pool_get_dump_nl_policy[DEVLINK_ATTR_D
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_SB_POOL_SET - do */
+static const struct nla_policy devlink_sb_pool_set_nl_policy[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16, },
+ [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = NLA_POLICY_MAX(NLA_U8, 1),
+ [DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32, },
+};
+
/* DEVLINK_CMD_SB_PORT_POOL_GET - do */
static const struct nla_policy devlink_sb_port_pool_get_do_nl_policy[DEVLINK_ATTR_SB_POOL_INDEX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -71,6 +135,16 @@ static const struct nla_policy devlink_sb_port_pool_get_dump_nl_policy[DEVLINK_A
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_SB_PORT_POOL_SET - do */
+static const struct nla_policy devlink_sb_port_pool_set_nl_policy[DEVLINK_ATTR_SB_THRESHOLD + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16, },
+ [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32, },
+};
+
/* DEVLINK_CMD_SB_TC_POOL_BIND_GET - do */
static const struct nla_policy devlink_sb_tc_pool_bind_get_do_nl_policy[DEVLINK_ATTR_SB_TC_INDEX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -87,6 +161,100 @@ static const struct nla_policy devlink_sb_tc_pool_bind_get_dump_nl_policy[DEVLIN
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_SB_TC_POOL_BIND_SET - do */
+static const struct nla_policy devlink_sb_tc_pool_bind_set_nl_policy[DEVLINK_ATTR_SB_TC_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16, },
+ [DEVLINK_ATTR_SB_POOL_TYPE] = NLA_POLICY_MAX(NLA_U8, 1),
+ [DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16, },
+ [DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_SB_OCC_SNAPSHOT - do */
+static const struct nla_policy devlink_sb_occ_snapshot_nl_policy[DEVLINK_ATTR_SB_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_SB_OCC_MAX_CLEAR - do */
+static const struct nla_policy devlink_sb_occ_max_clear_nl_policy[DEVLINK_ATTR_SB_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_ESWITCH_GET - do */
+static const struct nla_policy devlink_eswitch_get_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_ESWITCH_SET - do */
+static const struct nla_policy devlink_eswitch_set_nl_policy[DEVLINK_ATTR_ESWITCH_ENCAP_MODE + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_MAX(NLA_U16, 1),
+ [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U16, 3),
+ [DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = NLA_POLICY_MAX(NLA_U8, 1),
+};
+
+/* DEVLINK_CMD_DPIPE_TABLE_GET - do */
+static const struct nla_policy devlink_dpipe_table_get_nl_policy[DEVLINK_ATTR_DPIPE_TABLE_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_DPIPE_ENTRIES_GET - do */
+static const struct nla_policy devlink_dpipe_entries_get_nl_policy[DEVLINK_ATTR_DPIPE_TABLE_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_DPIPE_HEADERS_GET - do */
+static const struct nla_policy devlink_dpipe_headers_get_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET - do */
+static const struct nla_policy devlink_dpipe_table_counters_set_nl_policy[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8, },
+};
+
+/* DEVLINK_CMD_RESOURCE_SET - do */
+static const struct nla_policy devlink_resource_set_nl_policy[DEVLINK_ATTR_RESOURCE_SIZE + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_RESOURCE_ID] = { .type = NLA_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE] = { .type = NLA_U64, },
+};
+
+/* DEVLINK_CMD_RESOURCE_DUMP - do */
+static const struct nla_policy devlink_resource_dump_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_RELOAD - do */
+static const struct nla_policy devlink_reload_nl_policy[DEVLINK_ATTR_RELOAD_LIMITS + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_RELOAD_ACTION] = NLA_POLICY_RANGE(NLA_U8, 1, 2),
+ [DEVLINK_ATTR_RELOAD_LIMITS] = NLA_POLICY_BITFIELD32(6),
+ [DEVLINK_ATTR_NETNS_PID] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_NETNS_FD] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_NETNS_ID] = { .type = NLA_U32, },
+};
+
/* DEVLINK_CMD_PARAM_GET - do */
static const struct nla_policy devlink_param_get_do_nl_policy[DEVLINK_ATTR_PARAM_NAME + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -100,6 +268,15 @@ static const struct nla_policy devlink_param_get_dump_nl_policy[DEVLINK_ATTR_DEV
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_PARAM_SET - do */
+static const struct nla_policy devlink_param_set_nl_policy[DEVLINK_ATTR_PARAM_VALUE_CMODE + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PARAM_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PARAM_TYPE] = { .type = NLA_U8, },
+ [DEVLINK_ATTR_PARAM_VALUE_CMODE] = NLA_POLICY_MAX(NLA_U8, 2),
+};
+
/* DEVLINK_CMD_REGION_GET - do */
static const struct nla_policy devlink_region_get_do_nl_policy[DEVLINK_ATTR_REGION_NAME + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -114,6 +291,50 @@ static const struct nla_policy devlink_region_get_dump_nl_policy[DEVLINK_ATTR_DE
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_REGION_NEW - do */
+static const struct nla_policy devlink_region_new_nl_policy[DEVLINK_ATTR_REGION_SNAPSHOT_ID + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_REGION_DEL - do */
+static const struct nla_policy devlink_region_del_nl_policy[DEVLINK_ATTR_REGION_SNAPSHOT_ID + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_REGION_READ - dump */
+static const struct nla_policy devlink_region_read_nl_policy[DEVLINK_ATTR_REGION_DIRECT + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_REGION_DIRECT] = { .type = NLA_FLAG, },
+ [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64, },
+ [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64, },
+};
+
+/* DEVLINK_CMD_PORT_PARAM_GET - do */
+static const struct nla_policy devlink_port_param_get_nl_policy[DEVLINK_ATTR_PORT_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+};
+
+/* DEVLINK_CMD_PORT_PARAM_SET - do */
+static const struct nla_policy devlink_port_param_set_nl_policy[DEVLINK_ATTR_PORT_INDEX + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+};
+
/* DEVLINK_CMD_INFO_GET - do */
static const struct nla_policy devlink_info_get_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -135,6 +356,58 @@ static const struct nla_policy devlink_health_reporter_get_dump_nl_policy[DEVLIN
[DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
};
+/* DEVLINK_CMD_HEALTH_REPORTER_SET - do */
+static const struct nla_policy devlink_health_reporter_set_nl_policy[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .type = NLA_U8, },
+};
+
+/* DEVLINK_CMD_HEALTH_REPORTER_RECOVER - do */
+static const struct nla_policy devlink_health_reporter_recover_nl_policy[DEVLINK_ATTR_HEALTH_REPORTER_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE - do */
+static const struct nla_policy devlink_health_reporter_diagnose_nl_policy[DEVLINK_ATTR_HEALTH_REPORTER_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET - dump */
+static const struct nla_policy devlink_health_reporter_dump_get_nl_policy[DEVLINK_ATTR_HEALTH_REPORTER_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR - do */
+static const struct nla_policy devlink_health_reporter_dump_clear_nl_policy[DEVLINK_ATTR_HEALTH_REPORTER_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_FLASH_UPDATE - do */
+static const struct nla_policy devlink_flash_update_nl_policy[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK] = NLA_POLICY_BITFIELD32(3),
+};
+
/* DEVLINK_CMD_TRAP_GET - do */
static const struct nla_policy devlink_trap_get_do_nl_policy[DEVLINK_ATTR_TRAP_NAME + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -148,6 +421,14 @@ static const struct nla_policy devlink_trap_get_dump_nl_policy[DEVLINK_ATTR_DEV_
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_TRAP_SET - do */
+static const struct nla_policy devlink_trap_set_nl_policy[DEVLINK_ATTR_TRAP_ACTION + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_TRAP_ACTION] = NLA_POLICY_MAX(NLA_U8, 2),
+};
+
/* DEVLINK_CMD_TRAP_GROUP_GET - do */
static const struct nla_policy devlink_trap_group_get_do_nl_policy[DEVLINK_ATTR_TRAP_GROUP_NAME + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -161,6 +442,15 @@ static const struct nla_policy devlink_trap_group_get_dump_nl_policy[DEVLINK_ATT
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_TRAP_GROUP_SET - do */
+static const struct nla_policy devlink_trap_group_set_nl_policy[DEVLINK_ATTR_TRAP_POLICER_ID + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_TRAP_ACTION] = NLA_POLICY_MAX(NLA_U8, 2),
+ [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32, },
+};
+
/* DEVLINK_CMD_TRAP_POLICER_GET - do */
static const struct nla_policy devlink_trap_policer_get_do_nl_policy[DEVLINK_ATTR_TRAP_POLICER_ID + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -174,6 +464,23 @@ static const struct nla_policy devlink_trap_policer_get_dump_nl_policy[DEVLINK_A
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_TRAP_POLICER_SET - do */
+static const struct nla_policy devlink_trap_policer_set_nl_policy[DEVLINK_ATTR_TRAP_POLICER_BURST + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64, },
+ [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64, },
+};
+
+/* DEVLINK_CMD_HEALTH_REPORTER_TEST - do */
+static const struct nla_policy devlink_health_reporter_test_nl_policy[DEVLINK_ATTR_HEALTH_REPORTER_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING, },
+};
+
/* DEVLINK_CMD_RATE_GET - do */
static const struct nla_policy devlink_rate_get_do_nl_policy[DEVLINK_ATTR_RATE_NODE_NAME + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -188,6 +495,37 @@ static const struct nla_policy devlink_rate_get_dump_nl_policy[DEVLINK_ATTR_DEV_
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_RATE_SET - do */
+static const struct nla_policy devlink_rate_set_nl_policy[DEVLINK_ATTR_RATE_TX_WEIGHT + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_RATE_TX_SHARE] = { .type = NLA_U64, },
+ [DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64, },
+ [DEVLINK_ATTR_RATE_TX_PRIORITY] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_RATE_TX_WEIGHT] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_RATE_NEW - do */
+static const struct nla_policy devlink_rate_new_nl_policy[DEVLINK_ATTR_RATE_TX_WEIGHT + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_RATE_TX_SHARE] = { .type = NLA_U64, },
+ [DEVLINK_ATTR_RATE_TX_MAX] = { .type = NLA_U64, },
+ [DEVLINK_ATTR_RATE_TX_PRIORITY] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_RATE_TX_WEIGHT] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+/* DEVLINK_CMD_RATE_DEL - do */
+static const struct nla_policy devlink_rate_del_nl_policy[DEVLINK_ATTR_RATE_NODE_NAME + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_RATE_NODE_NAME] = { .type = NLA_NUL_STRING, },
+};
+
/* DEVLINK_CMD_LINECARD_GET - do */
static const struct nla_policy devlink_linecard_get_do_nl_policy[DEVLINK_ATTR_LINECARD_INDEX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
@@ -201,14 +539,29 @@ static const struct nla_policy devlink_linecard_get_dump_nl_policy[DEVLINK_ATTR_
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_LINECARD_SET - do */
+static const struct nla_policy devlink_linecard_set_nl_policy[DEVLINK_ATTR_LINECARD_TYPE + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_LINECARD_INDEX] = { .type = NLA_U32, },
+ [DEVLINK_ATTR_LINECARD_TYPE] = { .type = NLA_NUL_STRING, },
+};
+
/* DEVLINK_CMD_SELFTESTS_GET - do */
static const struct nla_policy devlink_selftests_get_nl_policy[DEVLINK_ATTR_DEV_NAME + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
};
+/* DEVLINK_CMD_SELFTESTS_RUN - do */
+static const struct nla_policy devlink_selftests_run_nl_policy[DEVLINK_ATTR_SELFTESTS + 1] = {
+ [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
+ [DEVLINK_ATTR_SELFTESTS] = NLA_POLICY_NESTED(devlink_dl_selftest_id_nl_policy),
+};
+
/* Ops table for devlink */
-const struct genl_split_ops devlink_nl_ops[32] = {
+const struct genl_split_ops devlink_nl_ops[73] = {
{
.cmd = DEVLINK_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
@@ -243,6 +596,56 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_PORT_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_port_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_port_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_PORT_FUNCTION,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_NEW,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_port_new_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_port_new_nl_policy,
+ .maxattr = DEVLINK_ATTR_PORT_PCI_SF_NUMBER,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_DEL,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_port_del_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_port_del_nl_policy,
+ .maxattr = DEVLINK_ATTR_PORT_INDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_SPLIT,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_port_split_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_port_split_nl_policy,
+ .maxattr = DEVLINK_ATTR_PORT_SPLIT_COUNT,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_UNSPLIT,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_port_unsplit_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_port_unsplit_nl_policy,
+ .maxattr = DEVLINK_ATTR_PORT_INDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_SB_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit,
@@ -277,6 +680,16 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_SB_POOL_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_sb_pool_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_sb_pool_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit_port,
@@ -294,6 +707,16 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_sb_port_pool_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_sb_port_pool_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_SB_THRESHOLD,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit_port,
@@ -311,6 +734,126 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_sb_tc_pool_bind_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_sb_tc_pool_bind_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_SB_TC_INDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_sb_occ_snapshot_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_sb_occ_snapshot_nl_policy,
+ .maxattr = DEVLINK_ATTR_SB_INDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_sb_occ_max_clear_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_sb_occ_max_clear_nl_policy,
+ .maxattr = DEVLINK_ATTR_SB_INDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_ESWITCH_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_eswitch_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_eswitch_get_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_ESWITCH_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_eswitch_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_eswitch_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_ESWITCH_ENCAP_MODE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_dpipe_table_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_dpipe_table_get_nl_policy,
+ .maxattr = DEVLINK_ATTR_DPIPE_TABLE_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_dpipe_entries_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_dpipe_entries_get_nl_policy,
+ .maxattr = DEVLINK_ATTR_DPIPE_TABLE_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_dpipe_headers_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_dpipe_headers_get_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_dpipe_table_counters_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_dpipe_table_counters_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_RESOURCE_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_resource_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_resource_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_RESOURCE_SIZE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_RESOURCE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_resource_dump_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_resource_dump_nl_policy,
+ .maxattr = DEVLINK_ATTR_DEV_NAME,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_RELOAD,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_reload_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_reload_nl_policy,
+ .maxattr = DEVLINK_ATTR_RELOAD_LIMITS,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_PARAM_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit,
@@ -328,6 +871,16 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_PARAM_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_param_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_param_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_PARAM_VALUE_CMODE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_REGION_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit_port_optional,
@@ -345,6 +898,60 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_REGION_NEW,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port_optional,
+ .doit = devlink_nl_region_new_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_region_new_nl_policy,
+ .maxattr = DEVLINK_ATTR_REGION_SNAPSHOT_ID,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_REGION_DEL,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port_optional,
+ .doit = devlink_nl_region_del_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_region_del_nl_policy,
+ .maxattr = DEVLINK_ATTR_REGION_SNAPSHOT_ID,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_REGION_READ,
+ .validate = GENL_DONT_VALIDATE_DUMP_STRICT,
+ .dumpit = devlink_nl_region_read_dumpit,
+ .policy = devlink_region_read_nl_policy,
+ .maxattr = DEVLINK_ATTR_REGION_DIRECT,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_PARAM_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_port_param_get_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_port_param_get_nl_policy,
+ .maxattr = DEVLINK_ATTR_PORT_INDEX,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_PARAM_GET,
+ .validate = GENL_DONT_VALIDATE_DUMP_STRICT,
+ .dumpit = devlink_nl_port_param_get_dumpit,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_PARAM_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port,
+ .doit = devlink_nl_port_param_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_port_param_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_PORT_INDEX,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_INFO_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit,
@@ -378,6 +985,64 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port_optional,
+ .doit = devlink_nl_health_reporter_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_health_reporter_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port_optional,
+ .doit = devlink_nl_health_reporter_recover_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_health_reporter_recover_nl_policy,
+ .maxattr = DEVLINK_ATTR_HEALTH_REPORTER_NAME,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port_optional,
+ .doit = devlink_nl_health_reporter_diagnose_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_health_reporter_diagnose_nl_policy,
+ .maxattr = DEVLINK_ATTR_HEALTH_REPORTER_NAME,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
+ .validate = GENL_DONT_VALIDATE_DUMP_STRICT,
+ .dumpit = devlink_nl_health_reporter_dump_get_dumpit,
+ .policy = devlink_health_reporter_dump_get_nl_policy,
+ .maxattr = DEVLINK_ATTR_HEALTH_REPORTER_NAME,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port_optional,
+ .doit = devlink_nl_health_reporter_dump_clear_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_health_reporter_dump_clear_nl_policy,
+ .maxattr = DEVLINK_ATTR_HEALTH_REPORTER_NAME,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_FLASH_UPDATE,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_flash_update_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_flash_update_nl_policy,
+ .maxattr = DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_TRAP_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit,
@@ -395,6 +1060,16 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_TRAP_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_trap_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_trap_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_TRAP_ACTION,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_TRAP_GROUP_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit,
@@ -412,6 +1087,16 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_TRAP_GROUP_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_trap_group_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_trap_group_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_TRAP_POLICER_ID,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_TRAP_POLICER_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit,
@@ -429,6 +1114,26 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_TRAP_POLICER_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_trap_policer_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_trap_policer_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_TRAP_POLICER_BURST,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit_port_optional,
+ .doit = devlink_nl_health_reporter_test_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_health_reporter_test_nl_policy,
+ .maxattr = DEVLINK_ATTR_HEALTH_REPORTER_NAME,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_RATE_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit,
@@ -446,6 +1151,36 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_RATE_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_rate_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_rate_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_RATE_TX_WEIGHT,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_RATE_NEW,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_rate_new_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_rate_new_nl_policy,
+ .maxattr = DEVLINK_ATTR_RATE_TX_WEIGHT,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = DEVLINK_CMD_RATE_DEL,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_rate_del_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_rate_del_nl_policy,
+ .maxattr = DEVLINK_ATTR_RATE_NODE_NAME,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_LINECARD_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit,
@@ -463,6 +1198,16 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.flags = GENL_CMD_CAP_DUMP,
},
{
+ .cmd = DEVLINK_CMD_LINECARD_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_linecard_set_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_linecard_set_nl_policy,
+ .maxattr = DEVLINK_ATTR_LINECARD_TYPE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
.cmd = DEVLINK_CMD_SELFTESTS_GET,
.validate = GENL_DONT_VALIDATE_STRICT,
.pre_doit = devlink_nl_pre_doit,
@@ -478,4 +1223,14 @@ const struct genl_split_ops devlink_nl_ops[32] = {
.dumpit = devlink_nl_selftests_get_dumpit,
.flags = GENL_CMD_CAP_DUMP,
},
+ {
+ .cmd = DEVLINK_CMD_SELFTESTS_RUN,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .pre_doit = devlink_nl_pre_doit,
+ .doit = devlink_nl_selftests_run_doit,
+ .post_doit = devlink_nl_post_doit,
+ .policy = devlink_selftests_run_nl_policy,
+ .maxattr = DEVLINK_ATTR_SELFTESTS,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
};
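
Aside: the generated table above keeps separate entries for the "do" and "dump" forms of a command (DEVLINK_CMD_PORT_PARAM_GET appears twice, once with GENL_CMD_CAP_DO and once with GENL_CMD_CAP_DUMP). A minimal standalone sketch of that split dispatch pattern — not kernel code, all names invented for illustration:

#include <stddef.h>
#include <stdio.h>

#define CAP_DO   0x1
#define CAP_DUMP 0x2

struct split_op {
	int cmd;
	unsigned int flags;
	int (*doit)(void);
	int (*dumpit)(void);
};

static int port_param_get_doit(void)   { puts("doit: one object"); return 0; }
static int port_param_get_dumpit(void) { puts("dumpit: walk all objects"); return 0; }

/* One entry per request form, mirroring the generated devlink table. */
static const struct split_op ops[] = {
	{ .cmd = 1, .flags = CAP_DO,   .doit   = port_param_get_doit },
	{ .cmd = 1, .flags = CAP_DUMP, .dumpit = port_param_get_dumpit },
};

static int dispatch(int cmd, int dump)
{
	unsigned int want = dump ? CAP_DUMP : CAP_DO;
	size_t i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
		if (ops[i].cmd == cmd && (ops[i].flags & want))
			return dump ? ops[i].dumpit() : ops[i].doit();
	return -1; /* no handler registered for this form */
}

int main(void)
{
	dispatch(1, 0);
	dispatch(1, 1);
	return 0;
}
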
diff --git a/net/devlink/netlink_gen.h b/net/devlink/netlink_gen.h
index f8bbc93e39be..0e9e89c31c31 100644
--- a/net/devlink/netlink_gen.h
+++ b/net/devlink/netlink_gen.h
@@ -11,8 +11,12 @@
#include <uapi/linux/devlink.h>
+/* Common nested types */
+extern const struct nla_policy devlink_dl_port_function_nl_policy[DEVLINK_PORT_FN_ATTR_CAPS + 1];
+extern const struct nla_policy devlink_dl_selftest_id_nl_policy[DEVLINK_ATTR_SELFTEST_ID_FLASH + 1];
+
/* Ops table for devlink */
-extern const struct genl_split_ops devlink_nl_ops[32];
+extern const struct genl_split_ops devlink_nl_ops[73];
int devlink_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
@@ -30,25 +34,61 @@ int devlink_nl_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int devlink_nl_port_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_port_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_port_set_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_port_new_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_port_del_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_port_split_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_port_unsplit_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_sb_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_sb_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int devlink_nl_sb_pool_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_sb_pool_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_sb_pool_set_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_sb_port_pool_get_doit(struct sk_buff *skb,
struct genl_info *info);
int devlink_nl_sb_port_pool_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_sb_port_pool_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
int devlink_nl_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
struct genl_info *info);
int devlink_nl_sb_tc_pool_bind_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_sb_occ_snapshot_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_sb_occ_max_clear_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_dpipe_table_get_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_dpipe_entries_get_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_dpipe_headers_get_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_dpipe_table_counters_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_resource_set_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_resource_dump_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_reload_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_param_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_param_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_param_set_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_region_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_region_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_region_new_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_region_del_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_region_read_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_port_param_get_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_port_param_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_port_param_set_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_info_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_info_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
@@ -56,24 +96,46 @@ int devlink_nl_health_reporter_get_doit(struct sk_buff *skb,
struct genl_info *info);
int devlink_nl_health_reporter_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_health_reporter_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_health_reporter_recover_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_health_reporter_diagnose_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_health_reporter_dump_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int devlink_nl_health_reporter_dump_clear_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_flash_update_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_trap_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_trap_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_trap_set_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_trap_group_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_trap_group_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_trap_group_set_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_trap_policer_get_doit(struct sk_buff *skb,
struct genl_info *info);
int devlink_nl_trap_policer_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_trap_policer_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_health_reporter_test_doit(struct sk_buff *skb,
+ struct genl_info *info);
int devlink_nl_rate_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_rate_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_rate_set_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_rate_new_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_rate_del_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_linecard_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_linecard_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_linecard_set_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_selftests_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_selftests_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int devlink_nl_selftests_run_doit(struct sk_buff *skb, struct genl_info *info);
#endif /* _LINUX_DEVLINK_GEN_H */
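
Aside: the header sizes each generated policy as "<last attribute> + 1" entries, matching the .maxattr values in the ops table, because the attribute id doubles as the array index. A small standalone sketch of that sizing convention, with a made-up attribute enum:

#include <stdio.h>

enum {
	ATTR_UNSPEC,
	ATTR_BUS_NAME,		/* string */
	ATTR_DEV_NAME,		/* string */
	ATTR_PORT_INDEX,	/* u32 */
};

struct policy { int type; };	/* stand-in for struct nla_policy */

/* Indexed by attribute id, so the table needs ATTR_PORT_INDEX + 1 slots. */
static const struct policy example_policy[ATTR_PORT_INDEX + 1] = {
	[ATTR_BUS_NAME]   = { .type = 1 },
	[ATTR_DEV_NAME]   = { .type = 1 },
	[ATTR_PORT_INDEX] = { .type = 2 },
};

int main(void)
{
	printf("policy slots: %zu\n",
	       sizeof(example_policy) / sizeof(example_policy[0]));
	return 0;
}
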
diff --git a/net/devlink/param.c b/net/devlink/param.c
index 31275f9d4cb7..d74df09311a9 100644
--- a/net/devlink/param.c
+++ b/net/devlink/param.c
@@ -581,7 +581,7 @@ static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
return 0;
}
-int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_param_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
@@ -589,22 +589,22 @@ int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, struct genl_info *info)
info, DEVLINK_CMD_PARAM_NEW);
}
-int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
+int devlink_nl_port_param_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
{
NL_SET_ERR_MSG(cb->extack, "Port params are not supported");
return msg->len;
}
-int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_port_param_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
NL_SET_ERR_MSG(info->extack, "Port params are not supported");
return -EINVAL;
}
-int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_port_param_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
NL_SET_ERR_MSG(info->extack, "Port params are not supported");
return -EINVAL;
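
Aside: the port-param handlers above answer with a human-readable extack message alongside the errno. A toy standalone model of that pairing — the struct and names here are illustrative, not the kernel's extack API:

#include <errno.h>
#include <stdio.h>

/* Carry a human-readable reason next to the numeric error code, so the
 * caller can report both. */
struct ext_ack {
	const char *msg;
};

static int port_param_set(struct ext_ack *extack)
{
	extack->msg = "Port params are not supported";
	return -EINVAL;
}

int main(void)
{
	struct ext_ack extack = { 0 };
	int err = port_param_set(&extack);

	if (err)
		fprintf(stderr, "error %d: %s\n", err, extack.msg);
	return 0;
}
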
diff --git a/net/devlink/port.c b/net/devlink/port.c
index 4e9003242448..7634f187fa50 100644
--- a/net/devlink/port.c
+++ b/net/devlink/port.c
@@ -772,7 +772,7 @@ static int devlink_port_function_set(struct devlink_port *port,
return err;
}
-int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_port_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
int err;
@@ -798,7 +798,7 @@ int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_port_split_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
struct devlink *devlink = info->user_ptr[0];
@@ -829,8 +829,7 @@ int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, struct genl_info *info)
info->extack);
}
-int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_port_unsplit_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
struct devlink *devlink = info->user_ptr[0];
@@ -840,7 +839,7 @@ int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
return devlink_port->ops->port_unsplit(devlink, devlink_port, info->extack);
}
-int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_port_new_doit(struct sk_buff *skb, struct genl_info *info)
{
struct netlink_ext_ack *extack = info->extack;
struct devlink_port_new_attrs new_attrs = {};
@@ -904,7 +903,7 @@ err_out_port_del:
return err;
}
-int devlink_nl_cmd_port_del_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_port_del_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
struct netlink_ext_ack *extack = info->extack;
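
Aside: these doit handlers rely on the pre_doit hook having resolved the devlink instance into info->user_ptr[0] (and the port into user_ptr[1] for port-aware ops), with post_doit releasing them. A standalone sketch of that pre/do/post chaining, using invented types:

#include <stdio.h>

struct info {
	void *user_ptr[2];	/* [0]: device, [1]: port */
};

struct dl_instance { const char *name; };
struct dl_port     { int index; };

static struct dl_instance dev = { .name = "example-dev" };
static struct dl_port     prt = { .index = 3 };

static int pre_doit(struct info *info)
{
	/* Resolve and pin the objects once, before the handler runs. */
	info->user_ptr[0] = &dev;
	info->user_ptr[1] = &prt;
	return 0;
}

static int port_set_doit(struct info *info)
{
	struct dl_instance *d = info->user_ptr[0];
	struct dl_port *p = info->user_ptr[1];

	printf("set on %s port %d\n", d->name, p->index);
	return 0;
}

static void post_doit(struct info *info)
{
	/* Drop what pre_doit resolved. */
	info->user_ptr[0] = NULL;
	info->user_ptr[1] = NULL;
}

int main(void)
{
	struct info info = { { 0 } };

	if (!pre_doit(&info)) {
		port_set_doit(&info);
		post_doit(&info);
	}
	return 0;
}
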
diff --git a/net/devlink/rate.c b/net/devlink/rate.c
index dff1593b8406..94b289b93ff2 100644
--- a/net/devlink/rate.c
+++ b/net/devlink/rate.c
@@ -458,7 +458,7 @@ static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
return true;
}
-int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_rate_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_rate *devlink_rate;
@@ -480,7 +480,7 @@ int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb, struct genl_info *info)
return err;
}
-int devlink_nl_cmd_rate_new_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_rate_new_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_rate *rate_node;
@@ -536,7 +536,7 @@ err_strdup:
return err;
}
-int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_rate_del_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_rate *rate_node;
diff --git a/net/devlink/region.c b/net/devlink/region.c
index d197cdb662db..0aab7b82d678 100644
--- a/net/devlink/region.c
+++ b/net/devlink/region.c
@@ -588,7 +588,7 @@ int devlink_nl_region_get_dumpit(struct sk_buff *skb,
return devlink_nl_dumpit(skb, cb, devlink_nl_region_get_dump_one);
}
-int devlink_nl_cmd_region_del(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_region_del_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_snapshot *snapshot;
@@ -633,7 +633,7 @@ int devlink_nl_cmd_region_del(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-int devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_region_new_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_snapshot *snapshot;
@@ -863,8 +863,8 @@ devlink_region_direct_fill(void *cb_priv, u8 *chunk, u32 chunk_size,
curr_offset, chunk_size, chunk);
}
-int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
+int devlink_nl_region_read_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct devlink_nl_dump_state *state = devlink_dump_state(cb);
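
Aside: DEVLINK_CMD_REGION_READ is registered as a dump op in the table above, so region contents come back in chunks across messages rather than in a single reply. A toy chunked-read loop showing the idea in plain C, with no netlink involved:

#include <stdio.h>
#include <string.h>

/* Hand the snapshot back in fixed-size pieces, the way a dump fills one
 * message at a time and resumes from a saved offset. */
static size_t read_chunk(const unsigned char *region, size_t region_len,
			 size_t offset, unsigned char *out, size_t out_len)
{
	size_t n = region_len - offset;

	if (n > out_len)
		n = out_len;
	memcpy(out, region + offset, n);
	return n;
}

int main(void)
{
	static const unsigned char region[] = "0123456789";
	unsigned char chunk[4];
	size_t off = 0, n;

	while ((n = read_chunk(region, sizeof(region) - 1, off,
			       chunk, sizeof(chunk))) > 0) {
		printf("chunk at %zu: %zu bytes\n", off, n);
		off += n;
	}
	return 0;
}
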
diff --git a/net/devlink/resource.c b/net/devlink/resource.c
index c8b615e4c385..594c8aeb3bfa 100644
--- a/net/devlink/resource.c
+++ b/net/devlink/resource.c
@@ -105,7 +105,7 @@ devlink_resource_validate_size(struct devlink_resource *resource, u64 size,
return err;
}
-int devlink_nl_cmd_resource_set(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_resource_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_resource *resource;
@@ -285,7 +285,7 @@ err_resource_put:
return err;
}
-int devlink_nl_cmd_resource_dump(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_resource_dump_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
diff --git a/net/devlink/sb.c b/net/devlink/sb.c
index bd677fff5ec8..0a76bb32502b 100644
--- a/net/devlink/sb.c
+++ b/net/devlink/sb.c
@@ -413,7 +413,7 @@ static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
return -EOPNOTSUPP;
}
-int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_sb_pool_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
enum devlink_sb_threshold_type threshold_type;
@@ -621,8 +621,8 @@ static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
return -EOPNOTSUPP;
}
-int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_sb_port_pool_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
struct devlink *devlink = info->user_ptr[0];
@@ -861,8 +861,8 @@ static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
return -EOPNOTSUPP;
}
-int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[1];
struct devlink *devlink = info->user_ptr[0];
@@ -900,8 +900,7 @@ int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
pool_index, threshold, info->extack);
}
-int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_sb_occ_snapshot_doit(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
const struct devlink_ops *ops = devlink->ops;
@@ -916,8 +915,8 @@ int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
return -EOPNOTSUPP;
}
-int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_sb_occ_max_clear_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
const struct devlink_ops *ops = devlink->ops;
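
Aside: several shared-buffer handlers bail out with -EOPNOTSUPP when the driver did not fill in the corresponding op. A standalone sketch of that optional-op pattern, with a made-up ops structure:

#include <errno.h>
#include <stdio.h>

/* Drivers fill in only the ops they support; the core probes the pointer
 * and reports "operation not supported" otherwise. */
struct drv_ops {
	int (*occ_snapshot)(void);
};

static int do_occ_snapshot(const struct drv_ops *ops)
{
	if (!ops->occ_snapshot)
		return -EOPNOTSUPP;
	return ops->occ_snapshot();
}

static int real_snapshot(void) { puts("snapshot taken"); return 0; }

int main(void)
{
	struct drv_ops with    = { .occ_snapshot = real_snapshot };
	struct drv_ops without = { 0 };

	printf("with op:    %d\n", do_occ_snapshot(&with));
	printf("without op: %d\n", do_occ_snapshot(&without));
	return 0;
}
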
diff --git a/net/devlink/trap.c b/net/devlink/trap.c
index c26bf9b29bca..c26313e7ca08 100644
--- a/net/devlink/trap.c
+++ b/net/devlink/trap.c
@@ -414,7 +414,7 @@ static int devlink_trap_action_set(struct devlink *devlink,
info->extack);
}
-int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb, struct genl_info *info)
+int devlink_nl_trap_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct netlink_ext_ack *extack = info->extack;
struct devlink *devlink = info->user_ptr[0];
@@ -684,8 +684,7 @@ static int devlink_trap_group_set(struct devlink *devlink,
return 0;
}
-int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_trap_group_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct netlink_ext_ack *extack = info->extack;
struct devlink *devlink = info->user_ptr[0];
@@ -926,8 +925,8 @@ devlink_trap_policer_set(struct devlink *devlink,
return 0;
}
-int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
- struct genl_info *info)
+int devlink_nl_trap_policer_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct devlink_trap_policer_item *policer_item;
struct netlink_ext_ack *extack = info->extack;
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 12e305824a96..8a1894a42552 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -8,16 +8,16 @@ endif
# the core
obj-$(CONFIG_NET_DSA) += dsa_core.o
dsa_core-y += \
+ conduit.o \
devlink.o \
dsa.o \
- master.o \
netlink.o \
port.o \
- slave.o \
switch.o \
tag.o \
tag_8021q.o \
- trace.o
+ trace.o \
+ user.o
# tagging formats
obj-$(CONFIG_NET_DSA_TAG_AR9331) += tag_ar9331.o
diff --git a/net/dsa/master.c b/net/dsa/conduit.c
index 6be89ab0cc01..3dfdb3cb47dc 100644
--- a/net/dsa/master.c
+++ b/net/dsa/conduit.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Handling of a master device, switching frames via its switch fabric CPU port
+ * Handling of a conduit device, switching frames via its switch fabric CPU port
*
* Copyright (c) 2017 Savoir-faire Linux Inc.
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
@@ -11,12 +11,12 @@
#include <linux/netlink.h>
#include <net/dsa.h>
+#include "conduit.h"
#include "dsa.h"
-#include "master.h"
#include "port.h"
#include "tag.h"
-static int dsa_master_get_regs_len(struct net_device *dev)
+static int dsa_conduit_get_regs_len(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -45,8 +45,8 @@ static int dsa_master_get_regs_len(struct net_device *dev)
return ret;
}
-static void dsa_master_get_regs(struct net_device *dev,
- struct ethtool_regs *regs, void *data)
+static void dsa_conduit_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -80,9 +80,9 @@ static void dsa_master_get_regs(struct net_device *dev,
}
}
-static void dsa_master_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats,
- uint64_t *data)
+static void dsa_conduit_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -99,9 +99,9 @@ static void dsa_master_get_ethtool_stats(struct net_device *dev,
ds->ops->get_ethtool_stats(ds, port, data + count);
}
-static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
- struct ethtool_stats *stats,
- uint64_t *data)
+static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -125,7 +125,7 @@ static void dsa_master_get_ethtool_phy_stats(struct net_device *dev,
ds->ops->get_ethtool_phy_stats(ds, port, data + count);
}
-static int dsa_master_get_sset_count(struct net_device *dev, int sset)
+static int dsa_conduit_get_sset_count(struct net_device *dev, int sset)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -147,8 +147,8 @@ static int dsa_master_get_sset_count(struct net_device *dev, int sset)
return count;
}
-static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
- uint8_t *data)
+static void dsa_conduit_get_strings(struct net_device *dev, uint32_t stringset,
+ uint8_t *data)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops;
@@ -195,12 +195,12 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
}
}
-/* Deny PTP operations on master if there is at least one switch in the tree
+/* Deny PTP operations on conduit if there is at least one switch in the tree
* that is PTP capable.
*/
-int __dsa_master_hwtstamp_validate(struct net_device *dev,
- const struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack)
+int __dsa_conduit_hwtstamp_validate(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch *ds = cpu_dp->ds;
@@ -212,7 +212,7 @@ int __dsa_master_hwtstamp_validate(struct net_device *dev,
list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_supports_hwtstamp(dp)) {
NL_SET_ERR_MSG(extack,
- "HW timestamping not allowed on DSA master when switch supports the operation");
+ "HW timestamping not allowed on DSA conduit when switch supports the operation");
return -EBUSY;
}
}
@@ -220,7 +220,7 @@ int __dsa_master_hwtstamp_validate(struct net_device *dev,
return 0;
}
-static int dsa_master_ethtool_setup(struct net_device *dev)
+static int dsa_conduit_ethtool_setup(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch *ds = cpu_dp->ds;
@@ -237,19 +237,19 @@ static int dsa_master_ethtool_setup(struct net_device *dev)
if (cpu_dp->orig_ethtool_ops)
memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops));
- ops->get_regs_len = dsa_master_get_regs_len;
- ops->get_regs = dsa_master_get_regs;
- ops->get_sset_count = dsa_master_get_sset_count;
- ops->get_ethtool_stats = dsa_master_get_ethtool_stats;
- ops->get_strings = dsa_master_get_strings;
- ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats;
+ ops->get_regs_len = dsa_conduit_get_regs_len;
+ ops->get_regs = dsa_conduit_get_regs;
+ ops->get_sset_count = dsa_conduit_get_sset_count;
+ ops->get_ethtool_stats = dsa_conduit_get_ethtool_stats;
+ ops->get_strings = dsa_conduit_get_strings;
+ ops->get_ethtool_phy_stats = dsa_conduit_get_ethtool_phy_stats;
dev->ethtool_ops = ops;
return 0;
}
-static void dsa_master_ethtool_teardown(struct net_device *dev)
+static void dsa_conduit_ethtool_teardown(struct net_device *dev)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
@@ -260,16 +260,16 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
cpu_dp->orig_ethtool_ops = NULL;
}
-/* Keep the master always promiscuous if the tagging protocol requires that
+/* Keep the conduit always promiscuous if the tagging protocol requires that
* (garbles MAC DA) or if it doesn't support unicast filtering, case in which
* it would revert to promiscuous mode as soon as we call dev_uc_add() on it
* anyway.
*/
-static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
+static void dsa_conduit_set_promiscuity(struct net_device *dev, int inc)
{
const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;
- if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
+ if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_conduit)
return;
ASSERT_RTNL();
@@ -336,17 +336,17 @@ out:
}
static DEVICE_ATTR_RW(tagging);
-static struct attribute *dsa_slave_attrs[] = {
+static struct attribute *dsa_user_attrs[] = {
&dev_attr_tagging.attr,
NULL
};
static const struct attribute_group dsa_group = {
.name = "dsa",
- .attrs = dsa_slave_attrs,
+ .attrs = dsa_user_attrs,
};
-static void dsa_master_reset_mtu(struct net_device *dev)
+static void dsa_conduit_reset_mtu(struct net_device *dev)
{
int err;
@@ -356,7 +356,7 @@ static void dsa_master_reset_mtu(struct net_device *dev)
"Unable to reset MTU to exclude DSA overheads\n");
}
-int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+int dsa_conduit_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
struct dsa_switch *ds = cpu_dp->ds;
@@ -365,7 +365,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
- /* The DSA master must use SET_NETDEV_DEV for this to work. */
+ /* The DSA conduit must use SET_NETDEV_DEV for this to work. */
if (!netif_is_lag_master(dev)) {
consumer_link = device_link_add(ds->dev, dev->dev.parent,
DL_FLAG_AUTOREMOVE_CONSUMER);
@@ -376,7 +376,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
}
/* The switch driver may not implement ->port_change_mtu(), case in
- * which dsa_slave_change_mtu() will not update the master MTU either,
+ * which dsa_user_change_mtu() will not update the conduit MTU either,
* so we need to do that here.
*/
ret = dev_set_mtu(dev, mtu);
@@ -392,9 +392,9 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
dev->dsa_ptr = cpu_dp;
- dsa_master_set_promiscuity(dev, 1);
+ dsa_conduit_set_promiscuity(dev, 1);
- ret = dsa_master_ethtool_setup(dev);
+ ret = dsa_conduit_ethtool_setup(dev);
if (ret)
goto out_err_reset_promisc;
@@ -405,18 +405,18 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
return ret;
out_err_ethtool_teardown:
- dsa_master_ethtool_teardown(dev);
+ dsa_conduit_ethtool_teardown(dev);
out_err_reset_promisc:
- dsa_master_set_promiscuity(dev, -1);
+ dsa_conduit_set_promiscuity(dev, -1);
return ret;
}
-void dsa_master_teardown(struct net_device *dev)
+void dsa_conduit_teardown(struct net_device *dev)
{
sysfs_remove_group(&dev->dev.kobj, &dsa_group);
- dsa_master_ethtool_teardown(dev);
- dsa_master_reset_mtu(dev);
- dsa_master_set_promiscuity(dev, -1);
+ dsa_conduit_ethtool_teardown(dev);
+ dsa_conduit_reset_mtu(dev);
+ dsa_conduit_set_promiscuity(dev, -1);
dev->dsa_ptr = NULL;
@@ -427,40 +427,40 @@ void dsa_master_teardown(struct net_device *dev)
wmb();
}
-int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
- struct netdev_lag_upper_info *uinfo,
- struct netlink_ext_ack *extack)
+int dsa_conduit_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
+ struct netdev_lag_upper_info *uinfo,
+ struct netlink_ext_ack *extack)
{
- bool master_setup = false;
+ bool conduit_setup = false;
int err;
if (!netdev_uses_dsa(lag_dev)) {
- err = dsa_master_setup(lag_dev, cpu_dp);
+ err = dsa_conduit_setup(lag_dev, cpu_dp);
if (err)
return err;
- master_setup = true;
+ conduit_setup = true;
}
err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
if (err) {
NL_SET_ERR_MSG_WEAK_MOD(extack, "CPU port failed to join LAG");
- goto out_master_teardown;
+ goto out_conduit_teardown;
}
return 0;
-out_master_teardown:
- if (master_setup)
- dsa_master_teardown(lag_dev);
+out_conduit_teardown:
+ if (conduit_setup)
+ dsa_conduit_teardown(lag_dev);
return err;
}
-/* Tear down a master if there isn't any other user port on it,
+/* Tear down a conduit if there isn't any other user port on it,
* optionally also destroying LAG information.
*/
-void dsa_master_lag_teardown(struct net_device *lag_dev,
- struct dsa_port *cpu_dp)
+void dsa_conduit_lag_teardown(struct net_device *lag_dev,
+ struct dsa_port *cpu_dp)
{
struct net_device *upper;
struct list_head *iter;
@@ -468,8 +468,8 @@ void dsa_master_lag_teardown(struct net_device *lag_dev,
dsa_port_lag_leave(cpu_dp, lag_dev);
netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
- if (dsa_slave_dev_check(upper))
+ if (dsa_user_dev_check(upper))
return;
- dsa_master_teardown(lag_dev);
+ dsa_conduit_teardown(lag_dev);
}
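
Aside: dsa_conduit_setup() above unwinds failures with goto labels that undo the earlier steps in reverse order. A compilable toy version of that shape, with the steps reduced to printouts:

#include <stdio.h>

static int step_promisc(int on) { printf("promisc %+d\n", on); return 0; }
static int step_ethtool(void)   { puts("ethtool ops installed"); return 0; }
static int step_sysfs(void)     { puts("sysfs group added"); return -1; /* pretend failure */ }
static void undo_ethtool(void)  { puts("ethtool ops restored"); }

static int setup(void)
{
	int err;

	step_promisc(1);

	err = step_ethtool();
	if (err)
		goto out_reset_promisc;

	err = step_sysfs();
	if (err)
		goto out_ethtool_teardown;

	return 0;

	/* Undo in reverse order of setup. */
out_ethtool_teardown:
	undo_ethtool();
out_reset_promisc:
	step_promisc(-1);
	return err;
}

int main(void)
{
	return setup() ? 1 : 0;
}
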
diff --git a/net/dsa/conduit.h b/net/dsa/conduit.h
new file mode 100644
index 000000000000..31f8834f54bb
--- /dev/null
+++ b/net/dsa/conduit.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __DSA_CONDUIT_H
+#define __DSA_CONDUIT_H
+
+struct dsa_port;
+struct net_device;
+struct netdev_lag_upper_info;
+struct netlink_ext_ack;
+
+int dsa_conduit_setup(struct net_device *dev, struct dsa_port *cpu_dp);
+void dsa_conduit_teardown(struct net_device *dev);
+int dsa_conduit_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
+ struct netdev_lag_upper_info *uinfo,
+ struct netlink_ext_ack *extack);
+void dsa_conduit_lag_teardown(struct net_device *lag_dev,
+ struct dsa_port *cpu_dp);
+int __dsa_conduit_hwtstamp_validate(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
+#endif
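
Aside: the new header only names dsa_port, net_device and friends behind pointers, so forward declarations are enough and no extra includes get pulled in. The same pattern in miniature, with a hypothetical widget type:

/* A header can name types that only appear behind pointers without
 * including their definitions. */
#ifndef EXAMPLE_FWD_H
#define EXAMPLE_FWD_H

struct widget;				/* defined elsewhere */

int widget_setup(struct widget *w);	/* pointer use only, no include needed */
void widget_teardown(struct widget *w);

#endif /* EXAMPLE_FWD_H */
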
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index ccbdb98109f8..ac7be864e80d 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -20,14 +20,14 @@
#include <net/dsa_stubs.h>
#include <net/sch_generic.h>
+#include "conduit.h"
#include "devlink.h"
#include "dsa.h"
-#include "master.h"
#include "netlink.h"
#include "port.h"
-#include "slave.h"
#include "switch.h"
#include "tag.h"
+#include "user.h"
#define DSA_MAX_NUM_OFFLOADING_BRIDGES BITS_PER_LONG
@@ -365,18 +365,18 @@ static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
return NULL;
}
-struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst)
+struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst)
{
struct device_node *ethernet;
- struct net_device *master;
+ struct net_device *conduit;
struct dsa_port *cpu_dp;
cpu_dp = dsa_tree_find_first_cpu(dst);
ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0);
- master = of_find_net_device_by_node(ethernet);
+ conduit = of_find_net_device_by_node(ethernet);
of_node_put(ethernet);
- return master;
+ return conduit;
}
/* Assign the default CPU port (the first one in the tree) to all ports of the
@@ -517,7 +517,7 @@ static int dsa_port_setup(struct dsa_port *dp)
break;
case DSA_PORT_TYPE_USER:
of_get_mac_address(dp->dn, dp->mac);
- err = dsa_slave_create(dp);
+ err = dsa_user_create(dp);
break;
}
@@ -554,9 +554,9 @@ static void dsa_port_teardown(struct dsa_port *dp)
dsa_shared_port_link_unregister_of(dp);
break;
case DSA_PORT_TYPE_USER:
- if (dp->slave) {
- dsa_slave_destroy(dp->slave);
- dp->slave = NULL;
+ if (dp->user) {
+ dsa_user_destroy(dp->user);
+ dp->user = NULL;
}
break;
}
@@ -632,9 +632,9 @@ static int dsa_switch_setup(struct dsa_switch *ds)
if (ds->setup)
return 0;
- /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
+ /* Initialize ds->phys_mii_mask before registering the user MDIO bus
* driver and before ops->setup() has run, since the switch drivers and
- * the slave MDIO bus driver rely on these values for probing PHY
+ * the user MDIO bus driver rely on these values for probing PHY
* devices or not
*/
ds->phys_mii_mask |= dsa_user_ports(ds);
@@ -657,21 +657,21 @@ static int dsa_switch_setup(struct dsa_switch *ds)
if (err)
goto teardown;
- if (!ds->slave_mii_bus && ds->ops->phy_read) {
- ds->slave_mii_bus = mdiobus_alloc();
- if (!ds->slave_mii_bus) {
+ if (!ds->user_mii_bus && ds->ops->phy_read) {
+ ds->user_mii_bus = mdiobus_alloc();
+ if (!ds->user_mii_bus) {
err = -ENOMEM;
goto teardown;
}
- dsa_slave_mii_bus_init(ds);
+ dsa_user_mii_bus_init(ds);
dn = of_get_child_by_name(ds->dev->of_node, "mdio");
- err = of_mdiobus_register(ds->slave_mii_bus, dn);
+ err = of_mdiobus_register(ds->user_mii_bus, dn);
of_node_put(dn);
if (err < 0)
- goto free_slave_mii_bus;
+ goto free_user_mii_bus;
}
dsa_switch_devlink_register(ds);
@@ -679,9 +679,9 @@ static int dsa_switch_setup(struct dsa_switch *ds)
ds->setup = true;
return 0;
-free_slave_mii_bus:
- if (ds->slave_mii_bus && ds->ops->phy_read)
- mdiobus_free(ds->slave_mii_bus);
+free_user_mii_bus:
+ if (ds->user_mii_bus && ds->ops->phy_read)
+ mdiobus_free(ds->user_mii_bus);
teardown:
if (ds->ops->teardown)
ds->ops->teardown(ds);
@@ -699,10 +699,10 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
dsa_switch_devlink_unregister(ds);
- if (ds->slave_mii_bus && ds->ops->phy_read) {
- mdiobus_unregister(ds->slave_mii_bus);
- mdiobus_free(ds->slave_mii_bus);
- ds->slave_mii_bus = NULL;
+ if (ds->user_mii_bus && ds->ops->phy_read) {
+ mdiobus_unregister(ds->user_mii_bus);
+ mdiobus_free(ds->user_mii_bus);
+ ds->user_mii_bus = NULL;
}
dsa_switch_teardown_tag_protocol(ds);
@@ -793,7 +793,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
return err;
}
-static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
+static int dsa_tree_setup_conduit(struct dsa_switch_tree *dst)
{
struct dsa_port *cpu_dp;
int err = 0;
@@ -801,18 +801,18 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
rtnl_lock();
dsa_tree_for_each_cpu_port(cpu_dp, dst) {
- struct net_device *master = cpu_dp->master;
- bool admin_up = (master->flags & IFF_UP) &&
- !qdisc_tx_is_noop(master);
+ struct net_device *conduit = cpu_dp->conduit;
+ bool admin_up = (conduit->flags & IFF_UP) &&
+ !qdisc_tx_is_noop(conduit);
- err = dsa_master_setup(master, cpu_dp);
+ err = dsa_conduit_setup(conduit, cpu_dp);
if (err)
break;
- /* Replay master state event */
- dsa_tree_master_admin_state_change(dst, master, admin_up);
- dsa_tree_master_oper_state_change(dst, master,
- netif_oper_up(master));
+ /* Replay conduit state event */
+ dsa_tree_conduit_admin_state_change(dst, conduit, admin_up);
+ dsa_tree_conduit_oper_state_change(dst, conduit,
+ netif_oper_up(conduit));
}
rtnl_unlock();
@@ -820,22 +820,22 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
return err;
}
-static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
+static void dsa_tree_teardown_conduit(struct dsa_switch_tree *dst)
{
struct dsa_port *cpu_dp;
rtnl_lock();
dsa_tree_for_each_cpu_port(cpu_dp, dst) {
- struct net_device *master = cpu_dp->master;
+ struct net_device *conduit = cpu_dp->conduit;
/* Synthesizing an "admin down" state is sufficient for
- * the switches to get a notification if the master is
+ * the switches to get a notification if the conduit is
* currently up and running.
*/
- dsa_tree_master_admin_state_change(dst, master, false);
+ dsa_tree_conduit_admin_state_change(dst, conduit, false);
- dsa_master_teardown(master);
+ dsa_conduit_teardown(conduit);
}
rtnl_unlock();
@@ -894,13 +894,13 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
if (err)
goto teardown_switches;
- err = dsa_tree_setup_master(dst);
+ err = dsa_tree_setup_conduit(dst);
if (err)
goto teardown_ports;
err = dsa_tree_setup_lags(dst);
if (err)
- goto teardown_master;
+ goto teardown_conduit;
dst->setup = true;
@@ -908,8 +908,8 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
return 0;
-teardown_master:
- dsa_tree_teardown_master(dst);
+teardown_conduit:
+ dsa_tree_teardown_conduit(dst);
teardown_ports:
dsa_tree_teardown_ports(dst);
teardown_switches:
@@ -929,7 +929,7 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dsa_tree_teardown_lags(dst);
- dsa_tree_teardown_master(dst);
+ dsa_tree_teardown_conduit(dst);
dsa_tree_teardown_ports(dst);
@@ -978,7 +978,7 @@ out_disconnect:
return err;
}
-/* Since the dsa/tagging sysfs device attribute is per master, the assumption
+/* Since the dsa/tagging sysfs device attribute is per conduit, the assumption
* is that all DSA switches within a tree share the same tagger, otherwise
* they would have formed disjoint trees (different "dsa,member" values).
*/
@@ -999,10 +999,10 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
* restriction, there needs to be another mutex which serializes this.
*/
dsa_tree_for_each_user_port(dp, dst) {
- if (dsa_port_to_master(dp)->flags & IFF_UP)
+ if (dsa_port_to_conduit(dp)->flags & IFF_UP)
goto out_unlock;
- if (dp->slave->flags & IFF_UP)
+ if (dp->user->flags & IFF_UP)
goto out_unlock;
}
@@ -1028,62 +1028,62 @@ out_unlock:
return err;
}
-static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
- struct net_device *master)
+static void dsa_tree_conduit_state_change(struct dsa_switch_tree *dst,
+ struct net_device *conduit)
{
- struct dsa_notifier_master_state_info info;
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_notifier_conduit_state_info info;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
- info.master = master;
- info.operational = dsa_port_master_is_operational(cpu_dp);
+ info.conduit = conduit;
+ info.operational = dsa_port_conduit_is_operational(cpu_dp);
- dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
+ dsa_tree_notify(dst, DSA_NOTIFIER_CONDUIT_STATE_CHANGE, &info);
}
-void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
- struct net_device *master,
- bool up)
+void dsa_tree_conduit_admin_state_change(struct dsa_switch_tree *dst,
+ struct net_device *conduit,
+ bool up)
{
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
bool notify = false;
- /* Don't keep track of admin state on LAG DSA masters,
- * but rather just of physical DSA masters
+ /* Don't keep track of admin state on LAG DSA conduits,
+ * but rather just of physical DSA conduits
*/
- if (netif_is_lag_master(master))
+ if (netif_is_lag_master(conduit))
return;
- if ((dsa_port_master_is_operational(cpu_dp)) !=
- (up && cpu_dp->master_oper_up))
+ if ((dsa_port_conduit_is_operational(cpu_dp)) !=
+ (up && cpu_dp->conduit_oper_up))
notify = true;
- cpu_dp->master_admin_up = up;
+ cpu_dp->conduit_admin_up = up;
if (notify)
- dsa_tree_master_state_change(dst, master);
+ dsa_tree_conduit_state_change(dst, conduit);
}
-void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
- struct net_device *master,
- bool up)
+void dsa_tree_conduit_oper_state_change(struct dsa_switch_tree *dst,
+ struct net_device *conduit,
+ bool up)
{
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
bool notify = false;
- /* Don't keep track of oper state on LAG DSA masters,
- * but rather just of physical DSA masters
+ /* Don't keep track of oper state on LAG DSA conduits,
+ * but rather just of physical DSA conduits
*/
- if (netif_is_lag_master(master))
+ if (netif_is_lag_master(conduit))
return;
- if ((dsa_port_master_is_operational(cpu_dp)) !=
- (cpu_dp->master_admin_up && up))
+ if ((dsa_port_conduit_is_operational(cpu_dp)) !=
+ (cpu_dp->conduit_admin_up && up))
notify = true;
- cpu_dp->master_oper_up = up;
+ cpu_dp->conduit_oper_up = up;
if (notify)
- dsa_tree_master_state_change(dst, master);
+ dsa_tree_conduit_state_change(dst, conduit);
}
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
@@ -1129,7 +1129,7 @@ static int dsa_port_parse_dsa(struct dsa_port *dp)
}
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
- struct net_device *master)
+ struct net_device *conduit)
{
enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
struct dsa_switch *mds, *ds = dp->ds;
@@ -1140,21 +1140,21 @@ static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
* happens the switch driver may want to know if its tagging protocol
* is going to work in such a configuration.
*/
- if (dsa_slave_dev_check(master)) {
- mdp = dsa_slave_to_port(master);
+ if (dsa_user_dev_check(conduit)) {
+ mdp = dsa_user_to_port(conduit);
mds = mdp->ds;
mdp_upstream = dsa_upstream_port(mds, mdp->index);
tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
DSA_TAG_PROTO_NONE);
}
- /* If the master device is not itself a DSA slave in a disjoint DSA
+ /* If the conduit device is not itself a DSA user in a disjoint DSA
* tree, then return immediately.
*/
return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
-static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
+static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *conduit,
const char *user_protocol)
{
const struct dsa_device_ops *tag_ops = NULL;
@@ -1163,7 +1163,7 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
enum dsa_tag_protocol default_proto;
/* Find out which protocol the switch would prefer. */
- default_proto = dsa_get_tag_protocol(dp, master);
+ default_proto = dsa_get_tag_protocol(dp, conduit);
if (dst->default_proto) {
if (dst->default_proto != default_proto) {
dev_err(ds->dev,
@@ -1218,7 +1218,7 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
dst->tag_ops = tag_ops;
}
- dp->master = master;
+ dp->conduit = conduit;
dp->type = DSA_PORT_TYPE_CPU;
dsa_port_set_tag_protocol(dp, dst->tag_ops);
dp->dst = dst;
@@ -1248,16 +1248,16 @@ static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
dp->dn = dn;
if (ethernet) {
- struct net_device *master;
+ struct net_device *conduit;
const char *user_protocol;
- master = of_find_net_device_by_node(ethernet);
+ conduit = of_find_net_device_by_node(ethernet);
of_node_put(ethernet);
- if (!master)
+ if (!conduit)
return -EPROBE_DEFER;
user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
- return dsa_port_parse_cpu(dp, master, user_protocol);
+ return dsa_port_parse_cpu(dp, conduit, user_protocol);
}
if (link)
@@ -1412,15 +1412,15 @@ static int dsa_port_parse(struct dsa_port *dp, const char *name,
struct device *dev)
{
if (!strcmp(name, "cpu")) {
- struct net_device *master;
+ struct net_device *conduit;
- master = dsa_dev_to_net_device(dev);
- if (!master)
+ conduit = dsa_dev_to_net_device(dev);
+ if (!conduit)
return -EPROBE_DEFER;
- dev_put(master);
+ dev_put(conduit);
- return dsa_port_parse_cpu(dp, master, NULL);
+ return dsa_port_parse_cpu(dp, conduit, NULL);
}
if (!strcmp(name, "dsa"))
@@ -1566,14 +1566,14 @@ void dsa_unregister_switch(struct dsa_switch *ds)
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
-/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
+/* If the DSA conduit chooses to unregister its net_device on .shutdown, DSA is
* blocking that operation from completion, due to the dev_hold taken inside
- * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
- * the DSA master, so that the system can reboot successfully.
+ * netdev_upper_dev_link. Unlink the DSA user interfaces from being uppers of
+ * the DSA conduit, so that the system can reboot successfully.
*/
void dsa_switch_shutdown(struct dsa_switch *ds)
{
- struct net_device *master, *slave_dev;
+ struct net_device *conduit, *user_dev;
struct dsa_port *dp;
mutex_lock(&dsa2_mutex);
@@ -1584,17 +1584,17 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
rtnl_lock();
dsa_switch_for_each_user_port(dp, ds) {
- master = dsa_port_to_master(dp);
- slave_dev = dp->slave;
+ conduit = dsa_port_to_conduit(dp);
+ user_dev = dp->user;
- netdev_upper_dev_unlink(master, slave_dev);
+ netdev_upper_dev_unlink(conduit, user_dev);
}
- /* Disconnect from further netdevice notifiers on the master,
+ /* Disconnect from further netdevice notifiers on the conduit,
* since netdev_uses_dsa() will now return false.
*/
dsa_switch_for_each_cpu_port(dp, ds)
- dp->master->dsa_ptr = NULL;
+ dp->conduit->dsa_ptr = NULL;
rtnl_unlock();
out:
@@ -1605,7 +1605,7 @@ EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
- return dp->type == DSA_PORT_TYPE_USER && dp->slave;
+ return dp->type == DSA_PORT_TYPE_USER && dp->user;
}
int dsa_switch_suspend(struct dsa_switch *ds)
@@ -1613,12 +1613,12 @@ int dsa_switch_suspend(struct dsa_switch *ds)
struct dsa_port *dp;
int ret = 0;
- /* Suspend slave network devices */
+ /* Suspend user network devices */
dsa_switch_for_each_port(dp, ds) {
if (!dsa_port_is_initialized(dp))
continue;
- ret = dsa_slave_suspend(dp->slave);
+ ret = dsa_user_suspend(dp->user);
if (ret)
return ret;
}
@@ -1641,12 +1641,12 @@ int dsa_switch_resume(struct dsa_switch *ds)
if (ret)
return ret;
- /* Resume slave network devices */
+ /* Resume user network devices */
dsa_switch_for_each_port(dp, ds) {
if (!dsa_port_is_initialized(dp))
continue;
- ret = dsa_slave_resume(dp->slave);
+ ret = dsa_user_resume(dp->user);
if (ret)
return ret;
}
@@ -1658,10 +1658,10 @@ EXPORT_SYMBOL_GPL(dsa_switch_resume);
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
- if (!netdev || !dsa_slave_dev_check(netdev))
+ if (!netdev || !dsa_user_dev_check(netdev))
return ERR_PTR(-ENODEV);
- return dsa_slave_to_port(netdev);
+ return dsa_user_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);
@@ -1726,7 +1726,7 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);
static const struct dsa_stubs __dsa_stubs = {
- .master_hwtstamp_validate = __dsa_master_hwtstamp_validate,
+ .conduit_hwtstamp_validate = __dsa_conduit_hwtstamp_validate,
};
static void dsa_register_stubs(void)
@@ -1748,7 +1748,7 @@ static int __init dsa_init_module(void)
if (!dsa_owq)
return -ENOMEM;
- rc = dsa_slave_register_notifier();
+ rc = dsa_user_register_notifier();
if (rc)
goto register_notifier_fail;
@@ -1763,7 +1763,7 @@ static int __init dsa_init_module(void)
return 0;
netlink_register_fail:
- dsa_slave_unregister_notifier();
+ dsa_user_unregister_notifier();
dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
destroy_workqueue(dsa_owq);
@@ -1778,7 +1778,7 @@ static void __exit dsa_cleanup_module(void)
rtnl_link_unregister(&dsa_link_ops);
- dsa_slave_unregister_notifier();
+ dsa_user_unregister_notifier();
dev_remove_pack(&dsa_pack_type);
destroy_workqueue(dsa_owq);
}
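
Aside: the conduit admin/oper tracking above only raises a notifier when the combined "admin up and oper up" predicate actually flips. A small standalone model of that logic:

#include <stdbool.h>
#include <stdio.h>

struct cpu_port {
	bool conduit_admin_up;
	bool conduit_oper_up;
};

static bool operational(const struct cpu_port *p)
{
	return p->conduit_admin_up && p->conduit_oper_up;
}

static void admin_state_change(struct cpu_port *p, bool up)
{
	bool was = operational(p);

	p->conduit_admin_up = up;
	if (was != operational(p))
		puts("notify: conduit state changed");
}

static void oper_state_change(struct cpu_port *p, bool up)
{
	bool was = operational(p);

	p->conduit_oper_up = up;
	if (was != operational(p))
		puts("notify: conduit state changed");
}

int main(void)
{
	struct cpu_port p = { false, false };

	admin_state_change(&p, true);	/* no notify yet: oper still down */
	oper_state_change(&p, true);	/* notify: now operational */
	oper_state_change(&p, false);	/* notify: no longer operational */
	return 0;
}
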
diff --git a/net/dsa/dsa.h b/net/dsa/dsa.h
index b7e17ae1094d..3cc7823e9ef3 100644
--- a/net/dsa/dsa.h
+++ b/net/dsa/dsa.h
@@ -21,16 +21,16 @@ void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag);
struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
const struct net_device *lag_dev);
-struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst);
+struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
const struct dsa_device_ops *tag_ops,
const struct dsa_device_ops *old_tag_ops);
-void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
- struct net_device *master,
+void dsa_tree_conduit_admin_state_change(struct dsa_switch_tree *dst,
+ struct net_device *conduit,
+ bool up);
+void dsa_tree_conduit_oper_state_change(struct dsa_switch_tree *dst,
+ struct net_device *conduit,
bool up);
-void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
- struct net_device *master,
- bool up);
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
void dsa_bridge_num_put(const struct net_device *bridge_dev,
unsigned int bridge_num);
diff --git a/net/dsa/master.h b/net/dsa/master.h
deleted file mode 100644
index 76e39d3ec909..000000000000
--- a/net/dsa/master.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-
-#ifndef __DSA_MASTER_H
-#define __DSA_MASTER_H
-
-struct dsa_port;
-struct net_device;
-struct netdev_lag_upper_info;
-struct netlink_ext_ack;
-
-int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
-void dsa_master_teardown(struct net_device *dev);
-int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
- struct netdev_lag_upper_info *uinfo,
- struct netlink_ext_ack *extack);
-void dsa_master_lag_teardown(struct net_device *lag_dev,
- struct dsa_port *cpu_dp);
-int __dsa_master_hwtstamp_validate(struct net_device *dev,
- const struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack);
-
-#endif
diff --git a/net/dsa/netlink.c b/net/dsa/netlink.c
index bd4bbaf851de..1332e56349e5 100644
--- a/net/dsa/netlink.c
+++ b/net/dsa/netlink.c
@@ -5,10 +5,10 @@
#include <net/rtnetlink.h>
#include "netlink.h"
-#include "slave.h"
+#include "user.h"
static const struct nla_policy dsa_policy[IFLA_DSA_MAX + 1] = {
- [IFLA_DSA_MASTER] = { .type = NLA_U32 },
+ [IFLA_DSA_CONDUIT] = { .type = NLA_U32 },
};
static int dsa_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -20,15 +20,15 @@ static int dsa_changelink(struct net_device *dev, struct nlattr *tb[],
if (!data)
return 0;
- if (data[IFLA_DSA_MASTER]) {
- u32 ifindex = nla_get_u32(data[IFLA_DSA_MASTER]);
- struct net_device *master;
+ if (data[IFLA_DSA_CONDUIT]) {
+ u32 ifindex = nla_get_u32(data[IFLA_DSA_CONDUIT]);
+ struct net_device *conduit;
- master = __dev_get_by_index(dev_net(dev), ifindex);
- if (!master)
+ conduit = __dev_get_by_index(dev_net(dev), ifindex);
+ if (!conduit)
return -EINVAL;
- err = dsa_slave_change_master(dev, master, extack);
+ err = dsa_user_change_conduit(dev, conduit, extack);
if (err)
return err;
}
@@ -38,15 +38,15 @@ static int dsa_changelink(struct net_device *dev, struct nlattr *tb[],
static size_t dsa_get_size(const struct net_device *dev)
{
- return nla_total_size(sizeof(u32)) + /* IFLA_DSA_MASTER */
+ return nla_total_size(sizeof(u32)) + /* IFLA_DSA_CONDUIT */
0;
}
static int dsa_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
- struct net_device *master = dsa_slave_to_master(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
- if (nla_put_u32(skb, IFLA_DSA_MASTER, master->ifindex))
+ if (nla_put_u32(skb, IFLA_DSA_CONDUIT, conduit->ifindex))
return -EMSGSIZE;
return 0;
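
Aside: dsa_get_size() reserves nla_total_size(sizeof(u32)) for IFLA_DSA_CONDUIT; with the usual netlink layout that is a 4-byte attribute header plus the payload padded to a 4-byte boundary. A quick standalone calculation under that assumption:

#include <stdint.h>
#include <stdio.h>

#define ALIGNTO		4u
#define ALIGN4(len)	(((len) + ALIGNTO - 1) & ~(ALIGNTO - 1))
#define HDRLEN		4u	/* u16 nla_len + u16 nla_type */

static unsigned int attr_total_size(unsigned int payload)
{
	return ALIGN4(HDRLEN + payload);
}

int main(void)
{
	/* IFLA_DSA_CONDUIT carries one u32 ifindex. */
	printf("u32 attribute needs %u bytes\n",
	       attr_total_size(sizeof(uint32_t)));
	return 0;
}
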
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 6e0d000a97c4..c42dac87671b 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -14,9 +14,9 @@
#include "dsa.h"
#include "port.h"
-#include "slave.h"
#include "switch.h"
#include "tag_8021q.h"
+#include "user.h"
/**
* dsa_port_notify - Notify the switching fabric of changes to a port
@@ -289,7 +289,7 @@ static void dsa_port_reset_vlan_filtering(struct dsa_port *dp,
}
/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
- * event for changing vlan_filtering setting upon slave ports leaving
+ * event for changing vlan_filtering setting upon user ports leaving
* it. That is a good thing, because that lets us handle it and also
* handle the case where the switch's vlan_filtering setting is global
* (not per port). When that happens, the correct moment to trigger the
@@ -489,7 +489,7 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
.dp = dp,
.extack = extack,
};
- struct net_device *dev = dp->slave;
+ struct net_device *dev = dp->user;
struct net_device *brport_dev;
int err;
@@ -514,8 +514,8 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
dp->bridge->tx_fwd_offload = info.tx_fwd_offload;
err = switchdev_bridge_port_offload(brport_dev, dev, dp,
- &dsa_slave_switchdev_notifier,
- &dsa_slave_switchdev_blocking_notifier,
+ &dsa_user_switchdev_notifier,
+ &dsa_user_switchdev_blocking_notifier,
dp->bridge->tx_fwd_offload, extack);
if (err)
goto out_rollback_unbridge;
@@ -528,8 +528,8 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
out_rollback_unoffload:
switchdev_bridge_port_unoffload(brport_dev, dp,
- &dsa_slave_switchdev_notifier,
- &dsa_slave_switchdev_blocking_notifier);
+ &dsa_user_switchdev_notifier,
+ &dsa_user_switchdev_blocking_notifier);
dsa_flush_workqueue();
out_rollback_unbridge:
dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
@@ -547,8 +547,8 @@ void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
return;
switchdev_bridge_port_unoffload(brport_dev, dp,
- &dsa_slave_switchdev_notifier,
- &dsa_slave_switchdev_blocking_notifier);
+ &dsa_user_switchdev_notifier,
+ &dsa_user_switchdev_blocking_notifier);
dsa_flush_workqueue();
}
@@ -741,10 +741,10 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
*/
if (vlan_filtering && dsa_port_is_user(dp)) {
struct net_device *br = dsa_port_bridge_dev_get(dp);
- struct net_device *upper_dev, *slave = dp->slave;
+ struct net_device *upper_dev, *user = dp->user;
struct list_head *iter;
- netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
+ netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
struct bridge_vlan_info br_info;
u16 vid;
@@ -803,9 +803,9 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
if (!ds->ops->port_vlan_filtering)
return -EOPNOTSUPP;
- /* We are called from dsa_slave_switchdev_blocking_event(),
+ /* We are called from dsa_user_switchdev_blocking_event(),
* which is not under rcu_read_lock(), unlike
- * dsa_slave_switchdev_event().
+ * dsa_user_switchdev_event().
*/
rcu_read_lock();
apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
@@ -827,24 +827,24 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
ds->vlan_filtering = vlan_filtering;
dsa_switch_for_each_user_port(other_dp, ds) {
- struct net_device *slave = other_dp->slave;
+ struct net_device *user = other_dp->user;
/* We might be called in the unbind path, so not
- * all slave devices might still be registered.
+ * all user devices might still be registered.
*/
- if (!slave)
+ if (!user)
continue;
- err = dsa_slave_manage_vlan_filtering(slave,
- vlan_filtering);
+ err = dsa_user_manage_vlan_filtering(user,
+ vlan_filtering);
if (err)
goto restore;
}
} else {
dp->vlan_filtering = vlan_filtering;
- err = dsa_slave_manage_vlan_filtering(dp->slave,
- vlan_filtering);
+ err = dsa_user_manage_vlan_filtering(dp->user,
+ vlan_filtering);
if (err)
goto restore;
}
@@ -863,7 +863,7 @@ restore:
}
/* This enforces legacy behavior for switch drivers which assume they can't
- * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
+ * receive VLAN configuration when joining a bridge with vlan_filtering=0
*/
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
@@ -1047,7 +1047,7 @@ int dsa_port_standalone_host_fdb_add(struct dsa_port *dp,
int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
const unsigned char *addr, u16 vid)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
@@ -1057,12 +1057,12 @@ int dsa_port_bridge_host_fdb_add(struct dsa_port *dp,
if (!dp->ds->fdb_isolation)
db.bridge.num = 0;
- /* Avoid a call to __dev_set_promiscuity() on the master, which
+ /* Avoid a call to __dev_set_promiscuity() on the conduit, which
* requires rtnl_lock(), since we can't guarantee that is held here,
* and we can't take it either.
*/
- if (master->priv_flags & IFF_UNICAST_FLT) {
- err = dev_uc_add(master, addr);
+ if (conduit->priv_flags & IFF_UNICAST_FLT) {
+ err = dev_uc_add(conduit, addr);
if (err)
return err;
}
@@ -1098,7 +1098,7 @@ int dsa_port_standalone_host_fdb_del(struct dsa_port *dp,
int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
const unsigned char *addr, u16 vid)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
@@ -1108,8 +1108,8 @@ int dsa_port_bridge_host_fdb_del(struct dsa_port *dp,
if (!dp->ds->fdb_isolation)
db.bridge.num = 0;
- if (master->priv_flags & IFF_UNICAST_FLT) {
- err = dev_uc_del(master, addr);
+ if (conduit->priv_flags & IFF_UNICAST_FLT) {
+ err = dev_uc_del(conduit, addr);
if (err)
return err;
}
@@ -1229,7 +1229,7 @@ int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp,
int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
@@ -1239,7 +1239,7 @@ int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp,
if (!dp->ds->fdb_isolation)
db.bridge.num = 0;
- err = dev_mc_add(master, mdb->addr);
+ err = dev_mc_add(conduit, mdb->addr);
if (err)
return err;
@@ -1273,7 +1273,7 @@ int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp,
int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
const struct switchdev_obj_port_mdb *mdb)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_db db = {
.type = DSA_DB_BRIDGE,
.bridge = *dp->bridge,
@@ -1283,7 +1283,7 @@ int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp,
if (!dp->ds->fdb_isolation)
db.bridge.num = 0;
- err = dev_mc_del(master, mdb->addr);
+ err = dev_mc_del(conduit, mdb->addr);
if (err)
return err;
@@ -1318,7 +1318,7 @@ int dsa_port_host_vlan_add(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_notifier_vlan_info info = {
.dp = dp,
.vlan = vlan,
@@ -1330,7 +1330,7 @@ int dsa_port_host_vlan_add(struct dsa_port *dp,
if (err && err != -EOPNOTSUPP)
return err;
- vlan_vid_add(master, htons(ETH_P_8021Q), vlan->vid);
+ vlan_vid_add(conduit, htons(ETH_P_8021Q), vlan->vid);
return err;
}
@@ -1338,7 +1338,7 @@ int dsa_port_host_vlan_add(struct dsa_port *dp,
int dsa_port_host_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan)
{
- struct net_device *master = dsa_port_to_master(dp);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_notifier_vlan_info info = {
.dp = dp,
.vlan = vlan,
@@ -1349,7 +1349,7 @@ int dsa_port_host_vlan_del(struct dsa_port *dp,
if (err && err != -EOPNOTSUPP)
return err;
- vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);
+ vlan_vid_del(conduit, htons(ETH_P_8021Q), vlan->vid);
return err;
}
@@ -1398,24 +1398,24 @@ int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}
-static int dsa_port_assign_master(struct dsa_port *dp,
- struct net_device *master,
- struct netlink_ext_ack *extack,
- bool fail_on_err)
+static int dsa_port_assign_conduit(struct dsa_port *dp,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack,
+ bool fail_on_err)
{
struct dsa_switch *ds = dp->ds;
int port = dp->index, err;
- err = ds->ops->port_change_master(ds, port, master, extack);
+ err = ds->ops->port_change_conduit(ds, port, conduit, extack);
if (err && !fail_on_err)
- dev_err(ds->dev, "port %d failed to assign master %s: %pe\n",
- port, master->name, ERR_PTR(err));
+ dev_err(ds->dev, "port %d failed to assign conduit %s: %pe\n",
+ port, conduit->name, ERR_PTR(err));
if (err && fail_on_err)
return err;
- dp->cpu_dp = master->dsa_ptr;
- dp->cpu_port_in_lag = netif_is_lag_master(master);
+ dp->cpu_dp = conduit->dsa_ptr;
+ dp->cpu_port_in_lag = netif_is_lag_master(conduit);
return 0;
}
@@ -1428,12 +1428,12 @@ static int dsa_port_assign_master(struct dsa_port *dp,
* the old CPU port before changing it, and restore it on errors during the
* bringup of the new one.
*/
-int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
- struct netlink_ext_ack *extack)
+int dsa_port_change_conduit(struct dsa_port *dp, struct net_device *conduit,
+ struct netlink_ext_ack *extack)
{
struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
- struct net_device *old_master = dsa_port_to_master(dp);
- struct net_device *dev = dp->slave;
+ struct net_device *old_conduit = dsa_port_to_conduit(dp);
+ struct net_device *dev = dp->user;
struct dsa_switch *ds = dp->ds;
bool vlan_filtering;
int err, tmp;
@@ -1454,7 +1454,7 @@ int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
*/
vlan_filtering = dsa_port_is_vlan_filtering(dp);
if (vlan_filtering) {
- err = dsa_slave_manage_vlan_filtering(dev, false);
+ err = dsa_user_manage_vlan_filtering(dev, false);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to remove standalone VLANs");
@@ -1465,16 +1465,16 @@ int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
/* Standalone addresses, and addresses of upper interfaces like
* VLAN, LAG, HSR need to be migrated.
*/
- dsa_slave_unsync_ha(dev);
+ dsa_user_unsync_ha(dev);
- err = dsa_port_assign_master(dp, master, extack, true);
+ err = dsa_port_assign_conduit(dp, conduit, extack, true);
if (err)
goto rewind_old_addrs;
- dsa_slave_sync_ha(dev);
+ dsa_user_sync_ha(dev);
if (vlan_filtering) {
- err = dsa_slave_manage_vlan_filtering(dev, true);
+ err = dsa_user_manage_vlan_filtering(dev, true);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to restore standalone VLANs");
@@ -1495,19 +1495,19 @@ int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
rewind_new_vlan:
if (vlan_filtering)
- dsa_slave_manage_vlan_filtering(dev, false);
+ dsa_user_manage_vlan_filtering(dev, false);
rewind_new_addrs:
- dsa_slave_unsync_ha(dev);
+ dsa_user_unsync_ha(dev);
- dsa_port_assign_master(dp, old_master, NULL, false);
+ dsa_port_assign_conduit(dp, old_conduit, NULL, false);
/* Restore the objects on the old CPU port */
rewind_old_addrs:
- dsa_slave_sync_ha(dev);
+ dsa_user_sync_ha(dev);
if (vlan_filtering) {
- tmp = dsa_slave_manage_vlan_filtering(dev, true);
+ tmp = dsa_user_manage_vlan_filtering(dev, true);
if (tmp) {
dev_err(ds->dev,
"port %d failed to restore standalone VLANs: %pe\n",
@@ -1620,7 +1620,7 @@ static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
struct dsa_switch *ds = dp->ds;
if (dsa_port_is_user(dp))
- phydev = dp->slave->phydev;
+ phydev = dp->user->phydev;
if (!ds->ops->phylink_mac_link_down) {
if (ds->ops->adjust_link && phydev)
@@ -1808,7 +1808,7 @@ err_phy_connect:
* their type.
*
* User ports with no phy-handle or fixed-link are expected to connect to an
- * internal PHY located on the ds->slave_mii_bus at an MDIO address equal to
+ * internal PHY located on the ds->user_mii_bus at an MDIO address equal to
* the port number. This description is still actively supported.
*
* Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to
@@ -1829,7 +1829,7 @@ err_phy_connect:
* a fixed-link, a phy-handle, or a managed = "in-band-status" property.
* It becomes the responsibility of the driver to ensure that these ports
* operate at the maximum speed (whatever this means) and will interoperate
- * with the DSA master or other cascade port, since phylink methods will not be
+ * with the DSA conduit or other cascade port, since phylink methods will not be
* invoked for them.
*
* If you are considering expanding this table for newly introduced switches,
diff --git a/net/dsa/port.h b/net/dsa/port.h
index 334879964e2c..6bc3291573c0 100644
--- a/net/dsa/port.h
+++ b/net/dsa/port.h
@@ -109,7 +109,7 @@ void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc);
-int dsa_port_change_master(struct dsa_port *dp, struct net_device *master,
- struct netlink_ext_ack *extack);
+int dsa_port_change_conduit(struct dsa_port *dp, struct net_device *conduit,
+ struct netlink_ext_ack *extack);
#endif
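
Sketch, not part of this file: dsa_port_change_conduit() ultimately calls the driver's renamed ds->ops->port_change_conduit() op. A hypothetical driver implementation — the foo_* names are illustrative only — could look like this:

static int foo_port_change_conduit(struct dsa_switch *ds, int port,
				   struct net_device *conduit,
				   struct netlink_ext_ack *extack)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;

	/* Steer host-directed traffic of this user port to the new CPU port */
	return foo_set_upstream_port(ds, port, cpu_dp->index);
}
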
diff --git a/net/dsa/slave.h b/net/dsa/slave.h
deleted file mode 100644
index d0abe609e00d..000000000000
--- a/net/dsa/slave.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-
-#ifndef __DSA_SLAVE_H
-#define __DSA_SLAVE_H
-
-#include <linux/if_bridge.h>
-#include <linux/if_vlan.h>
-#include <linux/list.h>
-#include <linux/netpoll.h>
-#include <linux/types.h>
-#include <net/dsa.h>
-#include <net/gro_cells.h>
-
-struct net_device;
-struct netlink_ext_ack;
-
-extern struct notifier_block dsa_slave_switchdev_notifier;
-extern struct notifier_block dsa_slave_switchdev_blocking_notifier;
-
-struct dsa_slave_priv {
- /* Copy of CPU port xmit for faster access in slave transmit hot path */
- struct sk_buff * (*xmit)(struct sk_buff *skb,
- struct net_device *dev);
-
- struct gro_cells gcells;
-
- /* DSA port data, such as switch, port index, etc. */
- struct dsa_port *dp;
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
- struct netpoll *netpoll;
-#endif
-
- /* TC context */
- struct list_head mall_tc_list;
-};
-
-void dsa_slave_mii_bus_init(struct dsa_switch *ds);
-int dsa_slave_create(struct dsa_port *dp);
-void dsa_slave_destroy(struct net_device *slave_dev);
-int dsa_slave_suspend(struct net_device *slave_dev);
-int dsa_slave_resume(struct net_device *slave_dev);
-int dsa_slave_register_notifier(void);
-void dsa_slave_unregister_notifier(void);
-void dsa_slave_sync_ha(struct net_device *dev);
-void dsa_slave_unsync_ha(struct net_device *dev);
-void dsa_slave_setup_tagger(struct net_device *slave);
-int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
-int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
- struct netlink_ext_ack *extack);
-int dsa_slave_manage_vlan_filtering(struct net_device *dev,
- bool vlan_filtering);
-
-static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
-{
- struct dsa_slave_priv *p = netdev_priv(dev);
-
- return p->dp;
-}
-
-static inline struct net_device *
-dsa_slave_to_master(const struct net_device *dev)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
-
- return dsa_port_to_master(dp);
-}
-
-#endif
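
For reference: the helpers deleted above reappear under the new naming in net/dsa/user.h. A minimal sketch, assuming the new header mirrors the removed one one-to-one:

static inline struct dsa_port *dsa_user_to_port(const struct net_device *dev)
{
	struct dsa_user_priv *p = netdev_priv(dev);

	return p->dp;
}

static inline struct net_device *
dsa_user_to_conduit(const struct net_device *dev)
{
	struct dsa_port *dp = dsa_user_to_port(dev);

	return dsa_port_to_conduit(dp);
}
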
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 1a42f9317334..3d2feeea897b 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -15,10 +15,10 @@
#include "dsa.h"
#include "netlink.h"
#include "port.h"
-#include "slave.h"
#include "switch.h"
#include "tag_8021q.h"
#include "trace.h"
+#include "user.h"
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
@@ -894,12 +894,12 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
* bits that depend on the tagger, such as the MTU.
*/
dsa_switch_for_each_user_port(dp, ds) {
- struct net_device *slave = dp->slave;
+ struct net_device *user = dp->user;
- dsa_slave_setup_tagger(slave);
+ dsa_user_setup_tagger(user);
/* rtnl_mutex is held in dsa_tree_change_tag_proto */
- dsa_slave_change_mtu(slave, slave->mtu);
+ dsa_user_change_mtu(user, user->mtu);
}
return 0;
@@ -960,13 +960,13 @@ dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
}
static int
-dsa_switch_master_state_change(struct dsa_switch *ds,
- struct dsa_notifier_master_state_info *info)
+dsa_switch_conduit_state_change(struct dsa_switch *ds,
+ struct dsa_notifier_conduit_state_info *info)
{
- if (!ds->ops->master_state_change)
+ if (!ds->ops->conduit_state_change)
return 0;
- ds->ops->master_state_change(ds, info->master, info->operational);
+ ds->ops->conduit_state_change(ds, info->conduit, info->operational);
return 0;
}
@@ -1056,8 +1056,8 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
err = dsa_switch_tag_8021q_vlan_del(ds, info);
break;
- case DSA_NOTIFIER_MASTER_STATE_CHANGE:
- err = dsa_switch_master_state_change(ds, info);
+ case DSA_NOTIFIER_CONDUIT_STATE_CHANGE:
+ err = dsa_switch_conduit_state_change(ds, info);
break;
default:
err = -EOPNOTSUPP;
diff --git a/net/dsa/switch.h b/net/dsa/switch.h
index ea034677da15..be0a2749cd97 100644
--- a/net/dsa/switch.h
+++ b/net/dsa/switch.h
@@ -34,7 +34,7 @@ enum {
DSA_NOTIFIER_TAG_PROTO_DISCONNECT,
DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
- DSA_NOTIFIER_MASTER_STATE_CHANGE,
+ DSA_NOTIFIER_CONDUIT_STATE_CHANGE,
};
/* DSA_NOTIFIER_AGEING_TIME */
@@ -105,9 +105,9 @@ struct dsa_notifier_tag_8021q_vlan_info {
u16 vid;
};
-/* DSA_NOTIFIER_MASTER_STATE_CHANGE */
-struct dsa_notifier_master_state_info {
- const struct net_device *master;
+/* DSA_NOTIFIER_CONDUIT_STATE_CHANGE */
+struct dsa_notifier_conduit_state_info {
+ const struct net_device *conduit;
bool operational;
};
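
Sketch, not part of this file: a driver consuming DSA_NOTIFIER_CONDUIT_STATE_CHANGE implements the renamed op with this shape — the foo_* name is illustrative only:

static void foo_conduit_state_change(struct dsa_switch *ds,
				     const struct net_device *conduit,
				     bool operational)
{
	/* e.g. only enable the host injection/extraction path while the
	 * conduit is up and able to pass traffic
	 */
	foo_enable_host_path(ds, conduit->dsa_ptr->index, operational);
}
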
diff --git a/net/dsa/tag.c b/net/dsa/tag.c
index 5105a5ff58fa..6e402d49afd3 100644
--- a/net/dsa/tag.c
+++ b/net/dsa/tag.c
@@ -13,8 +13,8 @@
#include <net/dsa.h>
#include <net/dst_metadata.h>
-#include "slave.h"
#include "tag.h"
+#include "user.h"
static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(dsa_tag_drivers_lock);
* switch, the DSA driver owning the interface to which the packet is
* delivered is never notified unless we do so here.
*/
-static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
+static bool dsa_skb_defer_rx_timestamp(struct dsa_user_priv *p,
struct sk_buff *skb)
{
struct dsa_switch *ds = p->dp->ds;
@@ -57,7 +57,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
struct metadata_dst *md_dst = skb_metadata_dst(skb);
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct sk_buff *nskb = NULL;
- struct dsa_slave_priv *p;
+ struct dsa_user_priv *p;
if (unlikely(!cpu_dp)) {
kfree_skb(skb);
@@ -75,7 +75,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb_has_extensions(skb))
skb->slow_gro = 0;
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (likely(skb->dev)) {
dsa_default_offload_fwd_mark(skb);
nskb = skb;
@@ -94,7 +94,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
- if (unlikely(!dsa_slave_dev_check(skb->dev))) {
+ if (unlikely(!dsa_user_dev_check(skb->dev))) {
/* Packet is to be injected directly on an upper
* device, e.g. a team/bond, so skip all DSA-port
* specific actions.
diff --git a/net/dsa/tag.h b/net/dsa/tag.h
index 32d12f4a9d73..f6b9c73718df 100644
--- a/net/dsa/tag.h
+++ b/net/dsa/tag.h
@@ -9,7 +9,7 @@
#include <net/dsa.h>
#include "port.h"
-#include "slave.h"
+#include "user.h"
struct dsa_tag_driver {
const struct dsa_device_ops *ops;
@@ -29,7 +29,7 @@ static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
return ops->needed_headroom + ops->needed_tailroom;
}
-static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
+static inline struct net_device *dsa_conduit_find_user(struct net_device *dev,
int device, int port)
{
struct dsa_port *cpu_dp = dev->dsa_ptr;
@@ -39,7 +39,7 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
list_for_each_entry(dp, &dst->ports, list)
if (dp->ds->index == device && dp->index == port &&
dp->type == DSA_PORT_TYPE_USER)
- return dp->slave;
+ return dp->user;
return NULL;
}
@@ -49,7 +49,7 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
*/
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
- struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct net_device *dev = skb->dev;
struct net_device *upper_dev;
@@ -107,12 +107,12 @@ static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
* to support termination through the bridge.
*/
static inline struct net_device *
-dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
+dsa_find_designated_bridge_port_by_vid(struct net_device *conduit, u16 vid)
{
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct bridge_vlan_info vinfo;
- struct net_device *slave;
+ struct net_device *user;
struct dsa_port *dp;
int err;
@@ -134,13 +134,13 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
if (dp->cpu_dp != cpu_dp)
continue;
- slave = dp->slave;
+ user = dp->user;
- err = br_vlan_get_info_rcu(slave, vid, &vinfo);
+ err = br_vlan_get_info_rcu(user, vid, &vinfo);
if (err)
continue;
- return slave;
+ return user;
}
return NULL;
@@ -155,7 +155,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
*/
static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
- struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
skb->offload_fwd_mark = !!(dp->bridge);
}
@@ -215,9 +215,9 @@ static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}
-/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from
+/* On RX, eth_type_trans() on the DSA conduit pulls ETH_HLEN bytes starting from
* skb_mac_header(skb), which leaves skb->data pointing at the first byte after
- * what the DSA master perceives as the EtherType (the beginning of the L3
+ * what the DSA conduit perceives as the EtherType (the beginning of the L3
* protocol). Since DSA EtherType header taggers treat the EtherType as part of
* the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
* is located 2 bytes behind skb->data. Note that EtherType in this context
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index cbdfc392f7e0..71b26ae6db39 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -73,7 +73,7 @@ struct dsa_tag_8021q_vlan {
struct dsa_8021q_context {
struct dsa_switch *ds;
struct list_head vlans;
- /* EtherType of RX VID, used for filtering on master interface */
+ /* EtherType of RX VID, used for filtering on conduit interface */
__be16 proto;
};
@@ -338,7 +338,7 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_port *dp = dsa_to_port(ds, port);
u16 vid = dsa_tag_8021q_standalone_vid(dp);
- struct net_device *master;
+ struct net_device *conduit;
int err;
/* The CPU port is implicitly configured by
@@ -347,7 +347,7 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
if (!dsa_port_is_user(dp))
return 0;
- master = dsa_port_to_master(dp);
+ conduit = dsa_port_to_conduit(dp);
err = dsa_port_tag_8021q_vlan_add(dp, vid, false);
if (err) {
@@ -357,8 +357,8 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port)
return err;
}
- /* Add the VLAN to the master's RX filter. */
- vlan_vid_add(master, ctx->proto, vid);
+ /* Add the VLAN to the conduit's RX filter. */
+ vlan_vid_add(conduit, ctx->proto, vid);
return err;
}
@@ -368,7 +368,7 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port)
struct dsa_8021q_context *ctx = ds->tag_8021q_ctx;
struct dsa_port *dp = dsa_to_port(ds, port);
u16 vid = dsa_tag_8021q_standalone_vid(dp);
- struct net_device *master;
+ struct net_device *conduit;
/* The CPU port is implicitly configured by
* configuring the front-panel ports
@@ -376,11 +376,11 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port)
if (!dsa_port_is_user(dp))
return;
- master = dsa_port_to_master(dp);
+ conduit = dsa_port_to_conduit(dp);
dsa_port_tag_8021q_vlan_del(dp, vid, false);
- vlan_vid_del(master, ctx->proto, vid);
+ vlan_vid_del(conduit, ctx->proto, vid);
}
static int dsa_tag_8021q_setup(struct dsa_switch *ds)
@@ -468,10 +468,10 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
-struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
+struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
int vbid)
{
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_port *dp;
@@ -490,7 +490,7 @@ struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
continue;
if (dsa_port_bridge_num_get(dp) == vbid)
- return dp->slave;
+ return dp->user;
}
return NULL;
diff --git a/net/dsa/tag_8021q.h b/net/dsa/tag_8021q.h
index b75cbaa028ef..41f7167ac520 100644
--- a/net/dsa/tag_8021q.h
+++ b/net/dsa/tag_8021q.h
@@ -16,7 +16,7 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
int *vbid);
-struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
+struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit,
int vbid);
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
diff --git a/net/dsa/tag_ar9331.c b/net/dsa/tag_ar9331.c
index 7f3b7d730b85..92ce67b93a58 100644
--- a/net/dsa/tag_ar9331.c
+++ b/net/dsa/tag_ar9331.c
@@ -29,7 +29,7 @@
static struct sk_buff *ar9331_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
__le16 *phdr;
u16 hdr;
@@ -74,7 +74,7 @@ static struct sk_buff *ar9331_tag_rcv(struct sk_buff *skb,
/* Get source port information */
port = FIELD_GET(AR9331_HDR_PORT_NUM_MASK, hdr);
- skb->dev = dsa_master_find_slave(ndev, 0, port);
+ skb->dev = dsa_conduit_find_user(ndev, 0, port);
if (!skb->dev)
return NULL;
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index cacdafb41200..83d283a5d27e 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -85,7 +85,7 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
struct net_device *dev,
unsigned int offset)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
u16 queue = skb_get_queue_mapping(skb);
u8 *brcm_tag;
@@ -96,7 +96,7 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*
- * Let dsa_slave_xmit() free the SKB
+ * Let dsa_user_xmit() free the SKB
*/
if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
return NULL;
@@ -119,7 +119,7 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
brcm_tag[3] = (1 << dp->index) & BRCM_IG_DSTMAP1_MASK;
- /* Now tell the master network device about the desired output queue
+ /* Now tell the conduit network device about the desired output queue
* as well
*/
skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(dp->index, queue));
@@ -164,7 +164,7 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
/* Locate which port this is coming from */
source_port = brcm_tag[3] & BRCM_EG_PID_MASK;
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
if (!skb->dev)
return NULL;
@@ -216,7 +216,7 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM, BRCM_NAME);
static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
u8 *brcm_tag;
/* The Ethernet switch we are interfaced with needs packets to be at
@@ -226,7 +226,7 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
* (including FCS and tag) because the length verification is done after
* the Broadcom tag is stripped off the ingress packet.
*
- * Let dsa_slave_xmit() free the SKB
+ * Let dsa_user_xmit() free the SKB
*/
if (__skb_put_padto(skb, ETH_ZLEN + BRCM_LEG_TAG_LEN, false))
return NULL;
@@ -264,7 +264,7 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
if (!skb->dev)
return NULL;
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 1fd7fa26db64..8ed52dd663ab 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -129,7 +129,7 @@ enum dsa_code {
static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
u8 extra)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct net_device *br_dev;
u8 tag_dev, tag_port;
enum dsa_cmd cmd;
@@ -267,14 +267,14 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1);
skb->dev = lag ? lag->dev : NULL;
} else {
- skb->dev = dsa_master_find_slave(dev, source_device,
+ skb->dev = dsa_conduit_find_user(dev, source_device,
source_port);
}
if (!skb->dev)
return NULL;
- /* When using LAG offload, skb->dev is not a DSA slave interface,
+ /* When using LAG offload, skb->dev is not a DSA user interface,
* so we cannot call dsa_default_offload_fwd_mark and we need to
* special-case it.
*/
diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c
index e279cd9057b0..3539141b5350 100644
--- a/net/dsa/tag_gswip.c
+++ b/net/dsa/tag_gswip.c
@@ -61,7 +61,7 @@
static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
u8 *gswip_tag;
skb_push(skb, GSWIP_TX_HEADER_LEN);
@@ -89,7 +89,7 @@ static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
/* Get source port information */
port = (gswip_tag[7] & GSWIP_RX_SPPID_MASK) >> GSWIP_RX_SPPID_SHIFT;
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev)
return NULL;
diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
index 03a1fb9c87a9..6e233cd0aa38 100644
--- a/net/dsa/tag_hellcreek.c
+++ b/net/dsa/tag_hellcreek.c
@@ -20,7 +20,7 @@
static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
u8 *tag;
/* Calculate checksums (if required) before adding the trailer tag to
@@ -45,7 +45,7 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
u8 *tag = skb_tail_pointer(skb) - HELLCREEK_TAG_LEN;
unsigned int port = tag[0] & 0x03;
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev) {
netdev_warn_once(dev, "Failed to get source port: %d\n", port);
return NULL;
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 3632e47dea9e..9be341fa88f0 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -87,7 +87,7 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
struct net_device *dev,
unsigned int port, unsigned int len)
{
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev)
return NULL;
@@ -119,7 +119,7 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct ethhdr *hdr;
u8 *tag;
@@ -256,7 +256,7 @@ static struct sk_buff *ksz_defer_xmit(struct dsa_port *dp, struct sk_buff *skb)
return NULL;
kthread_init_work(&xmit_work->work, xmit_work_fn);
- /* Increase refcount so the kfree_skb in dsa_slave_xmit
+ /* Increase refcount so the kfree_skb in dsa_user_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
@@ -272,7 +272,7 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
{
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 prio = netdev_txq_to_tc(dev, queue_mapping);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct ethhdr *hdr;
__be16 *tag;
u16 val;
@@ -344,7 +344,7 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb,
{
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 prio = netdev_txq_to_tc(dev, queue_mapping);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct ethhdr *hdr;
u8 *tag;
@@ -410,7 +410,7 @@ static struct sk_buff *lan937x_xmit(struct sk_buff *skb,
{
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 prio = netdev_txq_to_tc(dev, queue_mapping);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
const struct ethhdr *hdr = eth_hdr(skb);
__be16 *tag;
u16 val;
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index c25f5536706b..1ed8ee24855d 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -56,7 +56,7 @@ static int lan9303_xmit_use_arl(struct dsa_port *dp, u8 *dest_addr)
static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
__be16 *lan9303_tag;
u16 tag;
@@ -99,7 +99,7 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
source_port = lan9303_tag1 & 0x3;
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
if (!skb->dev) {
dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n");
return NULL;
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index 40af80452747..2483785f6ab1 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -23,7 +23,7 @@
static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
u8 xmit_tpid;
u8 *mtk_tag;
@@ -85,7 +85,7 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev)
/* Get source port information */
port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev)
return NULL;
diff --git a/net/dsa/tag_none.c b/net/dsa/tag_none.c
index d2fd179c4227..9a473624db50 100644
--- a/net/dsa/tag_none.c
+++ b/net/dsa/tag_none.c
@@ -12,8 +12,8 @@
#define NONE_NAME "none"
-static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static struct sk_buff *dsa_user_notag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
/* Just return the original SKB */
return skb;
@@ -22,7 +22,7 @@ static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
static const struct dsa_device_ops none_ops = {
.name = NONE_NAME,
.proto = DSA_TAG_PROTO_NONE,
- .xmit = dsa_slave_notag_xmit,
+ .xmit = dsa_user_notag_xmit,
};
module_dsa_tag_driver(none_ops);
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index 20bf7074d5a6..ef2f8fffb2c7 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -45,7 +45,7 @@ static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
__be32 ifh_prefix, void **ifh)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
struct dsa_switch *ds = dp->ds;
u64 vlan_tci, tag_type;
void *injection;
@@ -79,7 +79,7 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
void *injection;
ocelot_xmit_common(skb, netdev, cpu_to_be32(0x8880000a), &injection);
@@ -91,7 +91,7 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
static struct sk_buff *seville_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
void *injection;
ocelot_xmit_common(skb, netdev, cpu_to_be32(0x88800005), &injection);
@@ -111,12 +111,12 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
u16 vlan_tpid;
u64 rew_val;
- /* Revert skb->data by the amount consumed by the DSA master,
+ /* Revert skb->data by the amount consumed by the DSA conduit,
* so it points to the beginning of the frame.
*/
skb_push(skb, ETH_HLEN);
/* We don't care about the short prefix, it is just for easy entrance
- * into the DSA master's RX filter. Discard it now by moving it into
+ * into the DSA conduit's RX filter. Discard it now by moving it into
* the headroom.
*/
skb_pull(skb, OCELOT_SHORT_PREFIX_LEN);
@@ -141,12 +141,12 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
ocelot_xfh_get_vlan_tci(extraction, &vlan_tci);
ocelot_xfh_get_rew_val(extraction, &rew_val);
- skb->dev = dsa_master_find_slave(netdev, 0, src_port);
+ skb->dev = dsa_conduit_find_user(netdev, 0, src_port);
if (!skb->dev)
/* The switch will reflect back some frames sent through
- * sockets opened on the bare DSA master. These will come back
+ * sockets opened on the bare DSA conduit. These will come back
* with src_port equal to the index of the CPU port, for which
- * there is no slave registered. So don't print any error
+ * there is no user registered. So don't print any error
* message here (ignore and drop those frames).
*/
return NULL;
@@ -170,7 +170,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
* equal to the pvid of the ingress port and should not be used for
* processing.
*/
- dp = dsa_slave_to_port(skb->dev);
+ dp = dsa_user_to_port(skb->dev);
vlan_tpid = tag_type ? ETH_P_8021AD : ETH_P_8021Q;
if (dsa_port_is_vlan_filtering(dp) &&
@@ -192,7 +192,7 @@ static const struct dsa_device_ops ocelot_netdev_ops = {
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
.needed_headroom = OCELOT_TOTAL_TAG_LEN,
- .promisc_on_master = true,
+ .promisc_on_conduit = true,
};
DSA_TAG_DRIVER(ocelot_netdev_ops);
@@ -204,7 +204,7 @@ static const struct dsa_device_ops seville_netdev_ops = {
.xmit = seville_xmit,
.rcv = ocelot_rcv,
.needed_headroom = OCELOT_TOTAL_TAG_LEN,
- .promisc_on_master = true,
+ .promisc_on_conduit = true,
};
DSA_TAG_DRIVER(seville_netdev_ops);
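
For context, not part of this file: .promisc_on_conduit keeps the old flag's meaning — taggers whose frames the conduit's RX filter would otherwise drop ask the conduit setup code to force promiscuous mode, roughly:

	/* assumed conduit (formerly master) setup path */
	if (cpu_dp->tag_ops->promisc_on_conduit)
		dev_set_promiscuity(conduit, 1);
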
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index 1f0b8c20eba5..210039320888 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -37,8 +37,8 @@ static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
return NULL;
/* PTP over IP packets need UDP checksumming. We may have inherited
- * NETIF_F_HW_CSUM from the DSA master, but these packets are not sent
- * through the DSA master, so calculate the checksum here.
+ * NETIF_F_HW_CSUM from the DSA conduit, but these packets are not sent
+ * through the DSA conduit, so calculate the checksum here.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
return NULL;
@@ -49,7 +49,7 @@ static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
/* Calls felix_port_deferred_xmit in felix.c */
kthread_init_work(&xmit_work->work, xmit_work_fn);
- /* Increase refcount so the kfree_skb in dsa_slave_xmit
+ /* Increase refcount so the kfree_skb in dsa_user_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
@@ -63,7 +63,7 @@ static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
@@ -83,7 +83,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
dsa_8021q_rcv(skb, &src_port, &switch_id, NULL);
- skb->dev = dsa_master_find_slave(netdev, switch_id, src_port);
+ skb->dev = dsa_conduit_find_user(netdev, switch_id, src_port);
if (!skb->dev)
return NULL;
@@ -130,7 +130,7 @@ static const struct dsa_device_ops ocelot_8021q_netdev_ops = {
.connect = ocelot_connect,
.disconnect = ocelot_disconnect,
.needed_headroom = VLAN_HLEN,
- .promisc_on_master = true,
+ .promisc_on_conduit = true,
};
MODULE_LICENSE("GPL v2");
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index e5ff7c34e577..6514aa7993ce 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -14,7 +14,7 @@
static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
__be16 *phdr;
u16 hdr;
@@ -78,7 +78,7 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev)
/* Get source port information */
port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, hdr);
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev)
return NULL;
@@ -116,7 +116,7 @@ static const struct dsa_device_ops qca_netdev_ops = {
.xmit = qca_tag_xmit,
.rcv = qca_tag_rcv,
.needed_headroom = QCA_HDR_LEN,
- .promisc_on_master = true,
+ .promisc_on_conduit = true,
};
MODULE_LICENSE("GPL");
diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
index c327314b95e3..4da5bad1a7aa 100644
--- a/net/dsa/tag_rtl4_a.c
+++ b/net/dsa/tag_rtl4_a.c
@@ -36,7 +36,7 @@
static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
__be16 *p;
u8 *tag;
u16 out;
@@ -97,9 +97,9 @@ static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb,
}
port = protport & 0xff;
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev) {
- netdev_dbg(dev, "could not find slave for port %d\n", port);
+ netdev_dbg(dev, "could not find user for port %d\n", port);
return NULL;
}
diff --git a/net/dsa/tag_rtl8_4.c b/net/dsa/tag_rtl8_4.c
index 4f67834fd121..07e857debabf 100644
--- a/net/dsa/tag_rtl8_4.c
+++ b/net/dsa/tag_rtl8_4.c
@@ -103,7 +103,7 @@
static void rtl8_4_write_tag(struct sk_buff *skb, struct net_device *dev,
void *tag)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
__be16 tag16[RTL8_4_TAG_LEN / 2];
/* Set Realtek EtherType */
@@ -180,10 +180,10 @@ static int rtl8_4_read_tag(struct sk_buff *skb, struct net_device *dev,
/* Parse TX (switch->CPU) */
port = FIELD_GET(RTL8_4_TX, ntohs(tag16[3]));
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev) {
dev_warn_ratelimited(&dev->dev,
- "could not find slave for port %d\n",
+ "could not find user for port %d\n",
port);
return -ENOENT;
}
diff --git a/net/dsa/tag_rzn1_a5psw.c b/net/dsa/tag_rzn1_a5psw.c
index 437a6820ac42..2ce866b45615 100644
--- a/net/dsa/tag_rzn1_a5psw.c
+++ b/net/dsa/tag_rzn1_a5psw.c
@@ -39,7 +39,7 @@ struct a5psw_tag {
static struct sk_buff *a5psw_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct a5psw_tag *ptag;
u32 data2_val;
@@ -90,7 +90,7 @@ static struct sk_buff *a5psw_tag_rcv(struct sk_buff *skb,
port = FIELD_GET(A5PSW_CTRL_DATA_PORT, ntohs(tag->ctrl_data));
- skb->dev = dsa_master_find_slave(dev, 0, port);
+ skb->dev = dsa_conduit_find_user(dev, 0, port);
if (!skb->dev)
return NULL;
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index ade3eeb2f3e6..1fffe8c2b589 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -157,7 +157,7 @@ static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
return NULL;
kthread_init_work(&xmit_work->work, xmit_work_fn);
- /* Increase refcount so the kfree_skb in dsa_slave_xmit
+ /* Increase refcount so the kfree_skb in dsa_user_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
@@ -210,7 +210,7 @@ static u16 sja1105_xmit_tpid(struct dsa_port *dp)
static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
unsigned int bridge_num = dsa_port_bridge_num_get(dp);
struct net_device *br = dsa_port_bridge_dev_get(dp);
u16 tx_vid;
@@ -235,7 +235,7 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
/* Transform untagged control packets into pvid-tagged control packets so that
* all packets sent by this tagger are VLAN-tagged and we can configure the
- * switch to drop untagged packets coming from the DSA master.
+ * switch to drop untagged packets coming from the DSA conduit.
*/
static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
struct sk_buff *skb, u8 pcp)
@@ -266,7 +266,7 @@ static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp,
static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
@@ -294,7 +294,7 @@ static struct sk_buff *sja1110_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
- struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct dsa_port *dp = dsa_user_to_port(netdev);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
u16 tx_vid = dsa_tag_8021q_standalone_vid(dp);
@@ -383,7 +383,7 @@ static struct sk_buff
* Buffer it until we get its meta frame.
*/
if (is_link_local) {
- struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
struct sja1105_tagger_private *priv;
struct dsa_switch *ds = dp->ds;
@@ -396,7 +396,7 @@ static struct sk_buff
if (priv->stampable_skb) {
dev_err_ratelimited(ds->dev,
"Expected meta frame, is %12llx "
- "in the DSA master multicast filter?\n",
+ "in the DSA conduit multicast filter?\n",
SJA1105_META_DMAC);
kfree_skb(priv->stampable_skb);
}
@@ -417,7 +417,7 @@ static struct sk_buff
* frame, which serves no further purpose).
*/
} else if (is_meta) {
- struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct dsa_port *dp = dsa_user_to_port(skb->dev);
struct sja1105_tagger_private *priv;
struct dsa_switch *ds = dp->ds;
struct sk_buff *stampable_skb;
@@ -550,7 +550,7 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
}
if (source_port != -1 && switch_id != -1)
- skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
+ skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port);
else if (vbid >= 1)
skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid);
else
@@ -573,16 +573,16 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
struct sja1105_tagger_data *tagger_data;
- struct net_device *master = skb->dev;
+ struct net_device *conduit = skb->dev;
struct dsa_port *cpu_dp;
struct dsa_switch *ds;
int i;
- cpu_dp = master->dsa_ptr;
+ cpu_dp = conduit->dsa_ptr;
ds = dsa_switch_find(cpu_dp->dst->index, switch_id);
if (!ds) {
net_err_ratelimited("%s: cannot find switch id %d\n",
- master->name, switch_id);
+ conduit->name, switch_id);
return NULL;
}
@@ -649,7 +649,7 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
/* skb->len counts from skb->data, while start_of_padding
* counts from the destination MAC address. Right now skb->data
- * is still as set by the DSA master, so to trim away the
+ * is still as set by the DSA conduit, so to trim away the
* padding and trailer we need to account for the fact that
* skb->data points to skb_mac_header(skb) + ETH_HLEN.
*/
@@ -698,7 +698,7 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
else if (source_port == -1 || switch_id == -1)
skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid);
else
- skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
+ skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port);
if (!skb->dev) {
netdev_warn(netdev, "Couldn't decode source port\n");
return NULL;
@@ -778,7 +778,7 @@ static const struct dsa_device_ops sja1105_netdev_ops = {
.disconnect = sja1105_disconnect,
.needed_headroom = VLAN_HLEN,
.flow_dissect = sja1105_flow_dissect,
- .promisc_on_master = true,
+ .promisc_on_conduit = true,
};
DSA_TAG_DRIVER(sja1105_netdev_ops);
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 7361b9106382..1ebb25a8b140 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -14,7 +14,7 @@
static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
u8 *trailer;
trailer = skb_put(skb, 4);
@@ -41,7 +41,7 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev)
source_port = trailer[1] & 7;
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
if (!skb->dev)
return NULL;
diff --git a/net/dsa/tag_xrs700x.c b/net/dsa/tag_xrs700x.c
index af19969f9bc4..c9c163598ef2 100644
--- a/net/dsa/tag_xrs700x.c
+++ b/net/dsa/tag_xrs700x.c
@@ -13,7 +13,7 @@
static struct sk_buff *xrs700x_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_port *partner, *dp = dsa_slave_to_port(dev);
+ struct dsa_port *partner, *dp = dsa_user_to_port(dev);
u8 *trailer;
trailer = skb_put(skb, 1);
@@ -39,7 +39,7 @@ static struct sk_buff *xrs700x_rcv(struct sk_buff *skb, struct net_device *dev)
if (source_port < 0)
return NULL;
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
if (!skb->dev)
return NULL;
diff --git a/net/dsa/slave.c b/net/dsa/user.c
index 4c3e502d7e16..d438884a4eb0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/user.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * net/dsa/slave.c - Slave device handling
+ * net/dsa/user.c - user device handling
* Copyright (c) 2008-2009 Marvell Semiconductor
*/
@@ -23,13 +23,13 @@
#include <linux/netpoll.h>
#include <linux/string.h>
+#include "conduit.h"
#include "dsa.h"
-#include "port.h"
-#include "master.h"
#include "netlink.h"
-#include "slave.h"
+#include "port.h"
#include "switch.h"
#include "tag.h"
+#include "user.h"
struct dsa_switchdev_event_work {
struct net_device *dev;
@@ -79,13 +79,13 @@ static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds)
!ds->needs_standalone_vlan_filtering;
}
-static void dsa_slave_standalone_event_work(struct work_struct *work)
+static void dsa_user_standalone_event_work(struct work_struct *work)
{
struct dsa_standalone_event_work *standalone_work =
container_of(work, struct dsa_standalone_event_work, work);
const unsigned char *addr = standalone_work->addr;
struct net_device *dev = standalone_work->dev;
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_mdb mdb;
struct dsa_switch *ds = dp->ds;
u16 vid = standalone_work->vid;
@@ -140,10 +140,10 @@ static void dsa_slave_standalone_event_work(struct work_struct *work)
kfree(standalone_work);
}
-static int dsa_slave_schedule_standalone_work(struct net_device *dev,
- enum dsa_standalone_event event,
- const unsigned char *addr,
- u16 vid)
+static int dsa_user_schedule_standalone_work(struct net_device *dev,
+ enum dsa_standalone_event event,
+ const unsigned char *addr,
+ u16 vid)
{
struct dsa_standalone_event_work *standalone_work;
@@ -151,7 +151,7 @@ static int dsa_slave_schedule_standalone_work(struct net_device *dev,
if (!standalone_work)
return -ENOMEM;
- INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work);
+ INIT_WORK(&standalone_work->work, dsa_user_standalone_event_work);
standalone_work->event = event;
standalone_work->dev = dev;
@@ -163,18 +163,18 @@ static int dsa_slave_schedule_standalone_work(struct net_device *dev,
return 0;
}
-static int dsa_slave_host_vlan_rx_filtering(void *arg, int vid)
+static int dsa_user_host_vlan_rx_filtering(void *arg, int vid)
{
struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;
- return dsa_slave_schedule_standalone_work(ctx->dev, ctx->event,
+ return dsa_user_schedule_standalone_work(ctx->dev, ctx->event,
ctx->addr, vid);
}
-static int dsa_slave_vlan_for_each(struct net_device *dev,
- int (*cb)(void *arg, int vid), void *arg)
+static int dsa_user_vlan_for_each(struct net_device *dev,
+ int (*cb)(void *arg, int vid), void *arg)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_vlan *v;
int err;
@@ -193,99 +193,99 @@ static int dsa_slave_vlan_for_each(struct net_device *dev,
return 0;
}
-static int dsa_slave_sync_uc(struct net_device *dev,
- const unsigned char *addr)
+static int dsa_user_sync_uc(struct net_device *dev,
+ const unsigned char *addr)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_host_vlan_rx_filtering_ctx ctx = {
.dev = dev,
.addr = addr,
.event = DSA_UC_ADD,
};
- dev_uc_add(master, addr);
+ dev_uc_add(conduit, addr);
if (!dsa_switch_supports_uc_filtering(dp->ds))
return 0;
- return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
+ return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
&ctx);
}
-static int dsa_slave_unsync_uc(struct net_device *dev,
- const unsigned char *addr)
+static int dsa_user_unsync_uc(struct net_device *dev,
+ const unsigned char *addr)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_host_vlan_rx_filtering_ctx ctx = {
.dev = dev,
.addr = addr,
.event = DSA_UC_DEL,
};
- dev_uc_del(master, addr);
+ dev_uc_del(conduit, addr);
if (!dsa_switch_supports_uc_filtering(dp->ds))
return 0;
- return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
+ return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
&ctx);
}
-static int dsa_slave_sync_mc(struct net_device *dev,
- const unsigned char *addr)
+static int dsa_user_sync_mc(struct net_device *dev,
+ const unsigned char *addr)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_host_vlan_rx_filtering_ctx ctx = {
.dev = dev,
.addr = addr,
.event = DSA_MC_ADD,
};
- dev_mc_add(master, addr);
+ dev_mc_add(conduit, addr);
if (!dsa_switch_supports_mc_filtering(dp->ds))
return 0;
- return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
+ return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
&ctx);
}
-static int dsa_slave_unsync_mc(struct net_device *dev,
- const unsigned char *addr)
+static int dsa_user_unsync_mc(struct net_device *dev,
+ const unsigned char *addr)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_host_vlan_rx_filtering_ctx ctx = {
.dev = dev,
.addr = addr,
.event = DSA_MC_DEL,
};
- dev_mc_del(master, addr);
+ dev_mc_del(conduit, addr);
if (!dsa_switch_supports_mc_filtering(dp->ds))
return 0;
- return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering,
+ return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering,
&ctx);
}
-void dsa_slave_sync_ha(struct net_device *dev)
+void dsa_user_sync_ha(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(dev);
netdev_for_each_synced_mc_addr(ha, dev)
- dsa_slave_sync_mc(dev, ha->addr);
+ dsa_user_sync_mc(dev, ha->addr);
netdev_for_each_synced_uc_addr(ha, dev)
- dsa_slave_sync_uc(dev, ha->addr);
+ dsa_user_sync_uc(dev, ha->addr);
netif_addr_unlock_bh(dev);
@@ -294,19 +294,19 @@ void dsa_slave_sync_ha(struct net_device *dev)
dsa_flush_workqueue();
}
-void dsa_slave_unsync_ha(struct net_device *dev)
+void dsa_user_unsync_ha(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(dev);
netdev_for_each_synced_uc_addr(ha, dev)
- dsa_slave_unsync_uc(dev, ha->addr);
+ dsa_user_unsync_uc(dev, ha->addr);
netdev_for_each_synced_mc_addr(ha, dev)
- dsa_slave_unsync_mc(dev, ha->addr);
+ dsa_user_unsync_mc(dev, ha->addr);
netif_addr_unlock_bh(dev);
@@ -315,8 +315,8 @@ void dsa_slave_unsync_ha(struct net_device *dev)
dsa_flush_workqueue();
}
-/* slave mii_bus handling ***************************************************/
-static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
+/* user mii_bus handling ***************************************************/
+static int dsa_user_phy_read(struct mii_bus *bus, int addr, int reg)
{
struct dsa_switch *ds = bus->priv;
@@ -326,7 +326,7 @@ static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
return 0xffff;
}
-static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
+static int dsa_user_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
struct dsa_switch *ds = bus->priv;
@@ -336,35 +336,35 @@ static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
return 0;
}
-void dsa_slave_mii_bus_init(struct dsa_switch *ds)
+void dsa_user_mii_bus_init(struct dsa_switch *ds)
{
- ds->slave_mii_bus->priv = (void *)ds;
- ds->slave_mii_bus->name = "dsa slave smi";
- ds->slave_mii_bus->read = dsa_slave_phy_read;
- ds->slave_mii_bus->write = dsa_slave_phy_write;
- snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
+ ds->user_mii_bus->priv = (void *)ds;
+ ds->user_mii_bus->name = "dsa user smi";
+ ds->user_mii_bus->read = dsa_user_phy_read;
+ ds->user_mii_bus->write = dsa_user_phy_write;
+ snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
ds->dst->index, ds->index);
- ds->slave_mii_bus->parent = ds->dev;
- ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
+ ds->user_mii_bus->parent = ds->dev;
+ ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
-/* slave device handling ****************************************************/
-static int dsa_slave_get_iflink(const struct net_device *dev)
+/* user device handling ****************************************************/
+static int dsa_user_get_iflink(const struct net_device *dev)
{
- return dsa_slave_to_master(dev)->ifindex;
+ return dsa_user_to_conduit(dev)->ifindex;
}
-static int dsa_slave_open(struct net_device *dev)
+static int dsa_user_open(struct net_device *dev)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int err;
- err = dev_open(master, NULL);
+ err = dev_open(conduit, NULL);
if (err < 0) {
- netdev_err(dev, "failed to open master %s\n", master->name);
+ netdev_err(dev, "failed to open conduit %s\n", conduit->name);
goto out;
}
@@ -374,8 +374,8 @@ static int dsa_slave_open(struct net_device *dev)
goto out;
}
- if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
- err = dev_uc_add(master, dev->dev_addr);
+ if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) {
+ err = dev_uc_add(conduit, dev->dev_addr);
if (err < 0)
goto del_host_addr;
}
@@ -387,8 +387,8 @@ static int dsa_slave_open(struct net_device *dev)
return 0;
del_unicast:
- if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
- dev_uc_del(master, dev->dev_addr);
+ if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
+ dev_uc_del(conduit, dev->dev_addr);
del_host_addr:
if (dsa_switch_supports_uc_filtering(ds))
dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
@@ -396,16 +396,16 @@ out:
return err;
}
-static int dsa_slave_close(struct net_device *dev)
+static int dsa_user_close(struct net_device *dev)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
dsa_port_disable_rt(dp);
- if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
- dev_uc_del(master, dev->dev_addr);
+ if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
+ dev_uc_del(conduit, dev->dev_addr);
if (dsa_switch_supports_uc_filtering(ds))
dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
@@ -413,43 +413,43 @@ static int dsa_slave_close(struct net_device *dev)
return 0;
}
-static void dsa_slave_manage_host_flood(struct net_device *dev)
+static void dsa_user_manage_host_flood(struct net_device *dev)
{
bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
bool uc = dev->flags & IFF_PROMISC;
dsa_port_set_host_flood(dp, uc, mc);
}
-static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
+static void dsa_user_change_rx_flags(struct net_device *dev, int change)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (change & IFF_ALLMULTI)
- dev_set_allmulti(master,
+ dev_set_allmulti(conduit,
dev->flags & IFF_ALLMULTI ? 1 : -1);
if (change & IFF_PROMISC)
- dev_set_promiscuity(master,
+ dev_set_promiscuity(conduit,
dev->flags & IFF_PROMISC ? 1 : -1);
if (dsa_switch_supports_uc_filtering(ds) &&
dsa_switch_supports_mc_filtering(ds))
- dsa_slave_manage_host_flood(dev);
+ dsa_user_manage_host_flood(dev);
}
-static void dsa_slave_set_rx_mode(struct net_device *dev)
+static void dsa_user_set_rx_mode(struct net_device *dev)
{
- __dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc);
- __dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc);
+ __dev_mc_sync(dev, dsa_user_sync_mc, dsa_user_unsync_mc);
+ __dev_uc_sync(dev, dsa_user_sync_uc, dsa_user_unsync_uc);
}
-static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
+static int dsa_user_set_mac_address(struct net_device *dev, void *a)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct sockaddr *addr = a;
int err;
@@ -465,7 +465,7 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
}
/* If the port is down, the address isn't synced yet to hardware or
- * to the DSA master, so there is nothing to change.
+ * to the DSA conduit, so there is nothing to change.
*/
if (!(dev->flags & IFF_UP))
goto out_change_dev_addr;
@@ -476,14 +476,14 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
return err;
}
- if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
- err = dev_uc_add(master, addr->sa_data);
+ if (!ether_addr_equal(addr->sa_data, conduit->dev_addr)) {
+ err = dev_uc_add(conduit, addr->sa_data);
if (err < 0)
goto del_unicast;
}
- if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
- dev_uc_del(master, dev->dev_addr);
+ if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr))
+ dev_uc_del(conduit, dev->dev_addr);
if (dsa_switch_supports_uc_filtering(ds))
dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0);
@@ -500,7 +500,7 @@ del_unicast:
return err;
}
-struct dsa_slave_dump_ctx {
+struct dsa_user_dump_ctx {
struct net_device *dev;
struct sk_buff *skb;
struct netlink_callback *cb;
@@ -508,10 +508,10 @@ struct dsa_slave_dump_ctx {
};
static int
-dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
- bool is_static, void *data)
+dsa_user_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data)
{
- struct dsa_slave_dump_ctx *dump = data;
+ struct dsa_user_dump_ctx *dump = data;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
@@ -552,12 +552,12 @@ nla_put_failure:
}
static int
-dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct net_device *dev, struct net_device *filter_dev,
- int *idx)
+dsa_user_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev, struct net_device *filter_dev,
+ int *idx)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_slave_dump_ctx dump = {
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_user_dump_ctx dump = {
.dev = dev,
.skb = skb,
.cb = cb,
@@ -565,15 +565,15 @@ dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
};
int err;
- err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
+ err = dsa_port_fdb_dump(dp, dsa_user_port_fdb_do_dump, &dump);
*idx = dump.idx;
return err;
}
-static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int dsa_user_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->dp->ds;
int port = p->dp->index;
@@ -592,11 +592,11 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}
-static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
- const struct switchdev_attr *attr,
- struct netlink_ext_ack *extack)
+static int dsa_user_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
int ret;
if (ctx && ctx != dp)
@@ -663,13 +663,13 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
/* Must be called under rcu_read_lock() */
static int
-dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
- const struct switchdev_obj_port_vlan *vlan)
+dsa_user_vlan_check_for_8021q_uppers(struct net_device *user,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct net_device *upper_dev;
struct list_head *iter;
- netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
+ netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
u16 vid;
if (!is_vlan_dev(upper_dev))
@@ -683,11 +683,11 @@ dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
return 0;
}
-static int dsa_slave_vlan_add(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct netlink_ext_ack *extack)
+static int dsa_user_vlan_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
int err;
@@ -703,7 +703,7 @@ static int dsa_slave_vlan_add(struct net_device *dev,
*/
if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
rcu_read_lock();
- err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
+ err = dsa_user_vlan_check_for_8021q_uppers(dev, vlan);
rcu_read_unlock();
if (err) {
NL_SET_ERR_MSG_MOD(extack,
@@ -718,11 +718,11 @@ static int dsa_slave_vlan_add(struct net_device *dev,
/* Offload a VLAN installed on the bridge or on a foreign interface by
* installing it as a VLAN towards the CPU port.
*/
-static int dsa_slave_host_vlan_add(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct netlink_ext_ack *extack)
+static int dsa_user_host_vlan_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan vlan;
/* Do nothing if this is a software bridge */
@@ -744,11 +744,11 @@ static int dsa_slave_host_vlan_add(struct net_device *dev,
return dsa_port_host_vlan_add(dp, &vlan, extack);
}
-static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
- const struct switchdev_obj *obj,
- struct netlink_ext_ack *extack)
+static int dsa_user_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
int err;
if (ctx && ctx != dp)
@@ -769,9 +769,9 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
- err = dsa_slave_vlan_add(dev, obj, extack);
+ err = dsa_user_vlan_add(dev, obj, extack);
else
- err = dsa_slave_host_vlan_add(dev, obj, extack);
+ err = dsa_user_host_vlan_add(dev, obj, extack);
break;
case SWITCHDEV_OBJ_ID_MRP:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
@@ -794,10 +794,10 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
return err;
}
-static int dsa_slave_vlan_del(struct net_device *dev,
- const struct switchdev_obj *obj)
+static int dsa_user_vlan_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
if (dsa_port_skip_vlan_configuration(dp))
@@ -808,10 +808,10 @@ static int dsa_slave_vlan_del(struct net_device *dev,
return dsa_port_vlan_del(dp, vlan);
}
-static int dsa_slave_host_vlan_del(struct net_device *dev,
- const struct switchdev_obj *obj)
+static int dsa_user_host_vlan_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
/* Do nothing if this is a software bridge */
@@ -826,10 +826,10 @@ static int dsa_slave_host_vlan_del(struct net_device *dev,
return dsa_port_host_vlan_del(dp, vlan);
}
-static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
- const struct switchdev_obj *obj)
+static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
int err;
if (ctx && ctx != dp)
@@ -850,9 +850,9 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
- err = dsa_slave_vlan_del(dev, obj);
+ err = dsa_user_vlan_del(dev, obj);
else
- err = dsa_slave_host_vlan_del(dev, obj);
+ err = dsa_user_host_vlan_del(dev, obj);
break;
case SWITCHDEV_OBJ_ID_MRP:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
@@ -875,11 +875,11 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
return err;
}
-static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
- struct sk_buff *skb)
+static inline netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev,
+ struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
return netpoll_send_skb(p->netpoll, skb);
#else
@@ -888,7 +888,7 @@ static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
#endif
}
-static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
+static void dsa_skb_tx_timestamp(struct dsa_user_priv *p,
struct sk_buff *skb)
{
struct dsa_switch *ds = p->dp->ds;
@@ -908,12 +908,12 @@ netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
* tag to be successfully transmitted
*/
if (unlikely(netpoll_tx_running(dev)))
- return dsa_slave_netpoll_send_skb(dev, skb);
+ return dsa_user_netpoll_send_skb(dev, skb);
/* Queue the SKB for transmission on the parent interface, but
* do not modify its EtherType
*/
- skb->dev = dsa_slave_to_master(dev);
+ skb->dev = dsa_user_to_conduit(dev);
dev_queue_xmit(skb);
return NETDEV_TX_OK;
@@ -927,7 +927,7 @@ static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
/* For tail taggers, we need to pad short frames ourselves, to ensure
* that the tail tag does not fail at its role of being at the end of
- * the packet, once the master interface pads the frame. Account for
+ * the packet, once the conduit interface pads the frame. Account for
* that pad length here, and pad later.
*/
if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
@@ -944,9 +944,9 @@ static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
GFP_ATOMIC);
}
-static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct sk_buff *nskb;
dev_sw_netstats_tx_add(dev, 1, skb->len);
@@ -981,17 +981,17 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
/* ethtool operations *******************************************************/
-static void dsa_slave_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
+static void dsa_user_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
-static int dsa_slave_get_regs_len(struct net_device *dev)
+static int dsa_user_get_regs_len(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_regs_len)
@@ -1001,25 +1001,25 @@ static int dsa_slave_get_regs_len(struct net_device *dev)
}
static void
-dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
+dsa_user_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_regs)
ds->ops->get_regs(ds, dp->index, regs, _p);
}
-static int dsa_slave_nway_reset(struct net_device *dev)
+static int dsa_user_nway_reset(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
return phylink_ethtool_nway_reset(dp->pl);
}
-static int dsa_slave_get_eeprom_len(struct net_device *dev)
+static int dsa_user_get_eeprom_len(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->cd && ds->cd->eeprom_len)
@@ -1031,10 +1031,10 @@ static int dsa_slave_get_eeprom_len(struct net_device *dev)
return 0;
}
-static int dsa_slave_get_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
+static int dsa_user_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eeprom)
@@ -1043,10 +1043,10 @@ static int dsa_slave_get_eeprom(struct net_device *dev,
return -EOPNOTSUPP;
}
-static int dsa_slave_set_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
+static int dsa_user_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->set_eeprom)
@@ -1055,10 +1055,10 @@ static int dsa_slave_set_eeprom(struct net_device *dev,
return -EOPNOTSUPP;
}
-static void dsa_slave_get_strings(struct net_device *dev,
- uint32_t stringset, uint8_t *data)
+static void dsa_user_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (stringset == ETH_SS_STATS) {
@@ -1077,11 +1077,11 @@ static void dsa_slave_get_strings(struct net_device *dev,
}
-static void dsa_slave_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats,
- uint64_t *data)
+static void dsa_user_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct pcpu_sw_netstats *s;
unsigned int start;
@@ -1107,9 +1107,9 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}
-static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
+static int dsa_user_get_sset_count(struct net_device *dev, int sset)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (sset == ETH_SS_STATS) {
@@ -1129,20 +1129,20 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
return -EOPNOTSUPP;
}
-static void dsa_slave_get_eth_phy_stats(struct net_device *dev,
- struct ethtool_eth_phy_stats *phy_stats)
+static void dsa_user_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eth_phy_stats)
ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats);
}
-static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
- struct ethtool_eth_mac_stats *mac_stats)
+static void dsa_user_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eth_mac_stats)
@@ -1150,10 +1150,10 @@ static void dsa_slave_get_eth_mac_stats(struct net_device *dev,
}
static void
-dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
- struct ethtool_eth_ctrl_stats *ctrl_stats)
+dsa_user_get_eth_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_eth_ctrl_stats)
@@ -1161,21 +1161,21 @@ dsa_slave_get_eth_ctrl_stats(struct net_device *dev,
}
static void
-dsa_slave_get_rmon_stats(struct net_device *dev,
- struct ethtool_rmon_stats *rmon_stats,
- const struct ethtool_rmon_hist_range **ranges)
+dsa_user_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_rmon_stats)
ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges);
}
-static void dsa_slave_net_selftest(struct net_device *ndev,
- struct ethtool_test *etest, u64 *buf)
+static void dsa_user_net_selftest(struct net_device *ndev,
+ struct ethtool_test *etest, u64 *buf)
{
- struct dsa_port *dp = dsa_slave_to_port(ndev);
+ struct dsa_port *dp = dsa_user_to_port(ndev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->self_test) {
@@ -1186,10 +1186,10 @@ static void dsa_slave_net_selftest(struct net_device *ndev,
net_selftest(ndev, etest, buf);
}
-static int dsa_slave_get_mm(struct net_device *dev,
- struct ethtool_mm_state *state)
+static int dsa_user_get_mm(struct net_device *dev,
+ struct ethtool_mm_state *state)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->get_mm)
@@ -1198,10 +1198,10 @@ static int dsa_slave_get_mm(struct net_device *dev,
return ds->ops->get_mm(ds, dp->index, state);
}
-static int dsa_slave_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
- struct netlink_ext_ack *extack)
+static int dsa_user_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->set_mm)
@@ -1210,19 +1210,19 @@ static int dsa_slave_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg,
return ds->ops->set_mm(ds, dp->index, cfg, extack);
}
-static void dsa_slave_get_mm_stats(struct net_device *dev,
- struct ethtool_mm_stats *stats)
+static void dsa_user_get_mm_stats(struct net_device *dev,
+ struct ethtool_mm_stats *stats)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_mm_stats)
ds->ops->get_mm_stats(ds, dp->index, stats);
}
-static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+static void dsa_user_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
phylink_ethtool_get_wol(dp->pl, w);
@@ -1231,9 +1231,9 @@ static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
ds->ops->get_wol(ds, dp->index, w);
}
-static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int ret = -EOPNOTSUPP;
@@ -1245,9 +1245,9 @@ static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
return ret;
}
-static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int ret;
@@ -1265,9 +1265,9 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
return phylink_ethtool_set_eee(dp->pl, e);
}
-static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int dsa_user_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int ret;
@@ -1285,54 +1285,54 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
return phylink_ethtool_get_eee(dp->pl, e);
}
-static int dsa_slave_get_link_ksettings(struct net_device *dev,
- struct ethtool_link_ksettings *cmd)
+static int dsa_user_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
return phylink_ethtool_ksettings_get(dp->pl, cmd);
}
-static int dsa_slave_set_link_ksettings(struct net_device *dev,
- const struct ethtool_link_ksettings *cmd)
+static int dsa_user_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
return phylink_ethtool_ksettings_set(dp->pl, cmd);
}
-static void dsa_slave_get_pause_stats(struct net_device *dev,
- struct ethtool_pause_stats *pause_stats)
+static void dsa_user_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_pause_stats)
ds->ops->get_pause_stats(ds, dp->index, pause_stats);
}
-static void dsa_slave_get_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pause)
+static void dsa_user_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
phylink_ethtool_get_pauseparam(dp->pl, pause);
}
-static int dsa_slave_set_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pause)
+static int dsa_user_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
return phylink_ethtool_set_pauseparam(dp->pl, pause);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int dsa_slave_netpoll_setup(struct net_device *dev,
- struct netpoll_info *ni)
+static int dsa_user_netpoll_setup(struct net_device *dev,
+ struct netpoll_info *ni)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct netpoll *netpoll;
int err = 0;
@@ -1340,7 +1340,7 @@ static int dsa_slave_netpoll_setup(struct net_device *dev,
if (!netpoll)
return -ENOMEM;
- err = __netpoll_setup(netpoll, master);
+ err = __netpoll_setup(netpoll, conduit);
if (err) {
kfree(netpoll);
goto out;
@@ -1351,9 +1351,9 @@ out:
return err;
}
-static void dsa_slave_netpoll_cleanup(struct net_device *dev)
+static void dsa_user_netpoll_cleanup(struct net_device *dev)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct netpoll *netpoll = p->netpoll;
if (!netpoll)
@@ -1364,15 +1364,15 @@ static void dsa_slave_netpoll_cleanup(struct net_device *dev)
__netpoll_free(netpoll);
}
-static void dsa_slave_poll_controller(struct net_device *dev)
+static void dsa_user_poll_controller(struct net_device *dev)
{
}
#endif
static struct dsa_mall_tc_entry *
-dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
+dsa_user_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_mall_tc_entry *mall_tc_entry;
list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
@@ -1383,13 +1383,13 @@ dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
}
static int
-dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
- struct tc_cls_matchall_offload *cls,
- bool ingress)
+dsa_user_add_cls_matchall_mirred(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
{
struct netlink_ext_ack *extack = cls->common.extack;
- struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_mall_mirror_tc_entry *mirror;
struct dsa_mall_tc_entry *mall_tc_entry;
struct dsa_switch *ds = dp->ds;
@@ -1409,7 +1409,7 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
if (!act->dev)
return -EINVAL;
- if (!dsa_slave_dev_check(act->dev))
+ if (!dsa_user_dev_check(act->dev))
return -EOPNOTSUPP;
mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
@@ -1420,7 +1420,7 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
mirror = &mall_tc_entry->mirror;
- to_dp = dsa_slave_to_port(act->dev);
+ to_dp = dsa_user_to_port(act->dev);
mirror->to_local_port = to_dp->index;
mirror->ingress = ingress;
@@ -1437,13 +1437,13 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
}
static int
-dsa_slave_add_cls_matchall_police(struct net_device *dev,
- struct tc_cls_matchall_offload *cls,
- bool ingress)
+dsa_user_add_cls_matchall_police(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
{
struct netlink_ext_ack *extack = cls->common.extack;
- struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_mall_policer_tc_entry *policer;
struct dsa_mall_tc_entry *mall_tc_entry;
struct dsa_switch *ds = dp->ds;
@@ -1497,31 +1497,31 @@ dsa_slave_add_cls_matchall_police(struct net_device *dev,
return err;
}
-static int dsa_slave_add_cls_matchall(struct net_device *dev,
- struct tc_cls_matchall_offload *cls,
- bool ingress)
+static int dsa_user_add_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
{
int err = -EOPNOTSUPP;
if (cls->common.protocol == htons(ETH_P_ALL) &&
flow_offload_has_one_action(&cls->rule->action) &&
cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
- err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
+ err = dsa_user_add_cls_matchall_mirred(dev, cls, ingress);
else if (flow_offload_has_one_action(&cls->rule->action) &&
cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
- err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);
+ err = dsa_user_add_cls_matchall_police(dev, cls, ingress);
return err;
}
-static void dsa_slave_del_cls_matchall(struct net_device *dev,
- struct tc_cls_matchall_offload *cls)
+static void dsa_user_del_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_mall_tc_entry *mall_tc_entry;
struct dsa_switch *ds = dp->ds;
- mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
+ mall_tc_entry = dsa_user_mall_tc_entry_find(dev, cls->cookie);
if (!mall_tc_entry)
return;
@@ -1544,29 +1544,29 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev,
kfree(mall_tc_entry);
}
-static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
- struct tc_cls_matchall_offload *cls,
- bool ingress)
+static int dsa_user_setup_tc_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
{
if (cls->common.chain_index)
return -EOPNOTSUPP;
switch (cls->command) {
case TC_CLSMATCHALL_REPLACE:
- return dsa_slave_add_cls_matchall(dev, cls, ingress);
+ return dsa_user_add_cls_matchall(dev, cls, ingress);
case TC_CLSMATCHALL_DESTROY:
- dsa_slave_del_cls_matchall(dev, cls);
+ dsa_user_del_cls_matchall(dev, cls);
return 0;
default:
return -EOPNOTSUPP;
}
}
-static int dsa_slave_add_cls_flower(struct net_device *dev,
- struct flow_cls_offload *cls,
- bool ingress)
+static int dsa_user_add_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
@@ -1576,11 +1576,11 @@ static int dsa_slave_add_cls_flower(struct net_device *dev,
return ds->ops->cls_flower_add(ds, port, cls, ingress);
}
-static int dsa_slave_del_cls_flower(struct net_device *dev,
- struct flow_cls_offload *cls,
- bool ingress)
+static int dsa_user_del_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
@@ -1590,11 +1590,11 @@ static int dsa_slave_del_cls_flower(struct net_device *dev,
return ds->ops->cls_flower_del(ds, port, cls, ingress);
}
-static int dsa_slave_stats_cls_flower(struct net_device *dev,
- struct flow_cls_offload *cls,
- bool ingress)
+static int dsa_user_stats_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
@@ -1604,24 +1604,24 @@ static int dsa_slave_stats_cls_flower(struct net_device *dev,
return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}
-static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
- struct flow_cls_offload *cls,
- bool ingress)
+static int dsa_user_setup_tc_cls_flower(struct net_device *dev,
+ struct flow_cls_offload *cls,
+ bool ingress)
{
switch (cls->command) {
case FLOW_CLS_REPLACE:
- return dsa_slave_add_cls_flower(dev, cls, ingress);
+ return dsa_user_add_cls_flower(dev, cls, ingress);
case FLOW_CLS_DESTROY:
- return dsa_slave_del_cls_flower(dev, cls, ingress);
+ return dsa_user_del_cls_flower(dev, cls, ingress);
case FLOW_CLS_STATS:
- return dsa_slave_stats_cls_flower(dev, cls, ingress);
+ return dsa_user_stats_cls_flower(dev, cls, ingress);
default:
return -EOPNOTSUPP;
}
}
-static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv, bool ingress)
+static int dsa_user_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv, bool ingress)
{
struct net_device *dev = cb_priv;
@@ -1630,46 +1630,46 @@ static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSMATCHALL:
- return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
+ return dsa_user_setup_tc_cls_matchall(dev, type_data, ingress);
case TC_SETUP_CLSFLOWER:
- return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
+ return dsa_user_setup_tc_cls_flower(dev, type_data, ingress);
default:
return -EOPNOTSUPP;
}
}
-static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int dsa_user_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
- return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
+ return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, true);
}
-static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int dsa_user_setup_tc_block_cb_eg(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
- return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
+ return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, false);
}
-static LIST_HEAD(dsa_slave_block_cb_list);
+static LIST_HEAD(dsa_user_block_cb_list);
-static int dsa_slave_setup_tc_block(struct net_device *dev,
- struct flow_block_offload *f)
+static int dsa_user_setup_tc_block(struct net_device *dev,
+ struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- cb = dsa_slave_setup_tc_block_cb_ig;
+ cb = dsa_user_setup_tc_block_cb_ig;
else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
- cb = dsa_slave_setup_tc_block_cb_eg;
+ cb = dsa_user_setup_tc_block_cb_eg;
else
return -EOPNOTSUPP;
- f->driver_block_list = &dsa_slave_block_cb_list;
+ f->driver_block_list = &dsa_user_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
+ if (flow_block_cb_is_busy(cb, dev, &dsa_user_block_cb_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
@@ -1677,7 +1677,7 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
return PTR_ERR(block_cb);
flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
+ list_add_tail(&block_cb->driver_list, &dsa_user_block_cb_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f->block, cb, dev);
@@ -1692,28 +1692,28 @@ static int dsa_slave_setup_tc_block(struct net_device *dev,
}
}
-static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port,
- void *type_data)
+static int dsa_user_setup_ft_block(struct dsa_switch *ds, int port,
+ void *type_data)
{
- struct net_device *master = dsa_port_to_master(dsa_to_port(ds, port));
+ struct net_device *conduit = dsa_port_to_conduit(dsa_to_port(ds, port));
- if (!master->netdev_ops->ndo_setup_tc)
+ if (!conduit->netdev_ops->ndo_setup_tc)
return -EOPNOTSUPP;
- return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data);
+ return conduit->netdev_ops->ndo_setup_tc(conduit, TC_SETUP_FT, type_data);
}
-static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+static int dsa_user_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
switch (type) {
case TC_SETUP_BLOCK:
- return dsa_slave_setup_tc_block(dev, type_data);
+ return dsa_user_setup_tc_block(dev, type_data);
case TC_SETUP_FT:
- return dsa_slave_setup_ft_block(ds, dp->index, type_data);
+ return dsa_user_setup_ft_block(ds, dp->index, type_data);
default:
break;
}
@@ -1724,10 +1724,10 @@ static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}
-static int dsa_slave_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *nfc, u32 *rule_locs)
+static int dsa_user_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->get_rxnfc)
@@ -1736,10 +1736,10 @@ static int dsa_slave_get_rxnfc(struct net_device *dev,
return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}
-static int dsa_slave_set_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *nfc)
+static int dsa_user_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (!ds->ops->set_rxnfc)
@@ -1748,10 +1748,10 @@ static int dsa_slave_set_rxnfc(struct net_device *dev,
return ds->ops->set_rxnfc(ds, dp->index, nfc);
}
-static int dsa_slave_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *ts)
+static int dsa_user_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *ts)
{
- struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_user_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->dp->ds;
if (!ds->ops->get_ts_info)
@@ -1760,10 +1760,10 @@ static int dsa_slave_get_ts_info(struct net_device *dev,
return ds->ops->get_ts_info(ds, p->dp->index, ts);
}
-static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
- u16 vid)
+static int dsa_user_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.vid = vid,
@@ -1810,15 +1810,15 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
if (dsa_switch_supports_mc_filtering(ds)) {
netdev_for_each_synced_mc_addr(ha, dev) {
- dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD,
- ha->addr, vid);
+ dsa_user_schedule_standalone_work(dev, DSA_MC_ADD,
+ ha->addr, vid);
}
}
if (dsa_switch_supports_uc_filtering(ds)) {
netdev_for_each_synced_uc_addr(ha, dev) {
- dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD,
- ha->addr, vid);
+ dsa_user_schedule_standalone_work(dev, DSA_UC_ADD,
+ ha->addr, vid);
}
}
@@ -1835,10 +1835,10 @@ rollback:
return ret;
}
-static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
- u16 vid)
+static int dsa_user_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ u16 vid)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.vid = vid,
/* This API only allows programming tagged, non-PVID VIDs */
@@ -1874,15 +1874,15 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
if (dsa_switch_supports_mc_filtering(ds)) {
netdev_for_each_synced_mc_addr(ha, dev) {
- dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL,
- ha->addr, vid);
+ dsa_user_schedule_standalone_work(dev, DSA_MC_DEL,
+ ha->addr, vid);
}
}
if (dsa_switch_supports_uc_filtering(ds)) {
netdev_for_each_synced_uc_addr(ha, dev) {
- dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL,
- ha->addr, vid);
+ dsa_user_schedule_standalone_work(dev, DSA_UC_DEL,
+ ha->addr, vid);
}
}
@@ -1893,18 +1893,18 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
return 0;
}
-static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
+static int dsa_user_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
- return dsa_slave_vlan_rx_add_vid(arg, proto, vid);
+ return dsa_user_vlan_rx_add_vid(arg, proto, vid);
}
-static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
+static int dsa_user_clear_vlan(struct net_device *vdev, int vid, void *arg)
{
__be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q);
- return dsa_slave_vlan_rx_kill_vid(arg, proto, vid);
+ return dsa_user_vlan_rx_kill_vid(arg, proto, vid);
}
/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN
@@ -1938,26 +1938,26 @@ static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg)
* - the bridge VLANs
* - the 8021q upper VLANs
*/
-int dsa_slave_manage_vlan_filtering(struct net_device *slave,
- bool vlan_filtering)
+int dsa_user_manage_vlan_filtering(struct net_device *user,
+ bool vlan_filtering)
{
int err;
if (vlan_filtering) {
- slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- err = vlan_for_each(slave, dsa_slave_restore_vlan, slave);
+ err = vlan_for_each(user, dsa_user_restore_vlan, user);
if (err) {
- vlan_for_each(slave, dsa_slave_clear_vlan, slave);
- slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ vlan_for_each(user, dsa_user_clear_vlan, user);
+ user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
return err;
}
} else {
- err = vlan_for_each(slave, dsa_slave_clear_vlan, slave);
+ err = vlan_for_each(user, dsa_user_clear_vlan, user);
if (err)
return err;
- slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
}
return 0;
@@ -2028,7 +2028,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
list_for_each_entry(dst, &dsa_tree_list, list) {
list_for_each_entry(other_dp, &dst->ports, list) {
struct dsa_hw_port *hw_port;
- struct net_device *slave;
+ struct net_device *user;
if (other_dp->type != DSA_PORT_TYPE_USER)
continue;
@@ -2039,17 +2039,17 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
if (!other_dp->ds->mtu_enforcement_ingress)
continue;
- slave = other_dp->slave;
+ user = other_dp->user;
- if (min_mtu > slave->mtu)
- min_mtu = slave->mtu;
+ if (min_mtu > user->mtu)
+ min_mtu = user->mtu;
hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
if (!hw_port)
goto out;
- hw_port->dev = slave;
- hw_port->old_mtu = slave->mtu;
+ hw_port->dev = user;
+ hw_port->old_mtu = user->mtu;
list_add(&hw_port->list, &hw_port_list);
}
@@ -2059,7 +2059,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
* interface's MTU first, regardless of whether the intention of the
* user was to raise or lower it.
*/
- err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
+ err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->user->mtu);
if (!err)
goto out;
@@ -2073,16 +2073,16 @@ out:
dsa_hw_port_list_free(&hw_port_list);
}
-int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+int dsa_user_change_mtu(struct net_device *dev, int new_mtu)
{
- struct net_device *master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_port *cpu_dp = dp->cpu_dp;
struct dsa_switch *ds = dp->ds;
struct dsa_port *other_dp;
int largest_mtu = 0;
- int new_master_mtu;
- int old_master_mtu;
+ int new_conduit_mtu;
+ int old_conduit_mtu;
int mtu_limit;
int overhead;
int cpu_mtu;
@@ -2092,44 +2092,44 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
return -EOPNOTSUPP;
dsa_tree_for_each_user_port(other_dp, ds->dst) {
- int slave_mtu;
+ int user_mtu;
- /* During probe, this function will be called for each slave
+ /* During probe, this function will be called for each user
* device, while not all of them have been allocated. That's
* ok, it doesn't change what the maximum is, so ignore it.
*/
- if (!other_dp->slave)
+ if (!other_dp->user)
continue;
/* Pretend that we already applied the setting, which we
* actually haven't (still haven't done all integrity checks)
*/
if (dp == other_dp)
- slave_mtu = new_mtu;
+ user_mtu = new_mtu;
else
- slave_mtu = other_dp->slave->mtu;
+ user_mtu = other_dp->user->mtu;
- if (largest_mtu < slave_mtu)
- largest_mtu = slave_mtu;
+ if (largest_mtu < user_mtu)
+ largest_mtu = user_mtu;
}
overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
- mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
- old_master_mtu = master->mtu;
- new_master_mtu = largest_mtu + overhead;
- if (new_master_mtu > mtu_limit)
+ mtu_limit = min_t(int, conduit->max_mtu, dev->max_mtu + overhead);
+ old_conduit_mtu = conduit->mtu;
+ new_conduit_mtu = largest_mtu + overhead;
+ if (new_conduit_mtu > mtu_limit)
return -ERANGE;
- /* If the master MTU isn't over limit, there's no need to check the CPU
+ /* If the conduit MTU isn't over limit, there's no need to check the CPU
* MTU, since that surely isn't either.
*/
cpu_mtu = largest_mtu;
/* Start applying stuff */
- if (new_master_mtu != old_master_mtu) {
- err = dev_set_mtu(master, new_master_mtu);
+ if (new_conduit_mtu != old_conduit_mtu) {
+ err = dev_set_mtu(conduit, new_conduit_mtu);
if (err < 0)
- goto out_master_failed;
+ goto out_conduit_failed;
/* We only need to propagate the MTU of the CPU port to
* upstream switches, so emit a notifier which updates them.
@@ -2150,19 +2150,19 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
return 0;
out_port_failed:
- if (new_master_mtu != old_master_mtu)
- dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
+ if (new_conduit_mtu != old_conduit_mtu)
+ dsa_port_mtu_change(cpu_dp, old_conduit_mtu - overhead);
out_cpu_failed:
- if (new_master_mtu != old_master_mtu)
- dev_set_mtu(master, old_master_mtu);
-out_master_failed:
+ if (new_conduit_mtu != old_conduit_mtu)
+ dev_set_mtu(conduit, old_conduit_mtu);
+out_conduit_failed:
return err;
}
static int __maybe_unused
-dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
+dsa_user_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
unsigned long mask, new_prio;
int err, port = dp->index;
@@ -2187,9 +2187,9 @@ dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app)
}
static int __maybe_unused
-dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
+dsa_user_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
unsigned long mask, new_prio;
int err, port = dp->index;
@@ -2220,29 +2220,29 @@ dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app)
return 0;
}
-static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev,
- struct dcb_app *app)
+static int __maybe_unused dsa_user_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
{
switch (app->selector) {
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
switch (app->protocol) {
case 0:
- return dsa_slave_dcbnl_set_default_prio(dev, app);
+ return dsa_user_dcbnl_set_default_prio(dev, app);
default:
return -EOPNOTSUPP;
}
break;
case IEEE_8021QAZ_APP_SEL_DSCP:
- return dsa_slave_dcbnl_add_dscp_prio(dev, app);
+ return dsa_user_dcbnl_add_dscp_prio(dev, app);
default:
return -EOPNOTSUPP;
}
}
static int __maybe_unused
-dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
+dsa_user_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
unsigned long mask, new_prio;
int err, port = dp->index;
@@ -2267,9 +2267,9 @@ dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app)
}
static int __maybe_unused
-dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
+dsa_user_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int err, port = dp->index;
u8 dscp = app->protocol;
@@ -2290,20 +2290,20 @@ dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app)
return 0;
}
-static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
- struct dcb_app *app)
+static int __maybe_unused dsa_user_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
{
switch (app->selector) {
case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
switch (app->protocol) {
case 0:
- return dsa_slave_dcbnl_del_default_prio(dev, app);
+ return dsa_user_dcbnl_del_default_prio(dev, app);
default:
return -EOPNOTSUPP;
}
break;
case IEEE_8021QAZ_APP_SEL_DSCP:
- return dsa_slave_dcbnl_del_dscp_prio(dev, app);
+ return dsa_user_dcbnl_del_dscp_prio(dev, app);
default:
return -EOPNOTSUPP;
}
@@ -2312,9 +2312,9 @@ static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev,
/* Pre-populate the DCB application priority table with the priorities
* configured during switch setup, which we read from hardware here.
*/
-static int dsa_slave_dcbnl_init(struct net_device *dev)
+static int dsa_user_dcbnl_init(struct net_device *dev)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
int port = dp->index;
int err;
@@ -2362,49 +2362,49 @@ static int dsa_slave_dcbnl_init(struct net_device *dev)
return 0;
}
-static const struct ethtool_ops dsa_slave_ethtool_ops = {
- .get_drvinfo = dsa_slave_get_drvinfo,
- .get_regs_len = dsa_slave_get_regs_len,
- .get_regs = dsa_slave_get_regs,
- .nway_reset = dsa_slave_nway_reset,
+static const struct ethtool_ops dsa_user_ethtool_ops = {
+ .get_drvinfo = dsa_user_get_drvinfo,
+ .get_regs_len = dsa_user_get_regs_len,
+ .get_regs = dsa_user_get_regs,
+ .nway_reset = dsa_user_nway_reset,
.get_link = ethtool_op_get_link,
- .get_eeprom_len = dsa_slave_get_eeprom_len,
- .get_eeprom = dsa_slave_get_eeprom,
- .set_eeprom = dsa_slave_set_eeprom,
- .get_strings = dsa_slave_get_strings,
- .get_ethtool_stats = dsa_slave_get_ethtool_stats,
- .get_sset_count = dsa_slave_get_sset_count,
- .get_eth_phy_stats = dsa_slave_get_eth_phy_stats,
- .get_eth_mac_stats = dsa_slave_get_eth_mac_stats,
- .get_eth_ctrl_stats = dsa_slave_get_eth_ctrl_stats,
- .get_rmon_stats = dsa_slave_get_rmon_stats,
- .set_wol = dsa_slave_set_wol,
- .get_wol = dsa_slave_get_wol,
- .set_eee = dsa_slave_set_eee,
- .get_eee = dsa_slave_get_eee,
- .get_link_ksettings = dsa_slave_get_link_ksettings,
- .set_link_ksettings = dsa_slave_set_link_ksettings,
- .get_pause_stats = dsa_slave_get_pause_stats,
- .get_pauseparam = dsa_slave_get_pauseparam,
- .set_pauseparam = dsa_slave_set_pauseparam,
- .get_rxnfc = dsa_slave_get_rxnfc,
- .set_rxnfc = dsa_slave_set_rxnfc,
- .get_ts_info = dsa_slave_get_ts_info,
- .self_test = dsa_slave_net_selftest,
- .get_mm = dsa_slave_get_mm,
- .set_mm = dsa_slave_set_mm,
- .get_mm_stats = dsa_slave_get_mm_stats,
+ .get_eeprom_len = dsa_user_get_eeprom_len,
+ .get_eeprom = dsa_user_get_eeprom,
+ .set_eeprom = dsa_user_set_eeprom,
+ .get_strings = dsa_user_get_strings,
+ .get_ethtool_stats = dsa_user_get_ethtool_stats,
+ .get_sset_count = dsa_user_get_sset_count,
+ .get_eth_phy_stats = dsa_user_get_eth_phy_stats,
+ .get_eth_mac_stats = dsa_user_get_eth_mac_stats,
+ .get_eth_ctrl_stats = dsa_user_get_eth_ctrl_stats,
+ .get_rmon_stats = dsa_user_get_rmon_stats,
+ .set_wol = dsa_user_set_wol,
+ .get_wol = dsa_user_get_wol,
+ .set_eee = dsa_user_set_eee,
+ .get_eee = dsa_user_get_eee,
+ .get_link_ksettings = dsa_user_get_link_ksettings,
+ .set_link_ksettings = dsa_user_set_link_ksettings,
+ .get_pause_stats = dsa_user_get_pause_stats,
+ .get_pauseparam = dsa_user_get_pauseparam,
+ .set_pauseparam = dsa_user_set_pauseparam,
+ .get_rxnfc = dsa_user_get_rxnfc,
+ .set_rxnfc = dsa_user_set_rxnfc,
+ .get_ts_info = dsa_user_get_ts_info,
+ .self_test = dsa_user_net_selftest,
+ .get_mm = dsa_user_get_mm,
+ .set_mm = dsa_user_set_mm,
+ .get_mm_stats = dsa_user_get_mm_stats,
};
-static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
- .ieee_setapp = dsa_slave_dcbnl_ieee_setapp,
- .ieee_delapp = dsa_slave_dcbnl_ieee_delapp,
+static const struct dcbnl_rtnl_ops __maybe_unused dsa_user_dcbnl_ops = {
+ .ieee_setapp = dsa_user_dcbnl_ieee_setapp,
+ .ieee_delapp = dsa_user_dcbnl_ieee_delapp,
};
-static void dsa_slave_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *s)
+static void dsa_user_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *s)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
if (ds->ops->get_stats64)
@@ -2413,43 +2413,43 @@ static void dsa_slave_get_stats64(struct net_device *dev,
dev_get_tstats64(dev, s);
}
-static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
- struct net_device_path *path)
+static int dsa_user_fill_forward_path(struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
{
- struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
- struct net_device *master = dsa_port_to_master(dp);
+ struct dsa_port *dp = dsa_user_to_port(ctx->dev);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
struct dsa_port *cpu_dp = dp->cpu_dp;
path->dev = ctx->dev;
path->type = DEV_PATH_DSA;
path->dsa.proto = cpu_dp->tag_ops->proto;
path->dsa.port = dp->index;
- ctx->dev = master;
+ ctx->dev = conduit;
return 0;
}
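For reference, the hook above is what dev_fill_forward_path() reaches when netfilter flowtable offload resolves the transmit path of a flow egressing a DSA user port: one DEV_PATH_DSA entry records the tagging protocol and port number, and the walk then continues on the conduit. A rough kernel-context sketch of such a caller (hypothetical, not part of this patch; example_resolve_path() and its printout are invented):

static int example_resolve_path(struct net_device *user_dev, const u8 *daddr)
{
	struct net_device_path_stack stack;
	int i, err;

	err = dev_fill_forward_path(user_dev, daddr, &stack);
	if (err)
		return err;

	for (i = 0; i < stack.num_paths; i++)
		if (stack.path[i].type == DEV_PATH_DSA)
			pr_info("DSA hop: tag proto %d, port %d\n",
				stack.path[i].dsa.proto, stack.path[i].dsa.port);

	return 0;
}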
-static const struct net_device_ops dsa_slave_netdev_ops = {
- .ndo_open = dsa_slave_open,
- .ndo_stop = dsa_slave_close,
- .ndo_start_xmit = dsa_slave_xmit,
- .ndo_change_rx_flags = dsa_slave_change_rx_flags,
- .ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_mac_address = dsa_slave_set_mac_address,
- .ndo_fdb_dump = dsa_slave_fdb_dump,
- .ndo_eth_ioctl = dsa_slave_ioctl,
- .ndo_get_iflink = dsa_slave_get_iflink,
+static const struct net_device_ops dsa_user_netdev_ops = {
+ .ndo_open = dsa_user_open,
+ .ndo_stop = dsa_user_close,
+ .ndo_start_xmit = dsa_user_xmit,
+ .ndo_change_rx_flags = dsa_user_change_rx_flags,
+ .ndo_set_rx_mode = dsa_user_set_rx_mode,
+ .ndo_set_mac_address = dsa_user_set_mac_address,
+ .ndo_fdb_dump = dsa_user_fdb_dump,
+ .ndo_eth_ioctl = dsa_user_ioctl,
+ .ndo_get_iflink = dsa_user_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_netpoll_setup = dsa_slave_netpoll_setup,
- .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
- .ndo_poll_controller = dsa_slave_poll_controller,
+ .ndo_netpoll_setup = dsa_user_netpoll_setup,
+ .ndo_netpoll_cleanup = dsa_user_netpoll_cleanup,
+ .ndo_poll_controller = dsa_user_poll_controller,
#endif
- .ndo_setup_tc = dsa_slave_setup_tc,
- .ndo_get_stats64 = dsa_slave_get_stats64,
- .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
- .ndo_change_mtu = dsa_slave_change_mtu,
- .ndo_fill_forward_path = dsa_slave_fill_forward_path,
+ .ndo_setup_tc = dsa_user_setup_tc,
+ .ndo_get_stats64 = dsa_user_get_stats64,
+ .ndo_vlan_rx_add_vid = dsa_user_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dsa_user_vlan_rx_kill_vid,
+ .ndo_change_mtu = dsa_user_change_mtu,
+ .ndo_fill_forward_path = dsa_user_fill_forward_path,
};
static struct device_type dsa_type = {
@@ -2465,8 +2465,8 @@ void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);
-static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void dsa_user_phylink_fixed_state(struct phylink_config *config,
+ struct phylink_link_state *state)
{
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct dsa_switch *ds = dp->ds;
@@ -2477,33 +2477,33 @@ static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
ds->ops->phylink_fixed_state(ds, dp->index, state);
}
-/* slave device setup *******************************************************/
-static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr,
- u32 flags)
+/* user device setup *******************************************************/
+static int dsa_user_phy_connect(struct net_device *user_dev, int addr,
+ u32 flags)
{
- struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct dsa_port *dp = dsa_user_to_port(user_dev);
struct dsa_switch *ds = dp->ds;
- slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
- if (!slave_dev->phydev) {
- netdev_err(slave_dev, "no phy at %d\n", addr);
+ user_dev->phydev = mdiobus_get_phy(ds->user_mii_bus, addr);
+ if (!user_dev->phydev) {
+ netdev_err(user_dev, "no phy at %d\n", addr);
return -ENODEV;
}
- slave_dev->phydev->dev_flags |= flags;
+ user_dev->phydev->dev_flags |= flags;
- return phylink_connect_phy(dp->pl, slave_dev->phydev);
+ return phylink_connect_phy(dp->pl, user_dev->phydev);
}
-static int dsa_slave_phy_setup(struct net_device *slave_dev)
+static int dsa_user_phy_setup(struct net_device *user_dev)
{
- struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct dsa_port *dp = dsa_user_to_port(user_dev);
struct device_node *port_dn = dp->dn;
struct dsa_switch *ds = dp->ds;
u32 phy_flags = 0;
int ret;
- dp->pl_config.dev = &slave_dev->dev;
+ dp->pl_config.dev = &user_dev->dev;
dp->pl_config.type = PHYLINK_NETDEV;
/* The get_fixed_state callback takes precedence over polling the
@@ -2511,7 +2511,7 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
* this if the switch provides such a callback.
*/
if (ds->ops->phylink_fixed_state) {
- dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
+ dp->pl_config.get_fixed_state = dsa_user_phylink_fixed_state;
dp->pl_config.poll_fixed_state = true;
}
@@ -2523,14 +2523,14 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
phy_flags = ds->ops->get_phy_flags(ds, dp->index);
ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
- if (ret == -ENODEV && ds->slave_mii_bus) {
+ if (ret == -ENODEV && ds->user_mii_bus) {
/* We could not connect to a designated PHY or SFP, so try to
* use the switch internal MDIO bus instead
*/
- ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags);
+ ret = dsa_user_phy_connect(user_dev, dp->index, phy_flags);
}
if (ret) {
- netdev_err(slave_dev, "failed to connect to PHY: %pe\n",
+ netdev_err(user_dev, "failed to connect to PHY: %pe\n",
ERR_PTR(ret));
dsa_port_phylink_destroy(dp);
}
@@ -2538,42 +2538,42 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
return ret;
}
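The get_fixed_state branch above only kicks in when the switch driver supplies the callback; phylink then polls it because poll_fixed_state is set. A minimal, hypothetical driver-side implementation (foo_priv, foo_read() and the FOO_* register macros are invented for illustration) could look roughly like:

static void foo_phylink_fixed_state(struct dsa_switch *ds, int port,
				    struct phylink_link_state *state)
{
	struct foo_priv *priv = ds->priv;
	u32 reg = foo_read(priv, FOO_PORT_STATUS(port));	/* hypothetical register read */

	state->link = !!(reg & FOO_LINK_UP);
	state->speed = SPEED_1000;
	state->duplex = DUPLEX_FULL;
}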
-void dsa_slave_setup_tagger(struct net_device *slave)
+void dsa_user_setup_tagger(struct net_device *user)
{
- struct dsa_port *dp = dsa_slave_to_port(slave);
- struct net_device *master = dsa_port_to_master(dp);
- struct dsa_slave_priv *p = netdev_priv(slave);
+ struct dsa_port *dp = dsa_user_to_port(user);
+ struct net_device *conduit = dsa_port_to_conduit(dp);
+ struct dsa_user_priv *p = netdev_priv(user);
const struct dsa_port *cpu_dp = dp->cpu_dp;
const struct dsa_switch *ds = dp->ds;
- slave->needed_headroom = cpu_dp->tag_ops->needed_headroom;
- slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
- /* Try to save one extra realloc later in the TX path (in the master)
- * by also inheriting the master's needed headroom and tailroom.
+ user->needed_headroom = cpu_dp->tag_ops->needed_headroom;
+ user->needed_tailroom = cpu_dp->tag_ops->needed_tailroom;
+ /* Try to save one extra realloc later in the TX path (in the conduit)
+ * by also inheriting the conduit's needed headroom and tailroom.
* The 8021q driver also does this.
*/
- slave->needed_headroom += master->needed_headroom;
- slave->needed_tailroom += master->needed_tailroom;
+ user->needed_headroom += conduit->needed_headroom;
+ user->needed_tailroom += conduit->needed_tailroom;
p->xmit = cpu_dp->tag_ops->xmit;
- slave->features = master->vlan_features | NETIF_F_HW_TC;
- slave->hw_features |= NETIF_F_HW_TC;
- slave->features |= NETIF_F_LLTX;
- if (slave->needed_tailroom)
- slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
+ user->features = conduit->vlan_features | NETIF_F_HW_TC;
+ user->hw_features |= NETIF_F_HW_TC;
+ user->features |= NETIF_F_LLTX;
+ if (user->needed_tailroom)
+ user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST);
if (ds->needs_standalone_vlan_filtering)
- slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ user->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
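The headroom and tailroom folded in above are what let a tagging protocol's xmit hook usually prepend its header without reallocating. A simplified sketch in the style of an existing tagger (EXAMPLE_TAG_LEN and the tag layout are placeholders, not a real protocol):

#define EXAMPLE_TAG_LEN 8	/* placeholder tag size */

static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	/* Headroom was reserved via needed_headroom, so this is normally a
	 * no-op; it only copies when the skb header is cloned or shared.
	 */
	if (skb_cow_head(skb, EXAMPLE_TAG_LEN) < 0)
		return NULL;

	skb_push(skb, EXAMPLE_TAG_LEN);
	/* Move the MAC addresses forward and write the tag behind them. */
	memmove(skb->data, skb->data + EXAMPLE_TAG_LEN, 2 * ETH_ALEN);
	/* ...fill in switch index / port fields of the tag here... */

	return skb;
}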
-int dsa_slave_suspend(struct net_device *slave_dev)
+int dsa_user_suspend(struct net_device *user_dev)
{
- struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct dsa_port *dp = dsa_user_to_port(user_dev);
- if (!netif_running(slave_dev))
+ if (!netif_running(user_dev))
return 0;
- netif_device_detach(slave_dev);
+ netif_device_detach(user_dev);
rtnl_lock();
phylink_stop(dp->pl);
@@ -2582,14 +2582,14 @@ int dsa_slave_suspend(struct net_device *slave_dev)
return 0;
}
-int dsa_slave_resume(struct net_device *slave_dev)
+int dsa_user_resume(struct net_device *user_dev)
{
- struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+ struct dsa_port *dp = dsa_user_to_port(user_dev);
- if (!netif_running(slave_dev))
+ if (!netif_running(user_dev))
return 0;
- netif_device_attach(slave_dev);
+ netif_device_attach(user_dev);
rtnl_lock();
phylink_start(dp->pl);
@@ -2598,12 +2598,12 @@ int dsa_slave_resume(struct net_device *slave_dev)
return 0;
}
-int dsa_slave_create(struct dsa_port *port)
+int dsa_user_create(struct dsa_port *port)
{
- struct net_device *master = dsa_port_to_master(port);
+ struct net_device *conduit = dsa_port_to_conduit(port);
struct dsa_switch *ds = port->ds;
- struct net_device *slave_dev;
- struct dsa_slave_priv *p;
+ struct net_device *user_dev;
+ struct dsa_user_priv *p;
const char *name;
int assign_type;
int ret;
@@ -2619,55 +2619,55 @@ int dsa_slave_create(struct dsa_port *port)
assign_type = NET_NAME_ENUM;
}
- slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
- assign_type, ether_setup,
- ds->num_tx_queues, 1);
- if (slave_dev == NULL)
+ user_dev = alloc_netdev_mqs(sizeof(struct dsa_user_priv), name,
+ assign_type, ether_setup,
+ ds->num_tx_queues, 1);
+ if (user_dev == NULL)
return -ENOMEM;
- slave_dev->rtnl_link_ops = &dsa_link_ops;
- slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
+ user_dev->rtnl_link_ops = &dsa_link_ops;
+ user_dev->ethtool_ops = &dsa_user_ethtool_ops;
#if IS_ENABLED(CONFIG_DCB)
- slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops;
+ user_dev->dcbnl_ops = &dsa_user_dcbnl_ops;
#endif
if (!is_zero_ether_addr(port->mac))
- eth_hw_addr_set(slave_dev, port->mac);
+ eth_hw_addr_set(user_dev, port->mac);
else
- eth_hw_addr_inherit(slave_dev, master);
- slave_dev->priv_flags |= IFF_NO_QUEUE;
+ eth_hw_addr_inherit(user_dev, conduit);
+ user_dev->priv_flags |= IFF_NO_QUEUE;
if (dsa_switch_supports_uc_filtering(ds))
- slave_dev->priv_flags |= IFF_UNICAST_FLT;
- slave_dev->netdev_ops = &dsa_slave_netdev_ops;
+ user_dev->priv_flags |= IFF_UNICAST_FLT;
+ user_dev->netdev_ops = &dsa_user_netdev_ops;
if (ds->ops->port_max_mtu)
- slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
- SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
-
- SET_NETDEV_DEV(slave_dev, port->ds->dev);
- SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port);
- slave_dev->dev.of_node = port->dn;
- slave_dev->vlan_features = master->vlan_features;
-
- p = netdev_priv(slave_dev);
- slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!slave_dev->tstats) {
- free_netdev(slave_dev);
+ user_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
+ SET_NETDEV_DEVTYPE(user_dev, &dsa_type);
+
+ SET_NETDEV_DEV(user_dev, port->ds->dev);
+ SET_NETDEV_DEVLINK_PORT(user_dev, &port->devlink_port);
+ user_dev->dev.of_node = port->dn;
+ user_dev->vlan_features = conduit->vlan_features;
+
+ p = netdev_priv(user_dev);
+ user_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!user_dev->tstats) {
+ free_netdev(user_dev);
return -ENOMEM;
}
- ret = gro_cells_init(&p->gcells, slave_dev);
+ ret = gro_cells_init(&p->gcells, user_dev);
if (ret)
goto out_free;
p->dp = port;
INIT_LIST_HEAD(&p->mall_tc_list);
- port->slave = slave_dev;
- dsa_slave_setup_tagger(slave_dev);
+ port->user = user_dev;
+ dsa_user_setup_tagger(user_dev);
- netif_carrier_off(slave_dev);
+ netif_carrier_off(user_dev);
- ret = dsa_slave_phy_setup(slave_dev);
+ ret = dsa_user_phy_setup(user_dev);
if (ret) {
- netdev_err(slave_dev,
+ netdev_err(user_dev,
"error %d setting up PHY for tree %d, switch %d, port %d\n",
ret, ds->dst->index, ds->index, port->index);
goto out_gcells;
@@ -2675,23 +2675,23 @@ int dsa_slave_create(struct dsa_port *port)
rtnl_lock();
- ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
+ ret = dsa_user_change_mtu(user_dev, ETH_DATA_LEN);
if (ret && ret != -EOPNOTSUPP)
dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
ret, ETH_DATA_LEN, port->index);
- ret = register_netdevice(slave_dev);
+ ret = register_netdevice(user_dev);
if (ret) {
- netdev_err(master, "error %d registering interface %s\n",
- ret, slave_dev->name);
+ netdev_err(conduit, "error %d registering interface %s\n",
+ ret, user_dev->name);
rtnl_unlock();
goto out_phy;
}
if (IS_ENABLED(CONFIG_DCB)) {
- ret = dsa_slave_dcbnl_init(slave_dev);
+ ret = dsa_user_dcbnl_init(user_dev);
if (ret) {
- netdev_err(slave_dev,
+ netdev_err(user_dev,
"failed to initialize DCB: %pe\n",
ERR_PTR(ret));
rtnl_unlock();
@@ -2699,7 +2699,7 @@ int dsa_slave_create(struct dsa_port *port)
}
}
- ret = netdev_upper_dev_link(master, slave_dev, NULL);
+ ret = netdev_upper_dev_link(conduit, user_dev, NULL);
rtnl_unlock();
@@ -2709,7 +2709,7 @@ int dsa_slave_create(struct dsa_port *port)
return 0;
out_unregister:
- unregister_netdev(slave_dev);
+ unregister_netdev(user_dev);
out_phy:
rtnl_lock();
phylink_disconnect_phy(p->dp->pl);
@@ -2718,122 +2718,122 @@ out_phy:
out_gcells:
gro_cells_destroy(&p->gcells);
out_free:
- free_percpu(slave_dev->tstats);
- free_netdev(slave_dev);
- port->slave = NULL;
+ free_percpu(user_dev->tstats);
+ free_netdev(user_dev);
+ port->user = NULL;
return ret;
}
-void dsa_slave_destroy(struct net_device *slave_dev)
+void dsa_user_destroy(struct net_device *user_dev)
{
- struct net_device *master = dsa_slave_to_master(slave_dev);
- struct dsa_port *dp = dsa_slave_to_port(slave_dev);
- struct dsa_slave_priv *p = netdev_priv(slave_dev);
+ struct net_device *conduit = dsa_user_to_conduit(user_dev);
+ struct dsa_port *dp = dsa_user_to_port(user_dev);
+ struct dsa_user_priv *p = netdev_priv(user_dev);
- netif_carrier_off(slave_dev);
+ netif_carrier_off(user_dev);
rtnl_lock();
- netdev_upper_dev_unlink(master, slave_dev);
- unregister_netdevice(slave_dev);
+ netdev_upper_dev_unlink(conduit, user_dev);
+ unregister_netdevice(user_dev);
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
dsa_port_phylink_destroy(dp);
gro_cells_destroy(&p->gcells);
- free_percpu(slave_dev->tstats);
- free_netdev(slave_dev);
+ free_percpu(user_dev->tstats);
+ free_netdev(user_dev);
}
-int dsa_slave_change_master(struct net_device *dev, struct net_device *master,
+int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit,
struct netlink_ext_ack *extack)
{
- struct net_device *old_master = dsa_slave_to_master(dev);
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *old_conduit = dsa_user_to_conduit(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch *ds = dp->ds;
struct net_device *upper;
struct list_head *iter;
int err;
- if (master == old_master)
+ if (conduit == old_conduit)
return 0;
- if (!ds->ops->port_change_master) {
+ if (!ds->ops->port_change_conduit) {
NL_SET_ERR_MSG_MOD(extack,
- "Driver does not support changing DSA master");
+ "Driver does not support changing DSA conduit");
return -EOPNOTSUPP;
}
- if (!netdev_uses_dsa(master)) {
+ if (!netdev_uses_dsa(conduit)) {
NL_SET_ERR_MSG_MOD(extack,
- "Interface not eligible as DSA master");
+ "Interface not eligible as DSA conduit");
return -EOPNOTSUPP;
}
- netdev_for_each_upper_dev_rcu(master, upper, iter) {
- if (dsa_slave_dev_check(upper))
+ netdev_for_each_upper_dev_rcu(conduit, upper, iter) {
+ if (dsa_user_dev_check(upper))
continue;
if (netif_is_bridge_master(upper))
continue;
- NL_SET_ERR_MSG_MOD(extack, "Cannot join master with unknown uppers");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot join conduit with unknown uppers");
return -EOPNOTSUPP;
}
- /* Since we allow live-changing the DSA master, plus we auto-open the
- * DSA master when the user port opens => we need to ensure that the
- * new DSA master is open too.
+ /* Since we allow live-changing the DSA conduit, plus we auto-open the
+ * DSA conduit when the user port opens => we need to ensure that the
+ * new DSA conduit is open too.
*/
if (dev->flags & IFF_UP) {
- err = dev_open(master, extack);
+ err = dev_open(conduit, extack);
if (err)
return err;
}
- netdev_upper_dev_unlink(old_master, dev);
+ netdev_upper_dev_unlink(old_conduit, dev);
- err = netdev_upper_dev_link(master, dev, extack);
+ err = netdev_upper_dev_link(conduit, dev, extack);
if (err)
- goto out_revert_old_master_unlink;
+ goto out_revert_old_conduit_unlink;
- err = dsa_port_change_master(dp, master, extack);
+ err = dsa_port_change_conduit(dp, conduit, extack);
if (err)
- goto out_revert_master_link;
+ goto out_revert_conduit_link;
/* Update the MTU of the new CPU port through cross-chip notifiers */
- err = dsa_slave_change_mtu(dev, dev->mtu);
+ err = dsa_user_change_mtu(dev, dev->mtu);
if (err && err != -EOPNOTSUPP) {
netdev_warn(dev,
- "nonfatal error updating MTU with new master: %pe\n",
+ "nonfatal error updating MTU with new conduit: %pe\n",
ERR_PTR(err));
}
/* If the port doesn't have its own MAC address and relies on the DSA
- * master's one, inherit it again from the new DSA master.
+ * conduit's one, inherit it again from the new DSA conduit.
*/
if (is_zero_ether_addr(dp->mac))
- eth_hw_addr_inherit(dev, master);
+ eth_hw_addr_inherit(dev, conduit);
return 0;
-out_revert_master_link:
- netdev_upper_dev_unlink(master, dev);
-out_revert_old_master_unlink:
- netdev_upper_dev_link(old_master, dev, NULL);
+out_revert_conduit_link:
+ netdev_upper_dev_unlink(conduit, dev);
+out_revert_old_conduit_unlink:
+ netdev_upper_dev_link(old_conduit, dev, NULL);
return err;
}
-bool dsa_slave_dev_check(const struct net_device *dev)
+bool dsa_user_dev_check(const struct net_device *dev)
{
- return dev->netdev_ops == &dsa_slave_netdev_ops;
+ return dev->netdev_ops == &dsa_user_netdev_ops;
}
-EXPORT_SYMBOL_GPL(dsa_slave_dev_check);
+EXPORT_SYMBOL_GPL(dsa_user_dev_check);
-static int dsa_slave_changeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+static int dsa_user_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct netlink_ext_ack *extack;
int err = NOTIFY_DONE;
- if (!dsa_slave_dev_check(dev))
+ if (!dsa_user_dev_check(dev))
return err;
extack = netdev_notifier_info_to_extack(&info->info);
@@ -2885,28 +2885,28 @@ static int dsa_slave_changeupper(struct net_device *dev,
return err;
}
-static int dsa_slave_prechangeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+static int dsa_user_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
- if (!dsa_slave_dev_check(dev))
+ if (!dsa_user_dev_check(dev))
return NOTIFY_DONE;
if (netif_is_bridge_master(info->upper_dev) && !info->linking)
dsa_port_pre_bridge_leave(dp, info->upper_dev);
else if (netif_is_lag_master(info->upper_dev) && !info->linking)
dsa_port_pre_lag_leave(dp, info->upper_dev);
- /* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be
- * meaningfully enslaved to a bridge yet
+ /* dsa_port_pre_hsr_leave is not yet necessary since hsr devices cannot
+ * meaningfully placed under a bridge yet
*/
return NOTIFY_DONE;
}
static int
-dsa_slave_lag_changeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+dsa_user_lag_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct net_device *lower;
struct list_head *iter;
@@ -2917,15 +2917,15 @@ dsa_slave_lag_changeupper(struct net_device *dev,
return err;
netdev_for_each_lower_dev(dev, lower, iter) {
- if (!dsa_slave_dev_check(lower))
+ if (!dsa_user_dev_check(lower))
continue;
- dp = dsa_slave_to_port(lower);
+ dp = dsa_user_to_port(lower);
if (!dp->lag)
/* Software LAG */
continue;
- err = dsa_slave_changeupper(lower, info);
+ err = dsa_user_changeupper(lower, info);
if (notifier_to_errno(err))
break;
}
@@ -2933,12 +2933,12 @@ dsa_slave_lag_changeupper(struct net_device *dev,
return err;
}
-/* Same as dsa_slave_lag_changeupper() except that it calls
- * dsa_slave_prechangeupper()
+/* Same as dsa_user_lag_changeupper() except that it calls
+ * dsa_user_prechangeupper()
*/
static int
-dsa_slave_lag_prechangeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+dsa_user_lag_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct net_device *lower;
struct list_head *iter;
@@ -2949,15 +2949,15 @@ dsa_slave_lag_prechangeupper(struct net_device *dev,
return err;
netdev_for_each_lower_dev(dev, lower, iter) {
- if (!dsa_slave_dev_check(lower))
+ if (!dsa_user_dev_check(lower))
continue;
- dp = dsa_slave_to_port(lower);
+ dp = dsa_user_to_port(lower);
if (!dp->lag)
/* Software LAG */
continue;
- err = dsa_slave_prechangeupper(lower, info);
+ err = dsa_user_prechangeupper(lower, info);
if (notifier_to_errno(err))
break;
}
@@ -2970,7 +2970,7 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *ext_ack;
- struct net_device *slave, *br;
+ struct net_device *user, *br;
struct dsa_port *dp;
ext_ack = netdev_notifier_info_to_extack(&info->info);
@@ -2978,11 +2978,11 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
if (!is_vlan_dev(dev))
return NOTIFY_DONE;
- slave = vlan_dev_real_dev(dev);
- if (!dsa_slave_dev_check(slave))
+ user = vlan_dev_real_dev(dev);
+ if (!dsa_user_dev_check(user))
return NOTIFY_DONE;
- dp = dsa_slave_to_port(slave);
+ dp = dsa_user_to_port(user);
br = dsa_port_bridge_dev_get(dp);
if (!br)
return NOTIFY_DONE;
@@ -2991,7 +2991,7 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
if (br_vlan_enabled(br) &&
netif_is_bridge_master(info->upper_dev) && info->linking) {
NL_SET_ERR_MSG_MOD(ext_ack,
- "Cannot enslave VLAN device into VLAN aware bridge");
+ "Cannot make VLAN device join VLAN-aware bridge");
return notifier_from_errno(-EINVAL);
}
@@ -2999,10 +2999,10 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
}
static int
-dsa_slave_check_8021q_upper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+dsa_user_check_8021q_upper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
struct net_device *br = dsa_port_bridge_dev_get(dp);
struct bridge_vlan_info br_info;
struct netlink_ext_ack *extack;
@@ -3030,17 +3030,17 @@ dsa_slave_check_8021q_upper(struct net_device *dev,
}
static int
-dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+dsa_user_prechangeupper_sanity_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct dsa_switch *ds;
struct dsa_port *dp;
int err;
- if (!dsa_slave_dev_check(dev))
+ if (!dsa_user_dev_check(dev))
return dsa_prevent_bridging_8021q_upper(dev, info);
- dp = dsa_slave_to_port(dev);
+ dp = dsa_user_to_port(dev);
ds = dp->ds;
if (ds->ops->port_prechangeupper) {
@@ -3050,17 +3050,17 @@ dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
}
if (is_vlan_dev(info->upper_dev))
- return dsa_slave_check_8021q_upper(dev, info);
+ return dsa_user_check_8021q_upper(dev, info);
return NOTIFY_DONE;
}
-/* To be eligible as a DSA master, a LAG must have all lower interfaces be
- * eligible DSA masters. Additionally, all LAG slaves must be DSA masters of
+/* To be eligible as a DSA conduit, a LAG must have all lower interfaces be
+ * eligible DSA conduits. Additionally, all LAG slaves must be DSA conduits of
* switches in the same switch tree.
*/
-static int dsa_lag_master_validate(struct net_device *lag_dev,
- struct netlink_ext_ack *extack)
+static int dsa_lag_conduit_validate(struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
{
struct net_device *lower1, *lower2;
struct list_head *iter1, *iter2;
@@ -3070,7 +3070,7 @@ static int dsa_lag_master_validate(struct net_device *lag_dev,
if (!netdev_uses_dsa(lower1) ||
!netdev_uses_dsa(lower2)) {
NL_SET_ERR_MSG_MOD(extack,
- "All LAG ports must be eligible as DSA masters");
+ "All LAG ports must be eligible as DSA conduits");
return notifier_from_errno(-EINVAL);
}
@@ -3080,7 +3080,7 @@ static int dsa_lag_master_validate(struct net_device *lag_dev,
if (!dsa_port_tree_same(lower1->dsa_ptr,
lower2->dsa_ptr)) {
NL_SET_ERR_MSG_MOD(extack,
- "LAG contains DSA masters of disjoint switch trees");
+ "LAG contains DSA conduits of disjoint switch trees");
return notifier_from_errno(-EINVAL);
}
}
@@ -3090,41 +3090,41 @@ static int dsa_lag_master_validate(struct net_device *lag_dev,
}
static int
-dsa_master_prechangeupper_sanity_check(struct net_device *master,
- struct netdev_notifier_changeupper_info *info)
+dsa_conduit_prechangeupper_sanity_check(struct net_device *conduit,
+ struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
- if (!netdev_uses_dsa(master))
+ if (!netdev_uses_dsa(conduit))
return NOTIFY_DONE;
if (!info->linking)
return NOTIFY_DONE;
/* Allow DSA switch uppers */
- if (dsa_slave_dev_check(info->upper_dev))
+ if (dsa_user_dev_check(info->upper_dev))
return NOTIFY_DONE;
- /* Allow bridge uppers of DSA masters, subject to further
+ /* Allow bridge uppers of DSA conduits, subject to further
* restrictions in dsa_bridge_prechangelower_sanity_check()
*/
if (netif_is_bridge_master(info->upper_dev))
return NOTIFY_DONE;
/* Allow LAG uppers, subject to further restrictions in
- * dsa_lag_master_prechangelower_sanity_check()
+ * dsa_lag_conduit_prechangelower_sanity_check()
*/
if (netif_is_lag_master(info->upper_dev))
- return dsa_lag_master_validate(info->upper_dev, extack);
+ return dsa_lag_conduit_validate(info->upper_dev, extack);
NL_SET_ERR_MSG_MOD(extack,
- "DSA master cannot join unknown upper interfaces");
+ "DSA conduit cannot join unknown upper interfaces");
return notifier_from_errno(-EBUSY);
}
static int
-dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+dsa_lag_conduit_prechangelower_sanity_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
struct net_device *lag_dev = info->upper_dev;
@@ -3139,14 +3139,14 @@ dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
if (!netdev_uses_dsa(dev)) {
NL_SET_ERR_MSG(extack,
- "Only DSA masters can join a LAG DSA master");
+ "Only DSA conduits can join a LAG DSA conduit");
return notifier_from_errno(-EINVAL);
}
netdev_for_each_lower_dev(lag_dev, lower, iter) {
if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
NL_SET_ERR_MSG(extack,
- "Interface is DSA master for a different switch tree than this LAG");
+ "Interface is DSA conduit for a different switch tree than this LAG");
return notifier_from_errno(-EINVAL);
}
@@ -3156,13 +3156,13 @@ dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
return NOTIFY_DONE;
}
-/* Don't allow bridging of DSA masters, since the bridge layer rx_handler
+/* Don't allow bridging of DSA conduits, since the bridge layer rx_handler
* prevents the DSA fake ethertype handler to be invoked, so we don't get the
* chance to strip off and parse the DSA switch tag protocol header (the bridge
* layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these
* frames).
* The only case where that would not be an issue is when bridging can already
- * be offloaded, such as when the DSA master is itself a DSA or plain switchdev
+ * be offloaded, such as when the DSA conduit is itself a DSA or plain switchdev
* port, and is bridged only with other ports from the same hardware device.
*/
static int
@@ -3188,7 +3188,7 @@ dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
if (!netdev_port_same_parent_id(lower, new_lower)) {
NL_SET_ERR_MSG(extack,
- "Cannot do software bridging with a DSA master");
+ "Cannot do software bridging with a DSA conduit");
return notifier_from_errno(-EINVAL);
}
}
@@ -3196,45 +3196,45 @@ dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
return NOTIFY_DONE;
}
-static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
- struct net_device *lag_dev)
+static void dsa_tree_migrate_ports_from_lag_conduit(struct dsa_switch_tree *dst,
+ struct net_device *lag_dev)
{
- struct net_device *new_master = dsa_tree_find_first_master(dst);
+ struct net_device *new_conduit = dsa_tree_find_first_conduit(dst);
struct dsa_port *dp;
int err;
dsa_tree_for_each_user_port(dp, dst) {
- if (dsa_port_to_master(dp) != lag_dev)
+ if (dsa_port_to_conduit(dp) != lag_dev)
continue;
- err = dsa_slave_change_master(dp->slave, new_master, NULL);
+ err = dsa_user_change_conduit(dp->user, new_conduit, NULL);
if (err) {
- netdev_err(dp->slave,
- "failed to restore master to %s: %pe\n",
- new_master->name, ERR_PTR(err));
+ netdev_err(dp->user,
+ "failed to restore conduit to %s: %pe\n",
+ new_conduit->name, ERR_PTR(err));
}
}
}
-static int dsa_master_lag_join(struct net_device *master,
- struct net_device *lag_dev,
- struct netdev_lag_upper_info *uinfo,
- struct netlink_ext_ack *extack)
+static int dsa_conduit_lag_join(struct net_device *conduit,
+ struct net_device *lag_dev,
+ struct netdev_lag_upper_info *uinfo,
+ struct netlink_ext_ack *extack)
{
- struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
struct dsa_port *dp;
int err;
- err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
+ err = dsa_conduit_lag_setup(lag_dev, cpu_dp, uinfo, extack);
if (err)
return err;
dsa_tree_for_each_user_port(dp, dst) {
- if (dsa_port_to_master(dp) != master)
+ if (dsa_port_to_conduit(dp) != conduit)
continue;
- err = dsa_slave_change_master(dp->slave, lag_dev, extack);
+ err = dsa_user_change_conduit(dp->user, lag_dev, extack);
if (err)
goto restore;
}
@@ -3243,24 +3243,24 @@ static int dsa_master_lag_join(struct net_device *master,
restore:
dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
- if (dsa_port_to_master(dp) != lag_dev)
+ if (dsa_port_to_conduit(dp) != lag_dev)
continue;
- err = dsa_slave_change_master(dp->slave, master, NULL);
+ err = dsa_user_change_conduit(dp->user, conduit, NULL);
if (err) {
- netdev_err(dp->slave,
- "failed to restore master to %s: %pe\n",
- master->name, ERR_PTR(err));
+ netdev_err(dp->user,
+ "failed to restore conduit to %s: %pe\n",
+ conduit->name, ERR_PTR(err));
}
}
- dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
+ dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);
return err;
}
-static void dsa_master_lag_leave(struct net_device *master,
- struct net_device *lag_dev)
+static void dsa_conduit_lag_leave(struct net_device *conduit,
+ struct net_device *lag_dev)
{
struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->dst;
@@ -3277,10 +3277,10 @@ static void dsa_master_lag_leave(struct net_device *master,
if (new_cpu_dp) {
/* Update the CPU port of the user ports still under the LAG
- * so that dsa_port_to_master() continues to work properly
+ * so that dsa_port_to_conduit() continues to work properly
*/
dsa_tree_for_each_user_port(dp, dst)
- if (dsa_port_to_master(dp) == lag_dev)
+ if (dsa_port_to_conduit(dp) == lag_dev)
dp->cpu_dp = new_cpu_dp;
/* Update the index of the virtual CPU port to match the lowest
@@ -3289,20 +3289,20 @@ static void dsa_master_lag_leave(struct net_device *master,
lag_dev->dsa_ptr = new_cpu_dp;
wmb();
} else {
- /* If the LAG DSA master has no ports left, migrate back all
+ /* If the LAG DSA conduit has no ports left, migrate back all
* user ports to the first physical CPU port
*/
- dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
+ dsa_tree_migrate_ports_from_lag_conduit(dst, lag_dev);
}
- /* This DSA master has left its LAG in any case, so let
+ /* This DSA conduit has left its LAG in any case, so let
* the CPU port leave the hardware LAG as well
*/
- dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
+ dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr);
}
-static int dsa_master_changeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+static int dsa_conduit_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack;
int err = NOTIFY_DONE;
@@ -3314,11 +3314,11 @@ static int dsa_master_changeupper(struct net_device *dev,
if (netif_is_lag_master(info->upper_dev)) {
if (info->linking) {
- err = dsa_master_lag_join(dev, info->upper_dev,
- info->upper_info, extack);
+ err = dsa_conduit_lag_join(dev, info->upper_dev,
+ info->upper_info, extack);
err = notifier_from_errno(err);
} else {
- dsa_master_lag_leave(dev, info->upper_dev);
+ dsa_conduit_lag_leave(dev, info->upper_dev);
err = NOTIFY_OK;
}
}
@@ -3326,8 +3326,8 @@ static int dsa_master_changeupper(struct net_device *dev,
return err;
}
-static int dsa_slave_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int dsa_user_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
@@ -3336,15 +3336,15 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
struct netdev_notifier_changeupper_info *info = ptr;
int err;
- err = dsa_slave_prechangeupper_sanity_check(dev, info);
+ err = dsa_user_prechangeupper_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
- err = dsa_master_prechangeupper_sanity_check(dev, info);
+ err = dsa_conduit_prechangeupper_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
- err = dsa_lag_master_prechangelower_sanity_check(dev, info);
+ err = dsa_lag_conduit_prechangelower_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
@@ -3352,11 +3352,11 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
if (notifier_to_errno(err))
return err;
- err = dsa_slave_prechangeupper(dev, ptr);
+ err = dsa_user_prechangeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
- err = dsa_slave_lag_prechangeupper(dev, ptr);
+ err = dsa_user_lag_prechangeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
@@ -3365,15 +3365,15 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
case NETDEV_CHANGEUPPER: {
int err;
- err = dsa_slave_changeupper(dev, ptr);
+ err = dsa_user_changeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
- err = dsa_slave_lag_changeupper(dev, ptr);
+ err = dsa_user_lag_changeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
- err = dsa_master_changeupper(dev, ptr);
+ err = dsa_conduit_changeupper(dev, ptr);
if (notifier_to_errno(err))
return err;
@@ -3384,13 +3384,13 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
struct dsa_port *dp;
int err = 0;
- if (dsa_slave_dev_check(dev)) {
- dp = dsa_slave_to_port(dev);
+ if (dsa_user_dev_check(dev)) {
+ dp = dsa_user_to_port(dev);
err = dsa_port_lag_change(dp, info->lower_state_info);
}
- /* Mirror LAG port events on DSA masters that are in
+ /* Mirror LAG port events on DSA conduits that are in
* a LAG towards their respective switch CPU ports
*/
if (netdev_uses_dsa(dev)) {
@@ -3403,28 +3403,28 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
}
case NETDEV_CHANGE:
case NETDEV_UP: {
- /* Track state of master port.
- * DSA driver may require the master port (and indirectly
+ /* Track state of conduit port.
+ * DSA driver may require the conduit port (and indirectly
* the tagger) to be available for some special operation.
*/
if (netdev_uses_dsa(dev)) {
struct dsa_port *cpu_dp = dev->dsa_ptr;
struct dsa_switch_tree *dst = cpu_dp->ds->dst;
- /* Track when the master port is UP */
- dsa_tree_master_oper_state_change(dst, dev,
- netif_oper_up(dev));
+ /* Track when the conduit port is UP */
+ dsa_tree_conduit_oper_state_change(dst, dev,
+ netif_oper_up(dev));
- /* Track when the master port is ready and can accept
+ /* Track when the conduit port is ready and can accept
* packet.
* NETDEV_UP event is not enough to flag a port as ready.
* We also have to wait for linkwatch_do_dev to dev_activate
* and emit a NETDEV_CHANGE event.
- * We check if a master port is ready by checking if the dev
+ * We check if a conduit port is ready by checking if the dev
* have a qdisc assigned and is not noop.
*/
- dsa_tree_master_admin_state_change(dst, dev,
- !qdisc_tx_is_noop(dev));
+ dsa_tree_conduit_admin_state_change(dst, dev,
+ !qdisc_tx_is_noop(dev));
return NOTIFY_OK;
}
@@ -3442,7 +3442,7 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
cpu_dp = dev->dsa_ptr;
dst = cpu_dp->ds->dst;
- dsa_tree_master_admin_state_change(dst, dev, false);
+ dsa_tree_conduit_admin_state_change(dst, dev, false);
list_for_each_entry(dp, &dst->ports, list) {
if (!dsa_port_is_user(dp))
@@ -3451,7 +3451,7 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
if (dp->cpu_dp != cpu_dp)
continue;
- list_add(&dp->slave->close_list, &close_list);
+ list_add(&dp->user->close_list, &close_list);
}
dev_close_many(&close_list, true);
@@ -3477,7 +3477,7 @@ dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work)
switchdev_work->orig_dev, &info.info, NULL);
}
-static void dsa_slave_switchdev_event_work(struct work_struct *work)
+static void dsa_user_switchdev_event_work(struct work_struct *work)
{
struct dsa_switchdev_event_work *switchdev_work =
container_of(work, struct dsa_switchdev_event_work, work);
@@ -3488,7 +3488,7 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
struct dsa_port *dp;
int err;
- dp = dsa_slave_to_port(dev);
+ dp = dsa_user_to_port(dev);
ds = dp->ds;
switch (switchdev_work->event) {
@@ -3530,7 +3530,7 @@ static void dsa_slave_switchdev_event_work(struct work_struct *work)
static bool dsa_foreign_dev_check(const struct net_device *dev,
const struct net_device *foreign_dev)
{
- const struct dsa_port *dp = dsa_slave_to_port(dev);
+ const struct dsa_port *dp = dsa_user_to_port(dev);
struct dsa_switch_tree *dst = dp->ds->dst;
if (netif_is_bridge_master(foreign_dev))
@@ -3543,13 +3543,13 @@ static bool dsa_foreign_dev_check(const struct net_device *dev,
return true;
}
-static int dsa_slave_fdb_event(struct net_device *dev,
- struct net_device *orig_dev,
- unsigned long event, const void *ctx,
- const struct switchdev_notifier_fdb_info *fdb_info)
+static int dsa_user_fdb_event(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info)
{
struct dsa_switchdev_event_work *switchdev_work;
- struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct dsa_port *dp = dsa_user_to_port(dev);
bool host_addr = fdb_info->is_local;
struct dsa_switch *ds = dp->ds;
@@ -3598,7 +3598,7 @@ static int dsa_slave_fdb_event(struct net_device *dev,
orig_dev->name, fdb_info->addr, fdb_info->vid,
host_addr ? " as host address" : "");
- INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work);
+ INIT_WORK(&switchdev_work->work, dsa_user_switchdev_event_work);
switchdev_work->event = event;
switchdev_work->dev = dev;
switchdev_work->orig_dev = orig_dev;
@@ -3613,8 +3613,8 @@ static int dsa_slave_fdb_event(struct net_device *dev,
}
/* Called under rcu_read_lock() */
-static int dsa_slave_switchdev_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int dsa_user_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
@@ -3622,15 +3622,15 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
switch (event) {
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
- dsa_slave_dev_check,
- dsa_slave_port_attr_set);
+ dsa_user_dev_check,
+ dsa_user_port_attr_set);
return notifier_from_errno(err);
case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
- dsa_slave_dev_check,
+ dsa_user_dev_check,
dsa_foreign_dev_check,
- dsa_slave_fdb_event);
+ dsa_user_fdb_event);
return notifier_from_errno(err);
default:
return NOTIFY_DONE;
@@ -3639,8 +3639,8 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
return NOTIFY_OK;
}
-static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int dsa_user_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
@@ -3648,52 +3648,52 @@ static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
err = switchdev_handle_port_obj_add_foreign(dev, ptr,
- dsa_slave_dev_check,
+ dsa_user_dev_check,
dsa_foreign_dev_check,
- dsa_slave_port_obj_add);
+ dsa_user_port_obj_add);
return notifier_from_errno(err);
case SWITCHDEV_PORT_OBJ_DEL:
err = switchdev_handle_port_obj_del_foreign(dev, ptr,
- dsa_slave_dev_check,
+ dsa_user_dev_check,
dsa_foreign_dev_check,
- dsa_slave_port_obj_del);
+ dsa_user_port_obj_del);
return notifier_from_errno(err);
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
- dsa_slave_dev_check,
- dsa_slave_port_attr_set);
+ dsa_user_dev_check,
+ dsa_user_port_attr_set);
return notifier_from_errno(err);
}
return NOTIFY_DONE;
}
-static struct notifier_block dsa_slave_nb __read_mostly = {
- .notifier_call = dsa_slave_netdevice_event,
+static struct notifier_block dsa_user_nb __read_mostly = {
+ .notifier_call = dsa_user_netdevice_event,
};
-struct notifier_block dsa_slave_switchdev_notifier = {
- .notifier_call = dsa_slave_switchdev_event,
+struct notifier_block dsa_user_switchdev_notifier = {
+ .notifier_call = dsa_user_switchdev_event,
};
-struct notifier_block dsa_slave_switchdev_blocking_notifier = {
- .notifier_call = dsa_slave_switchdev_blocking_event,
+struct notifier_block dsa_user_switchdev_blocking_notifier = {
+ .notifier_call = dsa_user_switchdev_blocking_event,
};
-int dsa_slave_register_notifier(void)
+int dsa_user_register_notifier(void)
{
struct notifier_block *nb;
int err;
- err = register_netdevice_notifier(&dsa_slave_nb);
+ err = register_netdevice_notifier(&dsa_user_nb);
if (err)
return err;
- err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
+ err = register_switchdev_notifier(&dsa_user_switchdev_notifier);
if (err)
goto err_switchdev_nb;
- nb = &dsa_slave_switchdev_blocking_notifier;
+ nb = &dsa_user_switchdev_blocking_notifier;
err = register_switchdev_blocking_notifier(nb);
if (err)
goto err_switchdev_blocking_nb;
@@ -3701,27 +3701,27 @@ int dsa_slave_register_notifier(void)
return 0;
err_switchdev_blocking_nb:
- unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
+ unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
err_switchdev_nb:
- unregister_netdevice_notifier(&dsa_slave_nb);
+ unregister_netdevice_notifier(&dsa_user_nb);
return err;
}
-void dsa_slave_unregister_notifier(void)
+void dsa_user_unregister_notifier(void)
{
struct notifier_block *nb;
int err;
- nb = &dsa_slave_switchdev_blocking_notifier;
+ nb = &dsa_user_switchdev_blocking_notifier;
err = unregister_switchdev_blocking_notifier(nb);
if (err)
pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
- err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
+ err = unregister_switchdev_notifier(&dsa_user_switchdev_notifier);
if (err)
pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
- err = unregister_netdevice_notifier(&dsa_slave_nb);
+ err = unregister_netdevice_notifier(&dsa_user_nb);
if (err)
- pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
+ pr_err("DSA: failed to unregister user notifier (%d)\n", err);
}
diff --git a/net/dsa/user.h b/net/dsa/user.h
new file mode 100644
index 000000000000..996069130bea
--- /dev/null
+++ b/net/dsa/user.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __DSA_USER_H
+#define __DSA_USER_H
+
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/list.h>
+#include <linux/netpoll.h>
+#include <linux/types.h>
+#include <net/dsa.h>
+#include <net/gro_cells.h>
+
+struct net_device;
+struct netlink_ext_ack;
+
+extern struct notifier_block dsa_user_switchdev_notifier;
+extern struct notifier_block dsa_user_switchdev_blocking_notifier;
+
+struct dsa_user_priv {
+ /* Copy of CPU port xmit for faster access in user transmit hot path */
+ struct sk_buff * (*xmit)(struct sk_buff *skb,
+ struct net_device *dev);
+
+ struct gro_cells gcells;
+
+ /* DSA port data, such as switch, port index, etc. */
+ struct dsa_port *dp;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+#endif
+
+ /* TC context */
+ struct list_head mall_tc_list;
+};
+
+void dsa_user_mii_bus_init(struct dsa_switch *ds);
+int dsa_user_create(struct dsa_port *dp);
+void dsa_user_destroy(struct net_device *user_dev);
+int dsa_user_suspend(struct net_device *user_dev);
+int dsa_user_resume(struct net_device *user_dev);
+int dsa_user_register_notifier(void);
+void dsa_user_unregister_notifier(void);
+void dsa_user_sync_ha(struct net_device *dev);
+void dsa_user_unsync_ha(struct net_device *dev);
+void dsa_user_setup_tagger(struct net_device *user);
+int dsa_user_change_mtu(struct net_device *dev, int new_mtu);
+int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit,
+ struct netlink_ext_ack *extack);
+int dsa_user_manage_vlan_filtering(struct net_device *dev,
+ bool vlan_filtering);
+
+static inline struct dsa_port *dsa_user_to_port(const struct net_device *dev)
+{
+ struct dsa_user_priv *p = netdev_priv(dev);
+
+ return p->dp;
+}
+
+static inline struct net_device *
+dsa_user_to_conduit(const struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_user_to_port(dev);
+
+ return dsa_port_to_conduit(dp);
+}
+
+#endif
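As a usage note for the two inline helpers above (example_find_conduit() is hypothetical and not part of user.h), core code typically hops from a user netdev to its port and then to the conduit like this:

static struct net_device *example_find_conduit(struct net_device *user_dev)
{
	struct dsa_port *dp = dsa_user_to_port(user_dev);

	netdev_dbg(user_dev, "user port %d on switch %d\n",
		   dp->index, dp->ds->index);

	return dsa_user_to_conduit(user_dev);
}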
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index dc478a0574cb..c64334363230 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -41,7 +41,6 @@ static siphash_aligned_key_t syncookie_secret[2];
* requested/supported by the syn/synack exchange.
*/
#define TSBITS 6
-#define TSMASK (((__u32)1 << TSBITS) - 1)
static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
u32 count, int c)
@@ -52,6 +51,14 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
count, &syncookie_secret[c]);
}
+/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
+static u64 tcp_ns_to_ts(bool usec_ts, u64 val)
+{
+ if (usec_ts)
+ return div_u64(val, NSEC_PER_USEC);
+
+ return div_u64(val, NSEC_PER_MSEC);
+}
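A standalone userspace recomputation of the helper above (the sample value is arbitrary) shows how a single nanosecond clock reading maps to the two timestamp resolutions:

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define NSEC_PER_MSEC 1000000ULL

static uint64_t demo_ns_to_ts(bool usec_ts, uint64_t val)
{
	return val / (usec_ts ? NSEC_PER_USEC : NSEC_PER_MSEC);
}

int main(void)
{
	uint64_t now_ns = 1700000000123456789ULL;	/* arbitrary sample */

	printf("ms resolution:   %" PRIu64 "\n", demo_ns_to_ts(false, now_ns));
	printf("usec resolution: %" PRIu64 "\n", demo_ns_to_ts(true, now_ns));
	return 0;
}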
/*
* when syncookies are in effect and tcp timestamps are enabled we encode
@@ -62,27 +69,24 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
*/
u64 cookie_init_timestamp(struct request_sock *req, u64 now)
{
- struct inet_request_sock *ireq;
- u32 ts, ts_now = tcp_ns_to_ts(now);
+ const struct inet_request_sock *ireq = inet_rsk(req);
+ u64 ts, ts_now = tcp_ns_to_ts(false, now);
u32 options = 0;
- ireq = inet_rsk(req);
-
options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
if (ireq->sack_ok)
options |= TS_OPT_SACK;
if (ireq->ecn_ok)
options |= TS_OPT_ECN;
- ts = ts_now & ~TSMASK;
+ ts = (ts_now >> TSBITS) << TSBITS;
ts |= options;
- if (ts > ts_now) {
- ts >>= TSBITS;
- ts--;
- ts <<= TSBITS;
- ts |= options;
- }
- return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
+ if (ts > ts_now)
+ ts -= (1UL << TSBITS);
+
+ if (tcp_rsk(req)->req_usec_ts)
+ return ts * NSEC_PER_USEC;
+ return ts * NSEC_PER_MSEC;
}
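To make the encoding above concrete, here is a toy, self-contained rerun of the same arithmetic with TSBITS == 6 and made-up numbers; it shows the low six bits being replaced by the option flags and the one-grain backoff when that would run ahead of the clock:

#include <stdint.h>
#include <stdio.h>

#define TSBITS 6

int main(void)
{
	uint64_t ts_now = 1000050;	/* hypothetical timestamp clock value */
	uint64_t options = 0x3f;	/* stand-in for the wscale/SACK/ECN bits */
	uint64_t ts = (ts_now >> TSBITS) << TSBITS;	/* 1000000 */

	ts |= options;			/* 1000063, ahead of ts_now */
	if (ts > ts_now)
		ts -= (1ULL << TSBITS);	/* back off one grain: 999999 */

	printf("ts_now=%llu encoded=%llu decoded options=%llu\n",
	       (unsigned long long)ts_now, (unsigned long long)ts,
	       (unsigned long long)(ts & ((1ULL << TSBITS) - 1)));
	return 0;
}

The peer echoes this value in its timestamp option, and the decode side later recovers the negotiated options from the low TSBITS bits.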
@@ -302,6 +306,8 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
treq->af_specific = af_ops;
treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
+ treq->req_usec_ts = -1;
+
#if IS_ENABLED(CONFIG_MPTCP)
treq->is_mptcp = sk_is_mptcp(sk);
if (treq->is_mptcp) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 56a8d936000f..a86d8200a1e8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3629,10 +3629,16 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
tp->fastopen_no_cookie = val;
break;
case TCP_TIMESTAMP:
- if (!tp->repair)
+ if (!tp->repair) {
err = -EPERM;
- else
- WRITE_ONCE(tp->tsoffset, val - tcp_time_stamp_raw());
+ break;
+ }
+ /* val is an opaque field,
+ * and low order bit contains usec_ts enable bit.
+ * Its a best effort, and we do not care if user makes an error.
+ */
+ tp->tcp_usec_ts = val & 1;
+ WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts));
break;
case TCP_REPAIR_WINDOW:
err = tcp_repair_set_window(tp, optval, optlen);
@@ -3754,6 +3760,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_options |= TCPI_OPT_ECN_SEEN;
if (tp->syn_data_acked)
info->tcpi_options |= TCPI_OPT_SYN_DATA;
+ if (tp->tcp_usec_ts)
+ info->tcpi_options |= TCPI_OPT_USEC_TS;
info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
info->tcpi_ato = jiffies_to_usecs(min_t(u32, icsk->icsk_ack.ato,
@@ -3817,10 +3825,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_total_rto = tp->total_rto;
info->tcpi_total_rto_recoveries = tp->total_rto_recoveries;
info->tcpi_total_rto_time = tp->total_rto_time;
- if (tp->rto_stamp) {
- info->tcpi_total_rto_time += tcp_time_stamp_raw() -
- tp->rto_stamp;
- }
+ if (tp->rto_stamp)
+ info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp;
unlock_sock_fast(sk, slow);
}
@@ -4145,7 +4151,11 @@ int do_tcp_getsockopt(struct sock *sk, int level,
break;
case TCP_TIMESTAMP:
- val = tcp_time_stamp_raw() + READ_ONCE(tp->tsoffset);
+ val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset);
+ if (tp->tcp_usec_ts)
+ val |= 1;
+ else
+ val &= ~1;
break;
case TCP_NOTSENT_LOWAT:
val = READ_ONCE(tp->notsent_lowat);
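Taken together with the setsockopt() hunk above, TCP_TIMESTAMP is now an opaque value whose low-order bit reports whether the socket uses microsecond timestamps; writing it back still requires repair mode. A minimal userspace sketch of reading it (error handling trimmed to the essentials):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_TIMESTAMP
#define TCP_TIMESTAMP 24	/* from include/uapi/linux/tcp.h */
#endif

int main(void)
{
	unsigned int val = 0;
	socklen_t len = sizeof(val);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 || getsockopt(fd, IPPROTO_TCP, TCP_TIMESTAMP, &val, &len)) {
		perror("TCP_TIMESTAMP");
		return 1;
	}
	printf("opaque ts value: %u (usec resolution: %s)\n",
	       val, (val & 1) ? "yes" : "no");
	close(fd);
	return 0;
}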
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ab87f0285b72..18b858597af4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -693,6 +693,23 @@ new_measure:
tp->rcv_rtt_est.time = tp->tcp_mstamp;
}
+static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp)
+{
+ u32 delta, delta_us;
+
+ delta = tcp_time_stamp_ts(tp) - tp->rx_opt.rcv_tsecr;
+ if (tp->tcp_usec_ts)
+ return delta;
+
+ if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
+ if (!delta)
+ delta = 1;
+ delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+ return delta_us;
+ }
+ return -1;
+}
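For a feel of the numbers in tcp_rtt_tsopt_us(), a standalone recomputation follows; the constants mirror the kernel's millisecond timestamp clock and the sample timestamps are made up:

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000U
#define TCP_TS_HZ	1000U	/* millisecond timestamp clock */

static int demo_rtt_tsopt_us(bool usec_ts, uint32_t now_ts, uint32_t tsecr)
{
	uint32_t delta = now_ts - tsecr;

	if (usec_ts)				/* already in microseconds */
		return delta;
	if (delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))
		return (delta ? delta : 1) * (USEC_PER_SEC / TCP_TS_HZ);
	return -1;				/* would overflow, reject */
}

int main(void)
{
	printf("ms clock:   %d us\n", demo_rtt_tsopt_us(false, 1003, 1000));
	printf("usec clock: %d us\n", demo_rtt_tsopt_us(true, 1003000, 1000000));
	return 0;
}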
+
static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
const struct sk_buff *skb)
{
@@ -704,15 +721,10 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
if (TCP_SKB_CB(skb)->end_seq -
TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
- u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
- u32 delta_us;
-
- if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
- if (!delta)
- delta = 1;
- delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
- tcp_rcv_rtt_update(tp, delta_us, 0);
- }
+ s32 delta = tcp_rtt_tsopt_us(tp);
+
+ if (delta >= 0)
+ tcp_rcv_rtt_update(tp, delta, 0);
}
}
@@ -2442,7 +2454,7 @@ static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
const struct sk_buff *skb)
{
return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
- tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
+ tcp_tsopt_ecr_before(tp, tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
}
/* Nothing was retransmitted or returned timestamp is less
@@ -2856,7 +2868,7 @@ void tcp_enter_recovery(struct sock *sk, bool ece_ack)
static void tcp_update_rto_time(struct tcp_sock *tp)
{
if (tp->rto_stamp) {
- tp->total_rto_time += tcp_time_stamp(tp) - tp->rto_stamp;
+ tp->total_rto_time += tcp_time_stamp_ms(tp) - tp->rto_stamp;
tp->rto_stamp = 0;
}
}
@@ -3146,17 +3158,10 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
* left edge of the send window.
* See draft-ietf-tcplw-high-performance-00, section 3.3.
*/
- if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
- flag & FLAG_ACKED) {
- u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
-
- if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
- if (!delta)
- delta = 1;
- seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
- ca_rtt_us = seq_rtt_us;
- }
- }
+ if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp &&
+ tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED)
+ seq_rtt_us = ca_rtt_us = tcp_rtt_tsopt_us(tp);
+
rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
if (seq_rtt_us < 0)
return false;
@@ -6293,7 +6298,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
- tcp_time_stamp(tp))) {
+ tcp_time_stamp_ts(tp))) {
NET_INC_STATS(sock_net(sk),
LINUX_MIB_PAWSACTIVEREJECTED);
goto reset_and_undo;
@@ -7042,6 +7047,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
req->syncookie = want_cookie;
tcp_rsk(req)->af_specific = af_ops;
tcp_rsk(req)->ts_off = 0;
+ tcp_rsk(req)->req_usec_ts = -1;
#if IS_ENABLED(CONFIG_MPTCP)
tcp_rsk(req)->is_mptcp = 0;
#endif
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a67a5de86253..7583d4e34c8c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -296,6 +296,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rt = NULL;
goto failure;
}
+ tp->tcp_usec_ts = dst_tcp_usec_ts(&rt->dst);
/* OK, now commit destination to socket. */
sk->sk_gso_type = SKB_GSO_TCPV4;
sk_setup_caps(sk, &rt->dst);
@@ -954,7 +955,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcp_v4_send_ack(sk, skb,
tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
- tcp_time_stamp_raw() + tcptw->tw_ts_offset,
+ tcp_tw_tsval(tcptw),
tcptw->tw_ts_recent,
tw->tw_bound_dev_if,
tcp_twsk_md5_key(tcptw),
@@ -988,7 +989,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
tcp_v4_send_ack(sk, skb, seq,
tcp_rsk(req)->rcv_nxt,
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
- tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
+ tcp_rsk_tsval(tcp_rsk(req)),
READ_ONCE(req->ts_recent),
0,
tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index ae36780977d2..52fe17167460 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -272,7 +272,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
- u32 now = tcp_time_stamp(tp);
+ u32 now = tcp_time_stamp_ts(tp);
u32 delta;
if (sample->rtt_us > 0)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 3f87611077ef..ace806c5bd0c 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -300,6 +300,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
tcptw->tw_ts_offset = tp->tsoffset;
+ tw->tw_usec_ts = tp->tcp_usec_ts;
tcptw->tw_last_oow_ack_time = 0;
tcptw->tw_tx_delay = tp->tcp_tx_delay;
tw->tw_txhash = sk->sk_txhash;
@@ -554,21 +555,29 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->max_window = newtp->snd_wnd;
if (newtp->rx_opt.tstamp_ok) {
+ newtp->tcp_usec_ts = treq->req_usec_ts;
newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
} else {
+ newtp->tcp_usec_ts = 0;
newtp->rx_opt.ts_recent_stamp = 0;
newtp->tcp_header_len = sizeof(struct tcphdr);
}
if (req->num_timeout) {
- newtp->undo_marker = treq->snt_isn;
- newtp->retrans_stamp = div_u64(treq->snt_synack,
- USEC_PER_SEC / TCP_TS_HZ);
newtp->total_rto = req->num_timeout;
- newtp->total_rto_recoveries = 1;
- newtp->total_rto_time = tcp_time_stamp_raw() -
+ newtp->undo_marker = treq->snt_isn;
+ if (newtp->tcp_usec_ts) {
+ newtp->retrans_stamp = treq->snt_synack;
+ newtp->total_rto_time = (u32)(tcp_clock_us() -
+ newtp->retrans_stamp) / USEC_PER_MSEC;
+ } else {
+ newtp->retrans_stamp = div_u64(treq->snt_synack,
+ USEC_PER_SEC / TCP_TS_HZ);
+ newtp->total_rto_time = tcp_clock_ms() -
newtp->retrans_stamp;
+ }
+ newtp->total_rto_recoveries = 1;
}
newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 909f85aefd74..ca4d7594efd4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -799,7 +799,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) {
opts->options |= OPTION_TS;
- opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
+ opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset;
opts->tsecr = tp->rx_opt.ts_recent;
remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
@@ -884,7 +884,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
}
if (likely(ireq->tstamp_ok)) {
opts->options |= OPTION_TS;
- opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
+ opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) +
+ tcp_rsk(req)->ts_off;
opts->tsecr = READ_ONCE(req->ts_recent);
remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
@@ -943,7 +944,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
if (likely(tp->rx_opt.tstamp_ok)) {
opts->options |= OPTION_TS;
- opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
+ opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) +
+ tp->tsoffset : 0;
opts->tsecr = tp->rx_opt.ts_recent;
size += TCPOLEN_TSTAMP_ALIGNED;
}
@@ -1696,14 +1698,6 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
*/
mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
- /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
- if (icsk->icsk_af_ops->net_frag_header_len) {
- const struct dst_entry *dst = __sk_dst_get(sk);
-
- if (dst && dst_allfrag(dst))
- mss_now -= icsk->icsk_af_ops->net_frag_header_len;
- }
-
/* Clamp it (mss_clamp does not include tcp options) */
if (mss_now > tp->rx_opt.mss_clamp)
mss_now = tp->rx_opt.mss_clamp;
@@ -1731,21 +1725,11 @@ int tcp_mss_to_mtu(struct sock *sk, int mss)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
- int mtu;
- mtu = mss +
+ return mss +
tp->tcp_header_len +
icsk->icsk_ext_hdr_len +
icsk->icsk_af_ops->net_header_len;
-
- /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
- if (icsk->icsk_af_ops->net_frag_header_len) {
- const struct dst_entry *dst = __sk_dst_get(sk);
-
- if (dst && dst_allfrag(dst))
- mtu += icsk->icsk_af_ops->net_frag_header_len;
- }
- return mtu;
}
EXPORT_SYMBOL(tcp_mss_to_mtu);
@@ -3379,7 +3363,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
/* Save stamp of the first (attempted) retransmit. */
if (!tp->retrans_stamp)
- tp->retrans_stamp = tcp_skb_timestamp(skb);
+ tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb);
if (tp->undo_retrans < 0)
tp->undo_retrans = 0;
@@ -3665,6 +3649,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
memset(&opts, 0, sizeof(opts));
+ if (tcp_rsk(req)->req_usec_ts < 0)
+ tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
now = tcp_clock_ns();
#ifdef CONFIG_SYN_COOKIES
if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
@@ -3961,7 +3947,7 @@ int tcp_connect(struct sock *sk)
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
tcp_mstamp_refresh(tp);
- tp->retrans_stamp = tcp_time_stamp(tp);
+ tp->retrans_stamp = tcp_time_stamp_ts(tp);
tcp_connect_queue_skb(sk, buff);
tcp_ecn_send_syn(sk, buff);
tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0862b73dd3b5..1f9f6c1c196b 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -26,14 +26,18 @@
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- u32 elapsed, start_ts, user_timeout;
+ const struct tcp_sock *tp = tcp_sk(sk);
+ u32 elapsed, user_timeout;
s32 remaining;
- start_ts = tcp_sk(sk)->retrans_stamp;
user_timeout = READ_ONCE(icsk->icsk_user_timeout);
if (!user_timeout)
return icsk->icsk_rto;
- elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
+
+ elapsed = tcp_time_stamp_ts(tp) - tp->retrans_stamp;
+ if (tp->tcp_usec_ts)
+ elapsed /= USEC_PER_MSEC;
+
remaining = user_timeout - elapsed;
if (remaining <= 0)
return 1; /* user timeout has passed; fire ASAP */
@@ -212,12 +216,13 @@ static bool retransmits_timed_out(struct sock *sk,
unsigned int boundary,
unsigned int timeout)
{
- unsigned int start_ts;
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned int start_ts, delta;
if (!inet_csk(sk)->icsk_retransmits)
return false;
- start_ts = tcp_sk(sk)->retrans_stamp;
+ start_ts = tp->retrans_stamp;
if (likely(timeout == 0)) {
unsigned int rto_base = TCP_RTO_MIN;
@@ -226,7 +231,12 @@ static bool retransmits_timed_out(struct sock *sk,
timeout = tcp_model_timeout(sk, boundary, rto_base);
}
- return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
+ if (tp->tcp_usec_ts) {
+ /* delta may be off by up to a jiffy due to timer granularity. */
+ delta = tp->tcp_mstamp - start_ts + jiffies_to_usecs(1);
+ return (s32)(delta - timeout * USEC_PER_MSEC) >= 0;
+ }
+ return (s32)(tcp_time_stamp_ts(tp) - start_ts - timeout) >= 0;
}
/* A write timeout has occurred. Process the after effects. */
@@ -422,7 +432,7 @@ static void tcp_update_rto_stats(struct sock *sk)
if (!icsk->icsk_retransmits) {
tp->total_rto_recoveries++;
- tp->rto_stamp = tcp_time_stamp(tp);
+ tp->rto_stamp = tcp_time_stamp_ms(tp);
}
icsk->icsk_retransmits++;
tp->total_rto++;
@@ -462,26 +472,24 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
req->num_timeout++;
tcp_update_rto_stats(sk);
if (!tp->retrans_stamp)
- tp->retrans_stamp = tcp_time_stamp(tp);
+ tp->retrans_stamp = tcp_time_stamp_ts(tp);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
req->timeout << req->num_timeout, TCP_RTO_MAX);
}
static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
- const struct sk_buff *skb)
+ const struct sk_buff *skb,
+ u32 rtx_delta)
{
const struct tcp_sock *tp = tcp_sk(sk);
const int timeout = TCP_RTO_MAX * 2;
- u32 rcv_delta, rtx_delta;
+ u32 rcv_delta;
rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
if (rcv_delta <= timeout)
return false;
- rtx_delta = (u32)msecs_to_jiffies(tcp_time_stamp(tp) -
- (tp->retrans_stamp ?: tcp_skb_timestamp(skb)));
-
- return rtx_delta > timeout;
+ return msecs_to_jiffies(rtx_delta) > timeout;
}
/**
@@ -534,7 +542,11 @@ void tcp_retransmit_timer(struct sock *sk)
struct inet_sock *inet = inet_sk(sk);
u32 rtx_delta;
- rtx_delta = tcp_time_stamp(tp) - (tp->retrans_stamp ?: tcp_skb_timestamp(skb));
+ rtx_delta = tcp_time_stamp_ts(tp) - (tp->retrans_stamp ?:
+ tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
+ if (tp->tcp_usec_ts)
+ rtx_delta /= USEC_PER_MSEC;
+
if (sk->sk_family == AF_INET) {
net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
&inet->inet_daddr, ntohs(inet->inet_dport),
@@ -551,7 +563,7 @@ void tcp_retransmit_timer(struct sock *sk)
rtx_delta);
}
#endif
- if (tcp_rtx_probe0_timed_out(sk, skb)) {
+ if (tcp_rtx_probe0_timed_out(sk, skb, rtx_delta)) {
tcp_write_err(sk);
goto out;
}
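With usec timestamps, retrans_stamp and tcp_mstamp share the same microsecond base, so retransmits_timed_out() converts the millisecond timeout instead of the elapsed time and pads the delta with one jiffy of slack. A standalone sketch of that comparison; HZ and the sample values are assumptions chosen for illustration:

/* Sketch of the usec-timestamp branch of retransmits_timed_out():
 * elapsed time is measured in us, the configured timeout in ms.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HZ		1000U
#define USEC_PER_MSEC	1000U

static unsigned int jiffies_to_usecs(unsigned int j)
{
	return j * (1000000U / HZ);
}

static bool timed_out_usec(uint32_t mstamp_us, uint32_t start_ts_us,
			   uint32_t timeout_ms)
{
	/* delta may be off by up to a jiffy due to timer granularity */
	uint32_t delta = mstamp_us - start_ts_us + jiffies_to_usecs(1);

	return (int32_t)(delta - timeout_ms * USEC_PER_MSEC) >= 0;
}

int main(void)
{
	/* 2.5s elapsed vs a 2s boundary: timed out */
	printf("%d\n", timed_out_usec(2500000, 0, 2000));
	/* 1.5s elapsed vs a 2s boundary: not yet */
	printf("%d\n", timed_out_usec(1500000, 0, 2000));
	return 0;
}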
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c2d471ad7922..3aaea56b5166 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1399,6 +1399,7 @@ retry:
idev->cnf.temp_valid_lft + age);
cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
+ cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft);
cfg.plen = ifp->prefix_len;
tmp_tstamp = ifp->tstamp;
@@ -1406,15 +1407,23 @@ retry:
write_unlock_bh(&idev->lock);
- /* A temporary address is created only if this calculated Preferred
- * Lifetime is greater than REGEN_ADVANCE time units. In particular,
- * an implementation must not create a temporary address with a zero
- * Preferred Lifetime.
+ /* From RFC 4941:
+ *
+ * A temporary address is created only if this calculated Preferred
+ * Lifetime is greater than REGEN_ADVANCE time units. In
+ * particular, an implementation must not create a temporary address
+ * with a zero Preferred Lifetime.
+ *
+ * Clamp the preferred lifetime to a minimum of regen_advance, unless
+ * that would exceed valid_lft.
+ *
* Use age calculation as in addrconf_verify to avoid unnecessary
* temporary addresses being generated.
*/
age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
- if (cfg.preferred_lft <= regen_advance + age) {
+ if (cfg.preferred_lft <= regen_advance + age)
+ cfg.preferred_lft = regen_advance + age + 1;
+ if (cfg.preferred_lft > cfg.valid_lft) {
in6_ifa_put(ifp);
in6_dev_put(idev);
ret = -1;
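The addrconf change stops refusing a temporary address outright when the computed preferred lifetime falls at or below regen_advance + age: it now bumps the lifetime just past that floor and only gives up if the result would exceed valid_lft. A minimal sketch of the clamping; the lifetimes are made-up values, not configuration defaults:

/* Sketch of the clamping applied to a temporary address's preferred
 * lifetime. In the kernel the inputs come from the interface
 * configuration and the RFC 4941 constants.
 */
#include <stdio.h>

struct cfg {
	unsigned int valid_lft;
	unsigned int preferred_lft;
};

/* returns 0 on success, -1 if no temporary address can be created */
static int clamp_preferred_lft(struct cfg *cfg, unsigned int regen_advance,
			       unsigned int age)
{
	if (cfg->preferred_lft <= regen_advance + age)
		cfg->preferred_lft = regen_advance + age + 1;
	if (cfg->preferred_lft > cfg->valid_lft)
		return -1;
	return 0;
}

int main(void)
{
	struct cfg cfg = { .valid_lft = 7200, .preferred_lft = 10 };

	/* clamped up to regen_advance + age + 1 = 36, still <= valid_lft */
	printf("%d pref=%u\n", clamp_preferred_lft(&cfg, 30, 5),
	       cfg.preferred_lft);
	return 0;
}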
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 571c10fb00b1..a722a43dd668 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -164,7 +164,13 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
int err;
skb_mark_not_on_list(segs);
- err = ip6_fragment(net, sk, segs, ip6_finish_output2);
+ /* Last GSO segment can be smaller than gso_size (and MTU).
+ * Adding a fragment header would produce an "atomic fragment",
+ * which is considered harmful (RFC-8021). Avoid that.
+ */
+ err = segs->len > mtu ?
+ ip6_fragment(net, sk, segs, ip6_finish_output2) :
+ ip6_finish_output2(net, sk, segs);
if (err && ret == 0)
ret = err;
}
@@ -172,6 +178,16 @@ ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
return ret;
}
+static int ip6_finish_output_gso(struct net *net, struct sock *sk,
+ struct sk_buff *skb, unsigned int mtu)
+{
+ if (!(IP6CB(skb)->flags & IP6SKB_FAKEJUMBO) &&
+ !skb_gso_validate_network_len(skb, mtu))
+ return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
+
+ return ip6_finish_output2(net, sk, skb);
+}
+
static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
unsigned int mtu;
@@ -185,17 +201,14 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
#endif
mtu = ip6_skb_dst_mtu(skb);
- if (skb_is_gso(skb) &&
- !(IP6CB(skb)->flags & IP6SKB_FAKEJUMBO) &&
- !skb_gso_validate_network_len(skb, mtu))
- return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
+ if (skb_is_gso(skb))
+ return ip6_finish_output_gso(net, sk, skb, mtu);
- if ((skb->len > mtu && !skb_is_gso(skb)) ||
- dst_allfrag(skb_dst(skb)) ||
+ if (skb->len > mtu ||
(IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
return ip6_fragment(net, sk, skb, ip6_finish_output2);
- else
- return ip6_finish_output2(net, sk, skb);
+
+ return ip6_finish_output2(net, sk, skb);
}
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -1017,9 +1030,6 @@ slow_path:
return err;
fail_toobig:
- if (skb->sk && dst_allfrag(skb_dst(skb)))
- sk_gso_disable(skb->sk);
-
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
err = -EMSGSIZE;
@@ -1283,74 +1293,6 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
-/**
- * ip6_dst_lookup_tunnel - perform route lookup on tunnel
- * @skb: Packet for which lookup is done
- * @dev: Tunnel device
- * @net: Network namespace of tunnel device
- * @sock: Socket which provides route info
- * @saddr: Memory to store the src ip address
- * @info: Tunnel information
- * @protocol: IP protocol
- * @use_cache: Flag to enable cache usage
- * This function performs a route lookup on a tunnel
- *
- * It returns a valid dst pointer and stores src address to be used in
- * tunnel in param saddr on success, else a pointer encoded error code.
- */
-
-struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
- struct net_device *dev,
- struct net *net,
- struct socket *sock,
- struct in6_addr *saddr,
- const struct ip_tunnel_info *info,
- u8 protocol,
- bool use_cache)
-{
- struct dst_entry *dst = NULL;
-#ifdef CONFIG_DST_CACHE
- struct dst_cache *dst_cache;
-#endif
- struct flowi6 fl6;
- __u8 prio;
-
-#ifdef CONFIG_DST_CACHE
- dst_cache = (struct dst_cache *)&info->dst_cache;
- if (use_cache) {
- dst = dst_cache_get_ip6(dst_cache, saddr);
- if (dst)
- return dst;
- }
-#endif
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_mark = skb->mark;
- fl6.flowi6_proto = protocol;
- fl6.daddr = info->key.u.ipv6.dst;
- fl6.saddr = info->key.u.ipv6.src;
- prio = info->key.tos;
- fl6.flowlabel = ip6_make_flowinfo(prio, info->key.label);
-
- dst = ipv6_stub->ipv6_dst_lookup_flow(net, sock->sk, &fl6,
- NULL);
- if (IS_ERR(dst)) {
- netdev_dbg(dev, "no route to %pI6\n", &fl6.daddr);
- return ERR_PTR(-ENETUNREACH);
- }
- if (dst->dev == dev) { /* is this necessary? */
- netdev_dbg(dev, "circular route to %pI6\n", &fl6.daddr);
- dst_release(dst);
- return ERR_PTR(-ELOOP);
- }
-#ifdef CONFIG_DST_CACHE
- if (use_cache)
- dst_cache_set_ip6(dst_cache, dst, &fl6.saddr);
-#endif
- *saddr = fl6.saddr;
- return dst;
-}
-EXPORT_SYMBOL_GPL(ip6_dst_lookup_tunnel);
-
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
gfp_t gfp)
{
@@ -1452,10 +1394,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
cork->base.mark = ipc6->sockc.mark;
sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);
- if (dst_allfrag(xfrm_dst_path(&rt->dst)))
- cork->base.flags |= IPCORK_ALLFRAG;
cork->base.length = 0;
-
cork->base.transmit_time = ipc6->sockc.transmit_time;
return 0;
@@ -1512,8 +1451,6 @@ static int __ip6_append_data(struct sock *sk,
headersize = sizeof(struct ipv6hdr) +
(opt ? opt->opt_flen + opt->opt_nflen : 0) +
- (dst_allfrag(&rt->dst) ?
- sizeof(struct frag_hdr) : 0) +
rt->rt6i_nfheader_len;
if (mtu <= fragheaderlen ||
@@ -1623,7 +1560,7 @@ emsgsize:
while (length > 0) {
/* Check if the remaining data fits into current packet. */
- copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
+ copy = (cork->length <= mtu ? mtu : maxfraglen) - skb->len;
if (copy < length)
copy = maxfraglen - skb->len;
@@ -1654,7 +1591,7 @@ alloc_new_skb:
*/
datalen = length + fraggap;
- if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+ if (datalen > (cork->length <= mtu ? mtu : maxfraglen) - fragheaderlen)
datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
fraglen = datalen + fragheaderlen;
pagedlen = 0;
@@ -1903,7 +1840,6 @@ static void ip6_cork_steal_dst(struct sk_buff *skb, struct inet_cork_full *cork)
struct dst_entry *dst = cork->base.dst;
cork->base.dst = NULL;
- cork->base.flags &= ~IPCORK_ALLFRAG;
skb_dst_set(skb, dst);
}
@@ -1924,7 +1860,6 @@ static void ip6_cork_release(struct inet_cork_full *cork,
if (cork->base.dst) {
dst_release(cork->base.dst);
cork->base.dst = NULL;
- cork->base.flags &= ~IPCORK_ALLFRAG;
}
}
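In the GSO slow path each segment is now fragmented only when it actually exceeds the MTU; an undersized trailing segment is transmitted as-is, so no RFC 8021 atomic fragment is generated. A hedged sketch of that per-segment decision, with plain functions standing in for ip6_fragment() and ip6_finish_output2():

/* Sketch of the per-segment choice in the GSO slow path: fragment only
 * when the segment exceeds the MTU, otherwise transmit it directly so
 * no atomic fragment is produced. Stand-in types and callbacks.
 */
#include <stdio.h>

struct seg {
	unsigned int len;
};

static int do_fragment(struct seg *s)
{
	printf("fragmenting %u-byte segment\n", s->len);
	return 0;
}

static int do_output(struct seg *s)
{
	printf("sending %u-byte segment as-is\n", s->len);
	return 0;
}

static int output_segment(struct seg *s, unsigned int mtu)
{
	return s->len > mtu ? do_fragment(s) : do_output(s);
}

int main(void)
{
	struct seg full = { .len = 1500 }, tail = { .len = 612 };

	output_segment(&full, 1280);	/* larger than MTU: fragment */
	output_segment(&tail, 1280);	/* fits: no fragment header */
	return 0;
}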
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index 70d38705c92f..a7bf0327b380 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -1,3 +1,4 @@
+
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/errno.h>
@@ -112,4 +113,73 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
}
EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb);
+/**
+ * udp_tunnel6_dst_lookup - perform route lookup on UDP tunnel
+ * @skb: Packet for which lookup is done
+ * @dev: Tunnel device
+ * @net: Network namespace of tunnel device
+ * @sock: Socket which provides route info
+ * @oif: Index of the output interface
+ * @saddr: Memory to store the src ip address
+ * @key: Tunnel information
+ * @sport: UDP source port
+ * @dport: UDP destination port
+ * @dsfield: The traffic class field
+ * @dst_cache: The dst cache to use for lookup
+ * This function performs a route lookup on a UDP tunnel
+ *
+ * It returns a valid dst pointer and stores the source address to be used
+ * in the tunnel in @saddr on success, else a pointer-encoded error code.
+ */
+
+struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net,
+ struct socket *sock,
+ int oif,
+ struct in6_addr *saddr,
+ const struct ip_tunnel_key *key,
+ __be16 sport, __be16 dport, u8 dsfield,
+ struct dst_cache *dst_cache)
+{
+ struct dst_entry *dst = NULL;
+ struct flowi6 fl6;
+
+#ifdef CONFIG_DST_CACHE
+ if (dst_cache) {
+ dst = dst_cache_get_ip6(dst_cache, saddr);
+ if (dst)
+ return dst;
+ }
+#endif
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = IPPROTO_UDP;
+ fl6.flowi6_oif = oif;
+ fl6.daddr = key->u.ipv6.dst;
+ fl6.saddr = key->u.ipv6.src;
+ fl6.fl6_sport = sport;
+ fl6.fl6_dport = dport;
+ fl6.flowlabel = ip6_make_flowinfo(dsfield, key->label);
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, sock->sk, &fl6,
+ NULL);
+ if (IS_ERR(dst)) {
+ netdev_dbg(dev, "no route to %pI6\n", &fl6.daddr);
+ return ERR_PTR(-ENETUNREACH);
+ }
+ if (dst->dev == dev) { /* is this necessary? */
+ netdev_dbg(dev, "circular route to %pI6\n", &fl6.daddr);
+ dst_release(dst);
+ return ERR_PTR(-ELOOP);
+ }
+#ifdef CONFIG_DST_CACHE
+ if (dst_cache)
+ dst_cache_set_ip6(dst_cache, dst, &fl6.saddr);
+#endif
+ *saddr = fl6.saddr;
+ return dst;
+}
+EXPORT_SYMBOL_GPL(udp_tunnel6_dst_lookup);
+
MODULE_LICENSE("GPL");
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d410703bb5a1..dc27988512a6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -286,6 +286,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
goto failure;
}
+ tp->tcp_usec_ts = dst_tcp_usec_ts(dst);
tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
if (!saddr) {
@@ -1096,7 +1097,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
- tcp_time_stamp_raw() + tcptw->tw_ts_offset,
+ tcp_tw_tsval(tcptw),
tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority,
tw->tw_txhash);
@@ -1123,7 +1124,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
tcp_rsk(req)->rcv_nxt,
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
- tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
+ tcp_rsk_tsval(tcp_rsk(req)),
READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
ipv6_get_dsfield(ipv6_hdr(skb)), 0,
@@ -1894,7 +1895,6 @@ const struct inet_connection_sock_af_ops ipv6_specific = {
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
.net_header_len = sizeof(struct ipv6hdr),
- .net_frag_header_len = sizeof(struct frag_hdr),
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index ad07904642ca..5f7b1fdbffe6 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -95,7 +95,7 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
return -EMSGSIZE;
}
- if (toobig || dst_allfrag(skb_dst(skb)))
+ if (toobig)
return ip6_fragment(net, sk, skb,
__xfrm6_output_finish);
diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile
index 84e531f86b82..bcf1dbf3a432 100644
--- a/net/mptcp/Makefile
+++ b/net/mptcp/Makefile
@@ -2,7 +2,8 @@
obj-$(CONFIG_MPTCP) += mptcp.o
mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \
- mib.o pm_netlink.o sockopt.o pm_userspace.o fastopen.o sched.o
+ mib.o pm_netlink.o sockopt.o pm_userspace.o fastopen.o sched.o \
+ mptcp_pm_gen.o
obj-$(CONFIG_SYN_COOKIES) += syncookies.o
obj-$(CONFIG_INET_MPTCP_DIAG) += mptcp_diag.o
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index e72b518c5d02..13fe0748dde8 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -27,6 +27,7 @@ struct mptcp_pernet {
#endif
unsigned int add_addr_timeout;
+ unsigned int close_timeout;
unsigned int stale_loss_cnt;
u8 mptcp_enabled;
u8 checksum_enabled;
@@ -65,6 +66,13 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net)
return mptcp_get_pernet(net)->stale_loss_cnt;
}
+unsigned int mptcp_close_timeout(const struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_DEAD))
+ return TCP_TIMEWAIT_LEN;
+ return mptcp_get_pernet(sock_net(sk))->close_timeout;
+}
+
int mptcp_get_pm_type(const struct net *net)
{
return mptcp_get_pernet(net)->pm_type;
@@ -79,6 +87,7 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
{
pernet->mptcp_enabled = 1;
pernet->add_addr_timeout = TCP_RTO_MAX;
+ pernet->close_timeout = TCP_TIMEWAIT_LEN;
pernet->checksum_enabled = 0;
pernet->allow_join_initial_addr_port = 1;
pernet->stale_loss_cnt = 4;
@@ -141,6 +150,12 @@ static struct ctl_table mptcp_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dostring,
},
+ {
+ .procname = "close_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
{}
};
@@ -163,6 +178,7 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
table[4].data = &pernet->stale_loss_cnt;
table[5].data = &pernet->pm_type;
table[6].data = &pernet->scheduler;
+ table[7].data = &pernet->close_timeout;
hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table,
ARRAY_SIZE(mptcp_sysctl_table));
diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
index bceaab8dd8e4..74698582a285 100644
--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -52,6 +52,7 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
mptcp_set_owner_r(skb, sk);
__skb_queue_tail(&sk->sk_receive_queue, skb);
+ mptcp_sk(sk)->bytes_received += skb->len;
sk->sk_data_ready(sk);
diff --git a/net/mptcp/mptcp_pm_gen.c b/net/mptcp/mptcp_pm_gen.c
new file mode 100644
index 000000000000..a2325e70ddab
--- /dev/null
+++ b/net/mptcp/mptcp_pm_gen.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/mptcp.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "mptcp_pm_gen.h"
+
+#include <uapi/linux/mptcp_pm.h>
+
+/* Common nested types */
+const struct nla_policy mptcp_pm_address_nl_policy[MPTCP_PM_ADDR_ATTR_IF_IDX + 1] = {
+ [MPTCP_PM_ADDR_ATTR_FAMILY] = { .type = NLA_U16, },
+ [MPTCP_PM_ADDR_ATTR_ID] = { .type = NLA_U8, },
+ [MPTCP_PM_ADDR_ATTR_ADDR4] = { .type = NLA_U32, },
+ [MPTCP_PM_ADDR_ATTR_ADDR6] = NLA_POLICY_EXACT_LEN(16),
+ [MPTCP_PM_ADDR_ATTR_PORT] = { .type = NLA_U16, },
+ [MPTCP_PM_ADDR_ATTR_FLAGS] = { .type = NLA_U32, },
+ [MPTCP_PM_ADDR_ATTR_IF_IDX] = { .type = NLA_S32, },
+};
+
+/* MPTCP_PM_CMD_ADD_ADDR - do */
+const struct nla_policy mptcp_pm_add_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1] = {
+ [MPTCP_PM_ENDPOINT_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+};
+
+/* MPTCP_PM_CMD_DEL_ADDR - do */
+const struct nla_policy mptcp_pm_del_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1] = {
+ [MPTCP_PM_ENDPOINT_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+};
+
+/* MPTCP_PM_CMD_GET_ADDR - do */
+const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1] = {
+ [MPTCP_PM_ENDPOINT_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+};
+
+/* MPTCP_PM_CMD_FLUSH_ADDRS - do */
+const struct nla_policy mptcp_pm_flush_addrs_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1] = {
+ [MPTCP_PM_ENDPOINT_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+};
+
+/* MPTCP_PM_CMD_SET_LIMITS - do */
+const struct nla_policy mptcp_pm_set_limits_nl_policy[MPTCP_PM_ATTR_SUBFLOWS + 1] = {
+ [MPTCP_PM_ATTR_RCV_ADD_ADDRS] = { .type = NLA_U32, },
+ [MPTCP_PM_ATTR_SUBFLOWS] = { .type = NLA_U32, },
+};
+
+/* MPTCP_PM_CMD_GET_LIMITS - do */
+const struct nla_policy mptcp_pm_get_limits_nl_policy[MPTCP_PM_ATTR_SUBFLOWS + 1] = {
+ [MPTCP_PM_ATTR_RCV_ADD_ADDRS] = { .type = NLA_U32, },
+ [MPTCP_PM_ATTR_SUBFLOWS] = { .type = NLA_U32, },
+};
+
+/* MPTCP_PM_CMD_SET_FLAGS - do */
+const struct nla_policy mptcp_pm_set_flags_nl_policy[MPTCP_PM_ATTR_ADDR_REMOTE + 1] = {
+ [MPTCP_PM_ATTR_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+ [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
+ [MPTCP_PM_ATTR_ADDR_REMOTE] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+};
+
+/* MPTCP_PM_CMD_ANNOUNCE - do */
+const struct nla_policy mptcp_pm_announce_nl_policy[MPTCP_PM_ATTR_TOKEN + 1] = {
+ [MPTCP_PM_ATTR_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+ [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
+};
+
+/* MPTCP_PM_CMD_REMOVE - do */
+const struct nla_policy mptcp_pm_remove_nl_policy[MPTCP_PM_ATTR_LOC_ID + 1] = {
+ [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
+ [MPTCP_PM_ATTR_LOC_ID] = { .type = NLA_U8, },
+};
+
+/* MPTCP_PM_CMD_SUBFLOW_CREATE - do */
+const struct nla_policy mptcp_pm_subflow_create_nl_policy[MPTCP_PM_ATTR_ADDR_REMOTE + 1] = {
+ [MPTCP_PM_ATTR_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+ [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
+ [MPTCP_PM_ATTR_ADDR_REMOTE] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+};
+
+/* MPTCP_PM_CMD_SUBFLOW_DESTROY - do */
+const struct nla_policy mptcp_pm_subflow_destroy_nl_policy[MPTCP_PM_ATTR_ADDR_REMOTE + 1] = {
+ [MPTCP_PM_ATTR_ADDR] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+ [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
+ [MPTCP_PM_ATTR_ADDR_REMOTE] = NLA_POLICY_NESTED(mptcp_pm_address_nl_policy),
+};
+
+/* Ops table for mptcp_pm */
+const struct genl_ops mptcp_pm_nl_ops[11] = {
+ {
+ .cmd = MPTCP_PM_CMD_ADD_ADDR,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_add_addr_doit,
+ .policy = mptcp_pm_add_addr_nl_policy,
+ .maxattr = MPTCP_PM_ENDPOINT_ADDR,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_DEL_ADDR,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_del_addr_doit,
+ .policy = mptcp_pm_del_addr_nl_policy,
+ .maxattr = MPTCP_PM_ENDPOINT_ADDR,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_GET_ADDR,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_get_addr_doit,
+ .dumpit = mptcp_pm_nl_get_addr_dumpit,
+ .policy = mptcp_pm_get_addr_nl_policy,
+ .maxattr = MPTCP_PM_ENDPOINT_ADDR,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_FLUSH_ADDRS,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_flush_addrs_doit,
+ .policy = mptcp_pm_flush_addrs_nl_policy,
+ .maxattr = MPTCP_PM_ENDPOINT_ADDR,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_SET_LIMITS,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_set_limits_doit,
+ .policy = mptcp_pm_set_limits_nl_policy,
+ .maxattr = MPTCP_PM_ATTR_SUBFLOWS,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_GET_LIMITS,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_get_limits_doit,
+ .policy = mptcp_pm_get_limits_nl_policy,
+ .maxattr = MPTCP_PM_ATTR_SUBFLOWS,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_SET_FLAGS,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_set_flags_doit,
+ .policy = mptcp_pm_set_flags_nl_policy,
+ .maxattr = MPTCP_PM_ATTR_ADDR_REMOTE,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_ANNOUNCE,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_announce_doit,
+ .policy = mptcp_pm_announce_nl_policy,
+ .maxattr = MPTCP_PM_ATTR_TOKEN,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_REMOVE,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_remove_doit,
+ .policy = mptcp_pm_remove_nl_policy,
+ .maxattr = MPTCP_PM_ATTR_LOC_ID,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_SUBFLOW_CREATE,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_subflow_create_doit,
+ .policy = mptcp_pm_subflow_create_nl_policy,
+ .maxattr = MPTCP_PM_ATTR_ADDR_REMOTE,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+ {
+ .cmd = MPTCP_PM_CMD_SUBFLOW_DESTROY,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = mptcp_pm_nl_subflow_destroy_doit,
+ .policy = mptcp_pm_subflow_destroy_nl_policy,
+ .maxattr = MPTCP_PM_ATTR_ADDR_REMOTE,
+ .flags = GENL_UNS_ADMIN_PERM,
+ },
+};
diff --git a/net/mptcp/mptcp_pm_gen.h b/net/mptcp/mptcp_pm_gen.h
new file mode 100644
index 000000000000..10579d184587
--- /dev/null
+++ b/net/mptcp/mptcp_pm_gen.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/mptcp.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_MPTCP_PM_GEN_H
+#define _LINUX_MPTCP_PM_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/mptcp_pm.h>
+
+/* Common nested types */
+extern const struct nla_policy mptcp_pm_address_nl_policy[MPTCP_PM_ADDR_ATTR_IF_IDX + 1];
+
+extern const struct nla_policy mptcp_pm_add_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
+
+extern const struct nla_policy mptcp_pm_del_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
+
+extern const struct nla_policy mptcp_pm_get_addr_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
+
+extern const struct nla_policy mptcp_pm_flush_addrs_nl_policy[MPTCP_PM_ENDPOINT_ADDR + 1];
+
+extern const struct nla_policy mptcp_pm_set_limits_nl_policy[MPTCP_PM_ATTR_SUBFLOWS + 1];
+
+extern const struct nla_policy mptcp_pm_get_limits_nl_policy[MPTCP_PM_ATTR_SUBFLOWS + 1];
+
+extern const struct nla_policy mptcp_pm_set_flags_nl_policy[MPTCP_PM_ATTR_ADDR_REMOTE + 1];
+
+extern const struct nla_policy mptcp_pm_announce_nl_policy[MPTCP_PM_ATTR_TOKEN + 1];
+
+extern const struct nla_policy mptcp_pm_remove_nl_policy[MPTCP_PM_ATTR_LOC_ID + 1];
+
+extern const struct nla_policy mptcp_pm_subflow_create_nl_policy[MPTCP_PM_ATTR_ADDR_REMOTE + 1];
+
+extern const struct nla_policy mptcp_pm_subflow_destroy_nl_policy[MPTCP_PM_ATTR_ADDR_REMOTE + 1];
+
+/* Ops table for mptcp_pm */
+extern const struct genl_ops mptcp_pm_nl_ops[11];
+
+int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info);
+int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb,
+ struct genl_info *info);
+
+#endif /* _LINUX_MPTCP_PM_GEN_H */
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 9661f3812682..1529ec358815 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -1104,29 +1104,6 @@ static const struct genl_multicast_group mptcp_pm_mcgrps[] = {
},
};
-static const struct nla_policy
-mptcp_pm_addr_policy[MPTCP_PM_ADDR_ATTR_MAX + 1] = {
- [MPTCP_PM_ADDR_ATTR_FAMILY] = { .type = NLA_U16, },
- [MPTCP_PM_ADDR_ATTR_ID] = { .type = NLA_U8, },
- [MPTCP_PM_ADDR_ATTR_ADDR4] = { .type = NLA_U32, },
- [MPTCP_PM_ADDR_ATTR_ADDR6] =
- NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
- [MPTCP_PM_ADDR_ATTR_PORT] = { .type = NLA_U16 },
- [MPTCP_PM_ADDR_ATTR_FLAGS] = { .type = NLA_U32 },
- [MPTCP_PM_ADDR_ATTR_IF_IDX] = { .type = NLA_S32 },
-};
-
-static const struct nla_policy mptcp_pm_policy[MPTCP_PM_ATTR_MAX + 1] = {
- [MPTCP_PM_ATTR_ADDR] =
- NLA_POLICY_NESTED(mptcp_pm_addr_policy),
- [MPTCP_PM_ATTR_RCV_ADD_ADDRS] = { .type = NLA_U32, },
- [MPTCP_PM_ATTR_SUBFLOWS] = { .type = NLA_U32, },
- [MPTCP_PM_ATTR_TOKEN] = { .type = NLA_U32, },
- [MPTCP_PM_ATTR_LOC_ID] = { .type = NLA_U8, },
- [MPTCP_PM_ATTR_ADDR_REMOTE] =
- NLA_POLICY_NESTED(mptcp_pm_addr_policy),
-};
-
void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk);
@@ -1188,7 +1165,7 @@ static int mptcp_pm_parse_pm_addr_attr(struct nlattr *tb[],
/* no validation needed - was already done via nested policy */
err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
- mptcp_pm_addr_policy, info->extack);
+ mptcp_pm_address_nl_policy, info->extack);
if (err)
return err;
@@ -1303,9 +1280,9 @@ next:
return 0;
}
-static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
struct mptcp_pm_addr_entry addr, *entry;
int ret;
@@ -1484,9 +1461,9 @@ next:
return 0;
}
-static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
struct mptcp_pm_addr_entry addr, *entry;
unsigned int addr_max;
@@ -1619,7 +1596,7 @@ static void __reset_counters(struct pm_nl_pernet *pernet)
pernet->addrs = 0;
}
-static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info)
{
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
LIST_HEAD(free_list);
@@ -1675,9 +1652,9 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int mptcp_nl_cmd_get_addr(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
+ struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR];
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
struct mptcp_pm_addr_entry addr, *entry;
struct sk_buff *msg;
@@ -1725,8 +1702,8 @@ fail:
return ret;
}
-static int mptcp_nl_cmd_dump_addrs(struct sk_buff *msg,
- struct netlink_callback *cb)
+int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
{
struct net *net = sock_net(msg->sk);
struct mptcp_pm_addr_entry *entry;
@@ -1783,8 +1760,7 @@ static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
return 0;
}
-static int
-mptcp_nl_cmd_set_limits(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info)
{
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
unsigned int rcv_addrs, subflows;
@@ -1809,8 +1785,7 @@ unlock:
return ret;
}
-static int
-mptcp_nl_cmd_get_limits(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info)
{
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
struct sk_buff *msg;
@@ -1919,7 +1894,7 @@ int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8
return 0;
}
-static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info)
{
struct mptcp_pm_addr_entry remote = { .addr = { .family = AF_UNSPEC }, };
struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, };
@@ -2283,72 +2258,13 @@ nla_put_failure:
nlmsg_free(skb);
}
-static const struct genl_small_ops mptcp_pm_ops[] = {
- {
- .cmd = MPTCP_PM_CMD_ADD_ADDR,
- .doit = mptcp_nl_cmd_add_addr,
- .flags = GENL_UNS_ADMIN_PERM,
- },
- {
- .cmd = MPTCP_PM_CMD_DEL_ADDR,
- .doit = mptcp_nl_cmd_del_addr,
- .flags = GENL_UNS_ADMIN_PERM,
- },
- {
- .cmd = MPTCP_PM_CMD_FLUSH_ADDRS,
- .doit = mptcp_nl_cmd_flush_addrs,
- .flags = GENL_UNS_ADMIN_PERM,
- },
- {
- .cmd = MPTCP_PM_CMD_GET_ADDR,
- .doit = mptcp_nl_cmd_get_addr,
- .dumpit = mptcp_nl_cmd_dump_addrs,
- },
- {
- .cmd = MPTCP_PM_CMD_SET_LIMITS,
- .doit = mptcp_nl_cmd_set_limits,
- .flags = GENL_UNS_ADMIN_PERM,
- },
- {
- .cmd = MPTCP_PM_CMD_GET_LIMITS,
- .doit = mptcp_nl_cmd_get_limits,
- },
- {
- .cmd = MPTCP_PM_CMD_SET_FLAGS,
- .doit = mptcp_nl_cmd_set_flags,
- .flags = GENL_UNS_ADMIN_PERM,
- },
- {
- .cmd = MPTCP_PM_CMD_ANNOUNCE,
- .doit = mptcp_nl_cmd_announce,
- .flags = GENL_UNS_ADMIN_PERM,
- },
- {
- .cmd = MPTCP_PM_CMD_REMOVE,
- .doit = mptcp_nl_cmd_remove,
- .flags = GENL_UNS_ADMIN_PERM,
- },
- {
- .cmd = MPTCP_PM_CMD_SUBFLOW_CREATE,
- .doit = mptcp_nl_cmd_sf_create,
- .flags = GENL_UNS_ADMIN_PERM,
- },
- {
- .cmd = MPTCP_PM_CMD_SUBFLOW_DESTROY,
- .doit = mptcp_nl_cmd_sf_destroy,
- .flags = GENL_UNS_ADMIN_PERM,
- },
-};
-
static struct genl_family mptcp_genl_family __ro_after_init = {
.name = MPTCP_PM_NAME,
.version = MPTCP_PM_VER,
- .maxattr = MPTCP_PM_ATTR_MAX,
- .policy = mptcp_pm_policy,
.netnsok = true,
.module = THIS_MODULE,
- .small_ops = mptcp_pm_ops,
- .n_small_ops = ARRAY_SIZE(mptcp_pm_ops),
+ .ops = mptcp_pm_nl_ops,
+ .n_ops = ARRAY_SIZE(mptcp_pm_nl_ops),
.resv_start_op = MPTCP_PM_CMD_SUBFLOW_DESTROY + 1,
.mcgrps = mptcp_pm_mcgrps,
.n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps),
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index d042d32beb4d..0f92e5b13a8a 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -145,7 +145,7 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry);
}
-int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
struct nlattr *addr = info->attrs[MPTCP_PM_ATTR_ADDR];
@@ -208,7 +208,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
return err;
}
-int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
struct nlattr *id = info->attrs[MPTCP_PM_ATTR_LOC_ID];
@@ -270,7 +270,7 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
return err;
}
-int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
@@ -394,7 +394,7 @@ static struct sock *mptcp_nl_find_ssk(struct mptcp_sock *msk,
return NULL;
}
-int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
+int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 886ab689a8ae..1dacc072dcca 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -121,8 +121,6 @@ struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
ret = __mptcp_socket_create(msk);
if (ret)
return ERR_PTR(ret);
-
- mptcp_sockopt_sync(msk, msk->first);
}
return msk->first;
@@ -863,9 +861,8 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
/* Wake-up the reader only for in-sequence data */
mptcp_data_lock(sk);
- if (move_skbs_to_msk(msk, ssk))
+ if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
sk->sk_data_ready(sk);
-
mptcp_data_unlock(sk);
}
@@ -893,6 +890,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
mptcp_sockopt_sync_locked(msk, ssk);
mptcp_subflow_joined(msk, ssk);
mptcp_stop_tout_timer(sk);
+ __mptcp_propagate_sndbuf(sk, ssk);
return true;
}
@@ -1079,15 +1077,16 @@ static void mptcp_enter_memory_pressure(struct sock *sk)
struct mptcp_sock *msk = mptcp_sk(sk);
bool first = true;
- sk_stream_moderate_sndbuf(sk);
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
if (first)
tcp_enter_memory_pressure(ssk);
sk_stream_moderate_sndbuf(ssk);
+
first = false;
}
+ __mptcp_sync_sndbuf(sk);
}
/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
@@ -1761,6 +1760,18 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
return ret;
}
+static int do_copy_data_nocache(struct sock *sk, int copy,
+ struct iov_iter *from, char *to)
+{
+ if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
+ if (!copy_from_iter_full_nocache(to, copy, from))
+ return -EFAULT;
+ } else if (!copy_from_iter_full(to, copy, from)) {
+ return -EFAULT;
+ }
+ return 0;
+}
+
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1834,11 +1845,10 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (!sk_wmem_schedule(sk, total_ts))
goto wait_for_memory;
- if (copy_page_from_iter(dfrag->page, offset, psize,
- &msg->msg_iter) != psize) {
- ret = -EFAULT;
+ ret = do_copy_data_nocache(sk, psize, &msg->msg_iter,
+ page_address(dfrag->page) + offset);
+ if (ret)
goto do_error;
- }
/* data successfully copied into the write queue */
sk_forward_alloc_add(sk, -total_ts);
@@ -1922,6 +1932,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
if (!(flags & MSG_PEEK)) {
MPTCP_SKB_CB(skb)->offset += count;
MPTCP_SKB_CB(skb)->map_seq += count;
+ msk->bytes_consumed += count;
}
break;
}
@@ -1932,6 +1943,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
__skb_unlink(skb, &msk->receive_queue);
__kfree_skb(skb);
+ msk->bytes_consumed += count;
}
if (copied >= len)
@@ -2391,8 +2403,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
if (msk->in_accept_queue && msk->first == ssk &&
(sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
/* ensure later check in mptcp_worker() will dispose the msk */
- mptcp_set_close_tout(sk, tcp_jiffies32 - (TCP_TIMEWAIT_LEN + 1));
sock_set_flag(sk, SOCK_DEAD);
+ mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1));
lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
mptcp_subflow_drop_ctx(ssk);
goto out_release;
@@ -2448,6 +2460,7 @@ out_release:
WRITE_ONCE(msk->first, NULL);
out:
+ __mptcp_sync_sndbuf(sk);
if (need_push)
__mptcp_push_pending(sk, 0);
@@ -2516,7 +2529,7 @@ static bool mptcp_close_tout_expired(const struct sock *sk)
return false;
return time_after32(tcp_jiffies32,
- inet_csk(sk)->icsk_mtup.probe_timestamp + TCP_TIMEWAIT_LEN);
+ inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk));
}
static void mptcp_check_fastclose(struct mptcp_sock *msk)
@@ -2659,7 +2672,7 @@ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
return;
close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies +
- TCP_TIMEWAIT_LEN;
+ mptcp_close_timeout(sk);
/* the close timeout takes precedence on the fail one, and here at least one of
* them is active
@@ -2755,6 +2768,7 @@ static void __mptcp_init_sock(struct sock *sk)
msk->rmem_fwd_alloc = 0;
WRITE_ONCE(msk->rmem_released, 0);
msk->timer_ival = TCP_RTO_MIN;
+ msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
WRITE_ONCE(msk->first, NULL);
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
@@ -2964,16 +2978,9 @@ void __mptcp_unaccepted_force_close(struct sock *sk)
__mptcp_destroy_sock(sk);
}
-static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
+static __poll_t mptcp_check_readable(struct sock *sk)
{
- /* Concurrent splices from sk_receive_queue into receive_queue will
- * always show at least one non-empty queue when checked in this order.
- */
- if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
- skb_queue_empty_lockless(&msk->receive_queue))
- return 0;
-
- return EPOLLIN | EPOLLRDNORM;
+ return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0;
}
static void mptcp_check_listen_stop(struct sock *sk)
@@ -3011,7 +3018,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
goto cleanup;
}
- if (mptcp_check_readable(msk) || timeout < 0) {
+ if (mptcp_data_avail(msk) || timeout < 0) {
/* If the msk has read data, or the caller explicitly ask it,
* do the MPTCP equivalent of TCP reset, aka MPTCP fastclose
*/
@@ -3138,6 +3145,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
msk->snd_data_fin_enable = false;
msk->rcv_fastclose = false;
msk->use_64bit_ack = false;
+ msk->bytes_consumed = 0;
WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
mptcp_pm_data_reset(msk);
mptcp_ca_reset(sk);
@@ -3219,7 +3227,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
* uses the correct data
*/
mptcp_copy_inaddrs(nsk, ssk);
- mptcp_propagate_sndbuf(nsk, ssk);
+ __mptcp_propagate_sndbuf(nsk, ssk);
mptcp_rcv_space_init(msk, ssk);
bh_unlock_sock(nsk);
@@ -3397,6 +3405,8 @@ static void mptcp_release_cb(struct sock *sk)
__mptcp_set_connected(sk);
if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
__mptcp_error_report(sk);
+ if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
+ __mptcp_sync_sndbuf(sk);
}
__mptcp_update_rmem(sk);
@@ -3441,6 +3451,14 @@ void mptcp_subflow_process_delegated(struct sock *ssk, long status)
__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
mptcp_data_unlock(sk);
}
+ if (status & BIT(MPTCP_DELEGATE_SNDBUF)) {
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_sync_sndbuf(sk);
+ else
+ __set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags);
+ mptcp_data_unlock(sk);
+ }
if (status & BIT(MPTCP_DELEGATE_ACK))
schedule_3rdack_retransmission(ssk);
}
@@ -3525,6 +3543,7 @@ bool mptcp_finish_join(struct sock *ssk)
/* active subflow, already present inside the conn_list */
if (!list_empty(&subflow->node)) {
mptcp_subflow_joined(msk, ssk);
+ mptcp_propagate_sndbuf(parent, ssk);
return true;
}
@@ -3909,7 +3928,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
- mask |= mptcp_check_readable(msk);
+ mask |= mptcp_check_readable(sk);
if (shutdown & SEND_SHUTDOWN)
mask |= EPOLLOUT | EPOLLWRNORM;
else
@@ -3947,6 +3966,7 @@ static const struct proto_ops mptcp_stream_ops = {
.sendmsg = inet_sendmsg,
.recvmsg = inet_recvmsg,
.mmap = sock_no_mmap,
+ .set_rcvlowat = mptcp_set_rcvlowat,
};
static struct inet_protosw mptcp_protosw = {
@@ -4048,6 +4068,7 @@ static const struct proto_ops mptcp_v6_stream_ops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = inet6_compat_ioctl,
#endif
+ .set_rcvlowat = mptcp_set_rcvlowat,
};
static struct proto mptcp_v6_prot;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 3612545fa62e..9092fcf18798 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -13,6 +13,8 @@
#include <uapi/linux/mptcp.h>
#include <net/genetlink.h>
+#include "mptcp_pm_gen.h"
+
#define MPTCP_SUPPORTED_VERSION 1
/* MPTCP option bits */
@@ -123,6 +125,7 @@
#define MPTCP_RETRANSMIT 4
#define MPTCP_FLUSH_JOIN_LIST 5
#define MPTCP_CONNECTED 6
+#define MPTCP_SYNC_SNDBUF 7
struct mptcp_skb_cb {
u64 map_seq;
@@ -267,6 +270,7 @@ struct mptcp_sock {
atomic64_t rcv_wnd_sent;
u64 rcv_data_fin_seq;
u64 bytes_retrans;
+ u64 bytes_consumed;
int rmem_fwd_alloc;
int snd_burst;
int old_wspace;
@@ -432,11 +436,6 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
return (struct mptcp_subflow_request_sock *)rsk;
}
-enum mptcp_data_avail {
- MPTCP_SUBFLOW_NODATA,
- MPTCP_SUBFLOW_DATA_AVAIL,
-};
-
struct mptcp_delegated_action {
struct napi_struct napi;
struct list_head head;
@@ -447,6 +446,7 @@ DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
#define MPTCP_DELEGATE_SCHEDULED 0
#define MPTCP_DELEGATE_SEND 1
#define MPTCP_DELEGATE_ACK 2
+#define MPTCP_DELEGATE_SNDBUF 3
#define MPTCP_DELEGATE_ACTIONS_MASK (~BIT(MPTCP_DELEGATE_SCHEDULED))
/* MPTCP subflow context */
@@ -492,7 +492,7 @@ struct mptcp_subflow_context {
valid_csum_seen : 1, /* at least one csum validated */
is_mptfo : 1, /* subflow is doing TFO */
__unused : 9;
- enum mptcp_data_avail data_avail;
+ bool data_avail;
bool scheduled;
u32 remote_nonce;
u64 thmac;
@@ -520,6 +520,9 @@ struct mptcp_subflow_context {
u32 setsockopt_seq;
u32 stale_rcv_tstamp;
+ int cached_sndbuf; /* sndbuf size when last synced with the msk sndbuf,
+ * protected by the msk socket lock
+ */
struct sock *tcp_sock; /* tcp sk backpointer */
struct sock *conn; /* parent mptcp_sock */
@@ -613,6 +616,7 @@ unsigned int mptcp_get_add_addr_timeout(const struct net *net);
int mptcp_is_checksum_enabled(const struct net *net);
int mptcp_allow_join_id0(const struct net *net);
unsigned int mptcp_stale_loss_cnt(const struct net *net);
+unsigned int mptcp_close_timeout(const struct sock *sk);
int mptcp_get_pm_type(const struct net *net);
const char *mptcp_get_scheduler(const struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
@@ -661,6 +665,24 @@ struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk);
int mptcp_sched_get_send(struct mptcp_sock *msk);
int mptcp_sched_get_retrans(struct mptcp_sock *msk);
+static inline u64 mptcp_data_avail(const struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->bytes_received) - READ_ONCE(msk->bytes_consumed);
+}
+
+static inline bool mptcp_epollin_ready(const struct sock *sk)
+{
+ /* mptcp doesn't have to deal with small skbs in the receive queue,
+ * as it can always coalesce them
+ */
+ return (mptcp_data_avail(mptcp_sk(sk)) >= sk->sk_rcvlowat) ||
+ (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+ mem_cgroup_under_socket_pressure(sk->sk_memcg)) ||
+ READ_ONCE(tcp_memory_pressure);
+}
+
+int mptcp_set_rcvlowat(struct sock *sk, int val);
+
static inline bool __tcp_can_send(const struct sock *ssk)
{
/* only send if our side has not closed yet */
@@ -735,6 +757,7 @@ static inline bool mptcp_is_fully_established(struct sock *sk)
return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
READ_ONCE(mptcp_sk(sk)->fully_established);
}
+
void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk);
void mptcp_data_ready(struct sock *sk, struct sock *ssk);
bool mptcp_finish_join(struct sock *sk);
@@ -762,13 +785,52 @@ static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
READ_ONCE(msk->write_seq) == READ_ONCE(msk->snd_nxt);
}
-static inline bool mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
+static inline void __mptcp_sync_sndbuf(struct sock *sk)
{
- if ((sk->sk_userlocks & SOCK_SNDBUF_LOCK) || ssk->sk_sndbuf <= READ_ONCE(sk->sk_sndbuf))
- return false;
+ struct mptcp_subflow_context *subflow;
+ int ssk_sndbuf, new_sndbuf;
+
+ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+ return;
+
+ new_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[0];
+ mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
+ ssk_sndbuf = READ_ONCE(mptcp_subflow_tcp_sock(subflow)->sk_sndbuf);
+
+ subflow->cached_sndbuf = ssk_sndbuf;
+ new_sndbuf += ssk_sndbuf;
+ }
+
+ /* the msk max wmem limit is <nr_subflows> * tcp wmem[2] */
+ WRITE_ONCE(sk->sk_sndbuf, new_sndbuf);
+}
+
+/* The caller holds both the msk socket and the subflow socket locks,
+ * possibly under BH
+ */
+static inline void __mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+
+ if (READ_ONCE(ssk->sk_sndbuf) != subflow->cached_sndbuf)
+ __mptcp_sync_sndbuf(sk);
+}
+
+/* the caller holds only the subflow socket lock, either in process or
+ * BH context. Additionally this can be called under the msk data lock,
+ * so we can't acquire such lock here: let the delegate action acquire
+ * the needed locks in suitable order.
+ */
+static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+
+ if (likely(READ_ONCE(ssk->sk_sndbuf) == subflow->cached_sndbuf))
+ return;
- WRITE_ONCE(sk->sk_sndbuf, ssk->sk_sndbuf);
- return true;
+ local_bh_disable();
+ mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_SNDBUF);
+ local_bh_enable();
}
static inline void mptcp_write_space(struct sock *sk)
@@ -877,10 +939,6 @@ void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
struct list_head *rm_list);
void mptcp_free_local_addr_list(struct mptcp_sock *msk);
-int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info);
-int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info);
-int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info);
-int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info);
void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
const struct sock *ssk, gfp_t gfp);
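__mptcp_sync_sndbuf() recomputes the msk send buffer as tcp_wmem[0] plus the sum of every subflow's sk_sndbuf, caching each contribution so the propagate helpers can skip the walk when nothing changed. A small userspace model of that aggregation; the sizes are assumptions rather than live socket values:

/* Userspace model of the msk sndbuf aggregation: base tcp_wmem[0]
 * plus the current sndbuf of each subflow.
 */
#include <stdio.h>

#define TCP_WMEM_MIN	4096	/* stands in for sysctl_tcp_wmem[0] */

struct subflow {
	int sk_sndbuf;
	int cached_sndbuf;
};

static int sync_sndbuf(struct subflow *subflows, int nr)
{
	int new_sndbuf = TCP_WMEM_MIN;
	int i;

	for (i = 0; i < nr; i++) {
		subflows[i].cached_sndbuf = subflows[i].sk_sndbuf;
		new_sndbuf += subflows[i].sk_sndbuf;
	}
	return new_sndbuf;
}

int main(void)
{
	struct subflow sf[2] = { { .sk_sndbuf = 87380 },
				 { .sk_sndbuf = 212992 } };

	printf("msk sndbuf: %d\n", sync_sndbuf(sf, 2));
	return 0;
}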
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 59bd5e114392..574e221bb765 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -95,6 +95,7 @@ static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname, in
case SO_SNDBUFFORCE:
ssk->sk_userlocks |= SOCK_SNDBUF_LOCK;
WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
+ mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
break;
case SO_RCVBUF:
case SO_RCVBUFFORCE:
@@ -1415,8 +1416,10 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
if (sk->sk_userlocks & tx_rx_locks) {
ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
- if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+ if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) {
WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
+ mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
+ }
if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
}
@@ -1444,37 +1447,63 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
}
-static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
-{
- bool slow = lock_sock_fast(ssk);
-
- sync_socket_options(msk, ssk);
-
- unlock_sock_fast(ssk, slow);
-}
-
-void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
+void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
msk_owned_by_me(msk);
+ ssk->sk_rcvlowat = 0;
+
+ /* subflows must ignore any latency-related settings: will not affect
+ * the user-space - only the msk is relevant - but will foul the
+ * mptcp scheduler
+ */
+ tcp_sk(ssk)->notsent_lowat = UINT_MAX;
+
if (READ_ONCE(subflow->setsockopt_seq) != msk->setsockopt_seq) {
- __mptcp_sockopt_sync(msk, ssk);
+ sync_socket_options(msk, ssk);
subflow->setsockopt_seq = msk->setsockopt_seq;
}
}
-void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk)
+/* unfortunately this is different enough from the tcp version so
+ * that we can't factor it out
+ */
+int mptcp_set_rcvlowat(struct sock *sk, int val)
{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct mptcp_subflow_context *subflow;
+ int space, cap;
- msk_owned_by_me(msk);
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
+ cap = sk->sk_rcvbuf >> 1;
+ else
+ cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
+ val = min(val, cap);
+ WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
- if (READ_ONCE(subflow->setsockopt_seq) != msk->setsockopt_seq) {
- sync_socket_options(msk, ssk);
+ /* Check if we need to signal EPOLLIN right now */
+ if (mptcp_epollin_ready(sk))
+ sk->sk_data_ready(sk);
- subflow->setsockopt_seq = msk->setsockopt_seq;
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
+ return 0;
+
+ space = __tcp_space_from_win(mptcp_sk(sk)->scaling_ratio, val);
+ if (space <= sk->sk_rcvbuf)
+ return 0;
+
+ /* propagate the rcvbuf changes to all the subflows */
+ WRITE_ONCE(sk->sk_rcvbuf, space);
+ mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow;
+
+ slow = lock_sock_fast(ssk);
+ WRITE_ONCE(ssk->sk_rcvbuf, space);
+ tcp_sk(ssk)->window_clamp = val;
+ unlock_sock_fast(ssk, slow);
}
+ return 0;
}
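mptcp_set_rcvlowat() follows the TCP version: the requested value is capped at half of either the locked rcvbuf or tcp_rmem[2], never stored as zero, and the receive buffer only grows when the user has not locked it. A compact sketch of just the clamp, with placeholder limits standing in for sk_rcvbuf and sysctl_tcp_rmem[2]:

/* Sketch of the rcvlowat clamping in mptcp_set_rcvlowat(). */
#include <stdbool.h>
#include <stdio.h>

static int clamp_rcvlowat(int val, bool rcvbuf_locked, int sk_rcvbuf,
			  int tcp_rmem_max)
{
	int cap = (rcvbuf_locked ? sk_rcvbuf : tcp_rmem_max) >> 1;

	if (val > cap)
		val = cap;
	return val ? val : 1;	/* never store 0 */
}

int main(void)
{
	/* huge request, unlocked rcvbuf: capped at tcp_rmem[2] / 2 */
	printf("%d\n", clamp_rcvlowat(1 << 24, false, 131072, 6291456));
	/* zero request is stored as 1 */
	printf("%d\n", clamp_rcvlowat(0, false, 131072, 6291456));
	return 0;
}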
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 9c1f8d1d63d2..e120e9616454 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -421,6 +421,7 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
void __mptcp_set_connected(struct sock *sk)
{
+ __mptcp_propagate_sndbuf(sk, mptcp_sk(sk)->first);
if (sk->sk_state == TCP_SYN_SENT) {
inet_sk_state_store(sk, TCP_ESTABLISHED);
sk->sk_state_change(sk);
@@ -472,7 +473,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
return;
msk = mptcp_sk(parent);
- mptcp_propagate_sndbuf(parent, sk);
subflow->rel_write_seq = 1;
subflow->conn_finished = 1;
subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
@@ -1237,7 +1237,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
struct sk_buff *skb;
if (!skb_peek(&ssk->sk_receive_queue))
- WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+ WRITE_ONCE(subflow->data_avail, false);
if (subflow->data_avail)
return true;
@@ -1271,7 +1271,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
continue;
}
- WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+ WRITE_ONCE(subflow->data_avail, true);
break;
}
return true;
@@ -1293,7 +1293,7 @@ fallback:
goto reset;
}
mptcp_subflow_fail(msk, ssk);
- WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+ WRITE_ONCE(subflow->data_avail, true);
return true;
}
@@ -1310,7 +1310,7 @@ reset:
while ((skb = skb_peek(&ssk->sk_receive_queue)))
sk_eat_skb(ssk, skb);
tcp_send_active_reset(ssk, GFP_ATOMIC);
- WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+ WRITE_ONCE(subflow->data_avail, false);
return false;
}
@@ -1322,7 +1322,7 @@ reset:
subflow->map_seq = READ_ONCE(msk->ack_seq);
subflow->map_data_len = skb->len;
subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
- WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
+ WRITE_ONCE(subflow->data_avail, true);
return true;
}
@@ -1334,7 +1334,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
if (subflow->map_valid &&
mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
subflow->map_valid = 0;
- WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_NODATA);
+ WRITE_ONCE(subflow->data_avail, false);
pr_debug("Done with mapping: seq=%u data_len=%u",
subflow->map_subflow_seq,
@@ -1405,10 +1405,18 @@ static void subflow_data_ready(struct sock *sk)
WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
!subflow->mp_join && !(state & TCPF_CLOSE));
- if (mptcp_subflow_data_available(sk))
+ if (mptcp_subflow_data_available(sk)) {
mptcp_data_ready(parent, sk);
- else if (unlikely(sk->sk_err))
+
+ /* subflow-level lowat tests are not relevant;
+ * respect the msk-level threshold, possibly mandating an immediate ack
+ */
+ if (mptcp_data_avail(msk) < parent->sk_rcvlowat &&
+ (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ } else if (unlikely(sk->sk_err)) {
subflow_error_report(sk);
+ }
}
static void subflow_write_space(struct sock *ssk)
@@ -1525,8 +1533,6 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
if (addr.ss_family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
#endif
- mptcp_sockopt_sync(msk, ssk);
-
ssk->sk_bound_dev_if = ifindex;
err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
if (err)
@@ -1637,7 +1643,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
err = security_mptcp_add_subflow(sk, sf->sk);
if (err)
- goto release_ssk;
+ goto err_free;
/* the newly created socket has to be in the same cgroup as its parent */
mptcp_attach_cgroup(sk, sf->sk);
@@ -1651,15 +1657,12 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
sock_inuse_add(net, 1);
err = tcp_set_ulp(sf->sk, "mptcp");
+ if (err)
+ goto err_free;
-release_ssk:
+ mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk);
release_sock(sf->sk);
- if (err) {
- sock_release(sf);
- return err;
- }
-
/* the newly created socket really belongs to the owning MPTCP master
* socket, even if for additional subflows the allocation is performed
* by a kernel workqueue. Adjust inode references, so that the
@@ -1679,6 +1682,11 @@ release_ssk:
mptcp_subflow_ops_override(sf->sk);
return 0;
+
+err_free:
+ release_sock(sf->sk);
+ sock_release(sf);
+ return err;
}
static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
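
Note: the hunk above replaces the old release_ssk unwind with a single err_free label that both releases the subflow lock and frees the socket for every failure taken after the lock is held. A stand-alone sketch of that unwind shape, with hypothetical step_a()/step_b() helpers standing in for the real calls:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }	/* pretend this step fails */
static void lock(void)    { puts("lock"); }
static void unlock(void)  { puts("unlock"); }
static void release(void) { puts("release"); }

static int create(void)
{
	int err;

	lock();
	err = step_a();
	if (err)
		goto err_free;
	err = step_b();
	if (err)
		goto err_free;
	unlock();
	return 0;

err_free:
	unlock();	/* mirrors release_sock() */
	release();	/* mirrors sock_release() */
	return err;
}

int main(void)
{
	return create() ? 1 : 0;
}
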
@@ -1728,7 +1736,6 @@ static void subflow_state_change(struct sock *sk)
msk = mptcp_sk(parent);
if (subflow_simultaneous_connect(sk)) {
- mptcp_propagate_sndbuf(parent, sk);
mptcp_do_fallback(sk);
mptcp_rcv_space_init(msk, sk);
pr_fallback(msk);
@@ -2044,7 +2051,6 @@ void __init mptcp_subflow_init(void)
subflow_v6m_specific.send_check = ipv4_specific.send_check;
subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
- subflow_v6m_specific.net_frag_header_len = 0;
subflow_v6m_specific.rebuild_header = subflow_rebuild_header;
tcpv6_prot_override = tcpv6_prot;
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 16915f8eef2b..467671f2d42f 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -153,7 +153,7 @@ void synproxy_init_timestamp_cookie(const struct nf_synproxy_info *info,
struct synproxy_options *opts)
{
opts->tsecr = opts->tsval;
- opts->tsval = tcp_time_stamp_raw() & ~0x3f;
+ opts->tsval = tcp_clock_ms() & ~0x3f;
if (opts->options & NF_SYNPROXY_OPT_WSCALE) {
opts->tsval |= opts->wscale;
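
Note: as the hunk shows, the synproxy timestamp cookie keeps the peer's window scale in the low six bits of the millisecond clock value. A small stand-alone sketch of that encoding (illustrative helpers only, not the kernel API):

#include <stdint.h>
#include <stdio.h>

static uint32_t encode_ts_cookie(uint32_t clock_ms, uint8_t wscale)
{
	/* clear the low 6 bits of the clock, then fold the wscale in */
	return (clock_ms & ~0x3fu) | (wscale & 0x3fu);
}

static uint8_t decode_wscale(uint32_t tsval)
{
	return tsval & 0x3f;
}

int main(void)
{
	uint32_t tsval = encode_ts_cookie(0x12345678, 7);

	printf("tsval=0x%08x wscale=%u\n", tsval, decode_wscale(tsval));
	return 0;
}
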
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 8315d31b53db..92ef5ed2e7b0 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -225,7 +225,8 @@ static void genl_op_from_split(struct genl_op_iter *iter)
}
if (i + cnt < family->n_split_ops &&
- family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP) {
+ family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP &&
+ (!cnt || family->split_ops[i + cnt].cmd == iter->doit.cmd)) {
iter->dumpit = family->split_ops[i + cnt];
genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
cnt++;
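
Note: the extra condition above only pairs a dumpit split op with the doit the iterator just consumed when both serve the same command (or when there was no doit at all). A simplified stand-alone model of that check, with plain structs standing in for genl_split_ops:

#include <stdbool.h>
#include <stdio.h>

#define CAP_DO   0x1
#define CAP_DUMP 0x2

struct split_op { unsigned char cmd; unsigned char flags; };

/* Pair a dumpit with the current iterator slot only if there was no doit
 * (cnt == 0) or the dumpit serves the same cmd as that doit.
 */
static bool pair_dumpit(const struct split_op *ops, int n, int i, int cnt,
			unsigned char doit_cmd)
{
	return i + cnt < n &&
	       (ops[i + cnt].flags & CAP_DUMP) &&
	       (!cnt || ops[i + cnt].cmd == doit_cmd);
}

int main(void)
{
	struct split_op ops[] = { { 1, CAP_DO }, { 2, CAP_DUMP } };

	/* doit for cmd 1 was consumed (cnt == 1); the dumpit is for cmd 2,
	 * so with the extra check it is no longer merged into the same op.
	 */
	printf("%d\n", pair_dumpit(ops, 2, 0, 1, ops[0].cmd));
	return 0;
}
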
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 7c652d14528b..43b06cb284ce 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -690,7 +690,6 @@ static struct tc_action_ops act_ct_ops;
struct tc_ct_action_net {
struct tc_action_net tn; /* Must be first */
- bool labels;
};
/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
@@ -829,8 +828,13 @@ static void tcf_ct_params_free(struct tcf_ct_params *params)
}
if (params->ct_ft)
tcf_ct_flow_table_put(params->ct_ft);
- if (params->tmpl)
+ if (params->tmpl) {
+ if (params->put_labels)
+ nf_connlabels_put(nf_ct_net(params->tmpl));
+
nf_ct_put(params->tmpl);
+ }
+
kfree(params);
}
@@ -1154,9 +1158,9 @@ static int tcf_ct_fill_params(struct net *net,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
- struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);
struct nf_conntrack_zone zone;
int err, family, proto, len;
+ bool put_labels = false;
struct nf_conn *tmpl;
char *name;
@@ -1186,15 +1190,20 @@ static int tcf_ct_fill_params(struct net *net,
}
if (tb[TCA_CT_LABELS]) {
+ unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
+
if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
return -EOPNOTSUPP;
}
- if (!tn->labels) {
+ if (nf_connlabels_get(net, n_bits - 1)) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
return -EOPNOTSUPP;
+ } else {
+ put_labels = true;
}
+
tcf_ct_set_key_val(tb,
p->labels, TCA_CT_LABELS,
p->labels_mask, TCA_CT_LABELS_MASK,
@@ -1238,10 +1247,15 @@ static int tcf_ct_fill_params(struct net *net,
}
}
+ p->put_labels = put_labels;
+
if (p->ct_action & TCA_CT_ACT_COMMIT)
__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
return 0;
err:
+ if (put_labels)
+ nf_connlabels_put(net);
+
nf_ct_put(p->tmpl);
p->tmpl = NULL;
return err;
@@ -1542,32 +1556,13 @@ static struct tc_action_ops act_ct_ops = {
static __net_init int ct_init_net(struct net *net)
{
- unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);
- if (nf_connlabels_get(net, n_bits - 1)) {
- tn->labels = false;
- pr_err("act_ct: Failed to set connlabels length");
- } else {
- tn->labels = true;
- }
-
return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}
static void __net_exit ct_exit_net(struct list_head *net_list)
{
- struct net *net;
-
- rtnl_lock();
- list_for_each_entry(net, net_list, exit_list) {
- struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);
-
- if (tn->labels)
- nf_connlabels_put(net);
- }
- rtnl_unlock();
-
tc_action_net_exit(net_list, act_ct_ops.net_id);
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 8eacdb54e72f..bf9d00518a60 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -383,6 +383,10 @@ static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
if (fq_fastpath_check(sch, skb, now)) {
q->internal.stat_fastpath_packets++;
+ if (skb->sk == sk && q->rate_enable &&
+ READ_ONCE(sk->sk_pacing_status) != SK_PACING_FQ)
+ smp_store_release(&sk->sk_pacing_status,
+ SK_PACING_FQ);
return &q->internal;
}
@@ -651,7 +655,7 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
begin:
head = fq_pband_head_select(pband);
if (!head) {
- while (++retry < FQ_BANDS) {
+ while (++retry <= FQ_BANDS) {
if (++q->band_nr == FQ_BANDS)
q->band_nr = 0;
pband = &q->band_flows[q->band_nr];
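
Note: as read from the hunk, the retry bound now lets the dequeue loop step through every band, wrapping back to the one it started from, before concluding the qdisc is empty. A stand-alone sketch of the counting under that reading:

#include <stdio.h>

#define FQ_BANDS 3

int main(void)
{
	int band_nr = 1, retry = 0, visited = 0;

	while (++retry <= FQ_BANDS) {	/* '< FQ_BANDS' would stop one band short */
		if (++band_nr == FQ_BANDS)
			band_nr = 0;
		visited++;
	}
	printf("bands visited: %d\n", visited);	/* 3 */
	return 0;
}
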
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 546c10adcacd..5598f8be18ae 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1003,7 +1003,7 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
*cl = list_first_entry(&agg->active, struct qfq_class, alist);
skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
if (skb == NULL)
- WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
+ qdisc_warn_nonwc("qfq_dequeue", (*cl)->qdisc);
else
*len = qdisc_pkt_len(skb);
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 478b2c0060aa..762f424ff2d5 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -144,8 +144,7 @@ void tls_err_abort(struct sock *sk, int err);
int init_prot_info(struct tls_prot_info *prot,
const struct tls_crypto_info *crypto_info,
- const struct tls_cipher_desc *cipher_desc,
- int mode);
+ const struct tls_cipher_desc *cipher_desc);
int tls_set_sw_offload(struct sock *sk, int tx);
void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index f01543557a60..bf8ed36b1ad6 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -1099,7 +1099,7 @@ int tls_set_device_offload(struct sock *sk)
goto release_netdev;
}
- rc = init_prot_info(prot, crypto_info, cipher_desc, TLS_HW);
+ rc = init_prot_info(prot, crypto_info, cipher_desc);
if (rc)
goto release_netdev;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index b5905e60d792..a78e8e722409 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2629,8 +2629,7 @@ static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
int init_prot_info(struct tls_prot_info *prot,
const struct tls_crypto_info *crypto_info,
- const struct tls_cipher_desc *cipher_desc,
- int mode)
+ const struct tls_cipher_desc *cipher_desc)
{
u16 nonce_size = cipher_desc->nonce;
@@ -2643,11 +2642,6 @@ int init_prot_info(struct tls_prot_info *prot,
prot->tail_size = 0;
}
- if (mode == TLS_HW) {
- prot->aad_size = 0;
- prot->tail_size = 0;
- }
-
/* Sanity-check the sizes for stack allocations. */
if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE)
return -EINVAL;
@@ -2707,7 +2701,7 @@ int tls_set_sw_offload(struct sock *sk, int tx)
goto free_priv;
}
- rc = init_prot_info(prot, crypto_info, cipher_desc, TLS_SW);
+ rc = init_prot_info(prot, crypto_info, cipher_desc);
if (rc)
goto free_priv;
diff --git a/tools/net/ynl/generated/devlink-user.c b/tools/net/ynl/generated/devlink-user.c
index 2cb2518500cb..75b744b47986 100644
--- a/tools/net/ynl/generated/devlink-user.c
+++ b/tools/net/ynl/generated/devlink-user.c
@@ -16,14 +16,25 @@
static const char * const devlink_op_strmap[] = {
[3] = "get",
[7] = "port-get",
+ [DEVLINK_CMD_PORT_NEW] = "port-new",
[13] = "sb-get",
[17] = "sb-pool-get",
[21] = "sb-port-pool-get",
[25] = "sb-tc-pool-bind-get",
+ [DEVLINK_CMD_ESWITCH_GET] = "eswitch-get",
+ [DEVLINK_CMD_DPIPE_TABLE_GET] = "dpipe-table-get",
+ [DEVLINK_CMD_DPIPE_ENTRIES_GET] = "dpipe-entries-get",
+ [DEVLINK_CMD_DPIPE_HEADERS_GET] = "dpipe-headers-get",
+ [DEVLINK_CMD_RESOURCE_DUMP] = "resource-dump",
+ [DEVLINK_CMD_RELOAD] = "reload",
[DEVLINK_CMD_PARAM_GET] = "param-get",
[DEVLINK_CMD_REGION_GET] = "region-get",
+ [DEVLINK_CMD_REGION_NEW] = "region-new",
+ [DEVLINK_CMD_REGION_READ] = "region-read",
+ [DEVLINK_CMD_PORT_PARAM_GET] = "port-param-get",
[DEVLINK_CMD_INFO_GET] = "info-get",
[DEVLINK_CMD_HEALTH_REPORTER_GET] = "health-reporter-get",
+ [DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET] = "health-reporter-dump-get",
[63] = "trap-get",
[67] = "trap-group-get",
[71] = "trap-policer-get",
@@ -51,7 +62,303 @@ const char *devlink_sb_pool_type_str(enum devlink_sb_pool_type value)
return devlink_sb_pool_type_strmap[value];
}
+static const char * const devlink_port_type_strmap[] = {
+ [0] = "notset",
+ [1] = "auto",
+ [2] = "eth",
+ [3] = "ib",
+};
+
+const char *devlink_port_type_str(enum devlink_port_type value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_port_type_strmap))
+ return NULL;
+ return devlink_port_type_strmap[value];
+}
+
+static const char * const devlink_port_flavour_strmap[] = {
+ [0] = "physical",
+ [1] = "cpu",
+ [2] = "dsa",
+ [3] = "pci_pf",
+ [4] = "pci_vf",
+ [5] = "virtual",
+ [6] = "unused",
+ [7] = "pci_sf",
+};
+
+const char *devlink_port_flavour_str(enum devlink_port_flavour value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_port_flavour_strmap))
+ return NULL;
+ return devlink_port_flavour_strmap[value];
+}
+
+static const char * const devlink_port_fn_state_strmap[] = {
+ [0] = "inactive",
+ [1] = "active",
+};
+
+const char *devlink_port_fn_state_str(enum devlink_port_fn_state value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_port_fn_state_strmap))
+ return NULL;
+ return devlink_port_fn_state_strmap[value];
+}
+
+static const char * const devlink_port_fn_opstate_strmap[] = {
+ [0] = "detached",
+ [1] = "attached",
+};
+
+const char *devlink_port_fn_opstate_str(enum devlink_port_fn_opstate value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_port_fn_opstate_strmap))
+ return NULL;
+ return devlink_port_fn_opstate_strmap[value];
+}
+
+static const char * const devlink_port_fn_attr_cap_strmap[] = {
+ [0] = "roce-bit",
+ [1] = "migratable-bit",
+};
+
+const char *devlink_port_fn_attr_cap_str(enum devlink_port_fn_attr_cap value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_port_fn_attr_cap_strmap))
+ return NULL;
+ return devlink_port_fn_attr_cap_strmap[value];
+}
+
+static const char * const devlink_sb_threshold_type_strmap[] = {
+ [0] = "static",
+ [1] = "dynamic",
+};
+
+const char *devlink_sb_threshold_type_str(enum devlink_sb_threshold_type value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_sb_threshold_type_strmap))
+ return NULL;
+ return devlink_sb_threshold_type_strmap[value];
+}
+
+static const char * const devlink_eswitch_mode_strmap[] = {
+ [0] = "legacy",
+ [1] = "switchdev",
+};
+
+const char *devlink_eswitch_mode_str(enum devlink_eswitch_mode value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_eswitch_mode_strmap))
+ return NULL;
+ return devlink_eswitch_mode_strmap[value];
+}
+
+static const char * const devlink_eswitch_inline_mode_strmap[] = {
+ [0] = "none",
+ [1] = "link",
+ [2] = "network",
+ [3] = "transport",
+};
+
+const char *
+devlink_eswitch_inline_mode_str(enum devlink_eswitch_inline_mode value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_eswitch_inline_mode_strmap))
+ return NULL;
+ return devlink_eswitch_inline_mode_strmap[value];
+}
+
+static const char * const devlink_eswitch_encap_mode_strmap[] = {
+ [0] = "none",
+ [1] = "basic",
+};
+
+const char *
+devlink_eswitch_encap_mode_str(enum devlink_eswitch_encap_mode value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_eswitch_encap_mode_strmap))
+ return NULL;
+ return devlink_eswitch_encap_mode_strmap[value];
+}
+
+static const char * const devlink_dpipe_match_type_strmap[] = {
+ [0] = "field-exact",
+};
+
+const char *devlink_dpipe_match_type_str(enum devlink_dpipe_match_type value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_dpipe_match_type_strmap))
+ return NULL;
+ return devlink_dpipe_match_type_strmap[value];
+}
+
+static const char * const devlink_dpipe_action_type_strmap[] = {
+ [0] = "field-modify",
+};
+
+const char *devlink_dpipe_action_type_str(enum devlink_dpipe_action_type value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_dpipe_action_type_strmap))
+ return NULL;
+ return devlink_dpipe_action_type_strmap[value];
+}
+
+static const char * const devlink_dpipe_field_mapping_type_strmap[] = {
+ [0] = "none",
+ [1] = "ifindex",
+};
+
+const char *
+devlink_dpipe_field_mapping_type_str(enum devlink_dpipe_field_mapping_type value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_dpipe_field_mapping_type_strmap))
+ return NULL;
+ return devlink_dpipe_field_mapping_type_strmap[value];
+}
+
+static const char * const devlink_resource_unit_strmap[] = {
+ [0] = "entry",
+};
+
+const char *devlink_resource_unit_str(enum devlink_resource_unit value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_resource_unit_strmap))
+ return NULL;
+ return devlink_resource_unit_strmap[value];
+}
+
+static const char * const devlink_reload_action_strmap[] = {
+ [1] = "driver-reinit",
+ [2] = "fw-activate",
+};
+
+const char *devlink_reload_action_str(enum devlink_reload_action value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_reload_action_strmap))
+ return NULL;
+ return devlink_reload_action_strmap[value];
+}
+
+static const char * const devlink_param_cmode_strmap[] = {
+ [0] = "runtime",
+ [1] = "driverinit",
+ [2] = "permanent",
+};
+
+const char *devlink_param_cmode_str(enum devlink_param_cmode value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_param_cmode_strmap))
+ return NULL;
+ return devlink_param_cmode_strmap[value];
+}
+
+static const char * const devlink_flash_overwrite_strmap[] = {
+ [0] = "settings-bit",
+ [1] = "identifiers-bit",
+};
+
+const char *devlink_flash_overwrite_str(enum devlink_flash_overwrite value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_flash_overwrite_strmap))
+ return NULL;
+ return devlink_flash_overwrite_strmap[value];
+}
+
+static const char * const devlink_trap_action_strmap[] = {
+ [0] = "drop",
+ [1] = "trap",
+ [2] = "mirror",
+};
+
+const char *devlink_trap_action_str(enum devlink_trap_action value)
+{
+ if (value < 0 || value >= (int)MNL_ARRAY_SIZE(devlink_trap_action_strmap))
+ return NULL;
+ return devlink_trap_action_strmap[value];
+}
+
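
Note: all of the generated *_str() helpers above follow the same pattern: a sparse designated-initializer table indexed by the enum value, a bounds check, and NULL for values outside the table (or for holes inside it). A self-contained sketch of the pattern, reusing the reload-action values purely for illustration:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const reload_action_strmap[] = {
	[1] = "driver-reinit",
	[2] = "fw-activate",
};

static const char *reload_action_str(int value)
{
	if (value < 0 || value >= (int)ARRAY_SIZE(reload_action_strmap))
		return NULL;
	return reload_action_strmap[value];	/* may be NULL for gaps, e.g. 0 */
}

int main(void)
{
	const char *name = reload_action_str(2);

	printf("%s\n", name ? name : "(unknown)");
	return 0;
}
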
/* Policies */
+struct ynl_policy_attr devlink_dl_dpipe_match_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_MATCH_TYPE] = { .name = "dpipe-match-type", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_HEADER_ID] = { .name = "dpipe-header-id", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_HEADER_GLOBAL] = { .name = "dpipe-header-global", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_DPIPE_HEADER_INDEX] = { .name = "dpipe-header-index", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_FIELD_ID] = { .name = "dpipe-field-id", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_match_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_match_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_match_value_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_MATCH] = { .name = "dpipe-match", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_match_nest, },
+ [DEVLINK_ATTR_DPIPE_VALUE] = { .name = "dpipe-value", .type = YNL_PT_BINARY,},
+ [DEVLINK_ATTR_DPIPE_VALUE_MASK] = { .name = "dpipe-value-mask", .type = YNL_PT_BINARY,},
+ [DEVLINK_ATTR_DPIPE_VALUE_MAPPING] = { .name = "dpipe-value-mapping", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_match_value_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_match_value_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_action_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_ACTION_TYPE] = { .name = "dpipe-action-type", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_HEADER_ID] = { .name = "dpipe-header-id", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_HEADER_GLOBAL] = { .name = "dpipe-header-global", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_DPIPE_HEADER_INDEX] = { .name = "dpipe-header-index", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_FIELD_ID] = { .name = "dpipe-field-id", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_action_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_action_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_action_value_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_ACTION] = { .name = "dpipe-action", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_action_nest, },
+ [DEVLINK_ATTR_DPIPE_VALUE] = { .name = "dpipe-value", .type = YNL_PT_BINARY,},
+ [DEVLINK_ATTR_DPIPE_VALUE_MASK] = { .name = "dpipe-value-mask", .type = YNL_PT_BINARY,},
+ [DEVLINK_ATTR_DPIPE_VALUE_MAPPING] = { .name = "dpipe-value-mapping", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_action_value_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_action_value_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_field_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_FIELD_NAME] = { .name = "dpipe-field-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_DPIPE_FIELD_ID] = { .name = "dpipe-field-id", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH] = { .name = "dpipe-field-bitwidth", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE] = { .name = "dpipe-field-mapping-type", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_field_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_field_policy,
+};
+
+struct ynl_policy_attr devlink_dl_resource_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_RESOURCE_NAME] = { .name = "resource-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_RESOURCE_ID] = { .name = "resource-id", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE] = { .name = "resource-size", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE_NEW] = { .name = "resource-size-new", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE_VALID] = { .name = "resource-size-valid", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_RESOURCE_SIZE_MIN] = { .name = "resource-size-min", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE_MAX] = { .name = "resource-size-max", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE_GRAN] = { .name = "resource-size-gran", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_UNIT] = { .name = "resource-unit", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_RESOURCE_OCC] = { .name = "resource-occ", .type = YNL_PT_U64, },
+};
+
+struct ynl_policy_nest devlink_dl_resource_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_resource_policy,
+};
+
struct ynl_policy_attr devlink_dl_info_version_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_INFO_VERSION_NAME] = { .name = "info-version-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_INFO_VERSION_VALUE] = { .name = "info-version-value", .type = YNL_PT_NUL_STR, },
@@ -62,6 +369,31 @@ struct ynl_policy_nest devlink_dl_info_version_nest = {
.table = devlink_dl_info_version_policy,
};
+struct ynl_policy_attr devlink_dl_fmsg_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_FMSG_OBJ_NEST_START] = { .name = "fmsg-obj-nest-start", .type = YNL_PT_FLAG, },
+ [DEVLINK_ATTR_FMSG_PAIR_NEST_START] = { .name = "fmsg-pair-nest-start", .type = YNL_PT_FLAG, },
+ [DEVLINK_ATTR_FMSG_ARR_NEST_START] = { .name = "fmsg-arr-nest-start", .type = YNL_PT_FLAG, },
+ [DEVLINK_ATTR_FMSG_NEST_END] = { .name = "fmsg-nest-end", .type = YNL_PT_FLAG, },
+ [DEVLINK_ATTR_FMSG_OBJ_NAME] = { .name = "fmsg-obj-name", .type = YNL_PT_NUL_STR, },
+};
+
+struct ynl_policy_nest devlink_dl_fmsg_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_fmsg_policy,
+};
+
+struct ynl_policy_attr devlink_dl_port_function_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = {
+ [DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .name = "hw-addr", .type = YNL_PT_BINARY,},
+ [DEVLINK_PORT_FN_ATTR_STATE] = { .name = "state", .type = YNL_PT_U8, },
+ [DEVLINK_PORT_FN_ATTR_OPSTATE] = { .name = "opstate", .type = YNL_PT_U8, },
+ [DEVLINK_PORT_FN_ATTR_CAPS] = { .name = "caps", .type = YNL_PT_BITFIELD32, },
+};
+
+struct ynl_policy_nest devlink_dl_port_function_nest = {
+ .max_attr = DEVLINK_PORT_FUNCTION_ATTR_MAX,
+ .table = devlink_dl_port_function_policy,
+};
+
struct ynl_policy_attr devlink_dl_reload_stats_entry_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_RELOAD_STATS_LIMIT] = { .name = "reload-stats-limit", .type = YNL_PT_U8, },
[DEVLINK_ATTR_RELOAD_STATS_VALUE] = { .name = "reload-stats-value", .type = YNL_PT_U32, },
@@ -81,6 +413,69 @@ struct ynl_policy_nest devlink_dl_reload_act_stats_nest = {
.table = devlink_dl_reload_act_stats_policy,
};
+struct ynl_policy_attr devlink_dl_selftest_id_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = {
+ [DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .name = "flash", .type = YNL_PT_FLAG, },
+};
+
+struct ynl_policy_nest devlink_dl_selftest_id_nest = {
+ .max_attr = DEVLINK_ATTR_SELFTEST_ID_MAX,
+ .table = devlink_dl_selftest_id_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_table_matches_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_MATCH] = { .name = "dpipe-match", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_match_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_table_matches_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_table_matches_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_table_actions_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_ACTION] = { .name = "dpipe-action", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_action_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_table_actions_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_table_actions_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_entry_match_values_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_MATCH_VALUE] = { .name = "dpipe-match-value", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_match_value_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_entry_match_values_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_entry_match_values_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_entry_action_values_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_ACTION_VALUE] = { .name = "dpipe-action-value", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_action_value_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_entry_action_values_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_entry_action_values_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_header_fields_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_FIELD] = { .name = "dpipe-field", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_field_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_header_fields_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_header_fields_policy,
+};
+
+struct ynl_policy_attr devlink_dl_resource_list_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_RESOURCE] = { .name = "resource", .type = YNL_PT_NEST, .nest = &devlink_dl_resource_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_resource_list_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_resource_list_policy,
+};
+
struct ynl_policy_attr devlink_dl_reload_act_info_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_RELOAD_ACTION] = { .name = "reload-action", .type = YNL_PT_U8, },
[DEVLINK_ATTR_RELOAD_ACTION_STATS] = { .name = "reload-action-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_stats_nest, },
@@ -91,6 +486,45 @@ struct ynl_policy_nest devlink_dl_reload_act_info_nest = {
.table = devlink_dl_reload_act_info_policy,
};
+struct ynl_policy_attr devlink_dl_dpipe_table_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .name = "dpipe-table-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_DPIPE_TABLE_SIZE] = { .name = "dpipe-table-size", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_DPIPE_TABLE_MATCHES] = { .name = "dpipe-table-matches", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_table_matches_nest, },
+ [DEVLINK_ATTR_DPIPE_TABLE_ACTIONS] = { .name = "dpipe-table-actions", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_table_actions_nest, },
+ [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .name = "dpipe-table-counters-enabled", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID] = { .name = "dpipe-table-resource-id", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS] = { .name = "dpipe-table-resource-units", .type = YNL_PT_U64, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_table_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_table_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_entry_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_ENTRY_INDEX] = { .name = "dpipe-entry-index", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES] = { .name = "dpipe-entry-match-values", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_entry_match_values_nest, },
+ [DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES] = { .name = "dpipe-entry-action-values", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_entry_action_values_nest, },
+ [DEVLINK_ATTR_DPIPE_ENTRY_COUNTER] = { .name = "dpipe-entry-counter", .type = YNL_PT_U64, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_entry_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_entry_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_header_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_HEADER_NAME] = { .name = "dpipe-header-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_DPIPE_HEADER_ID] = { .name = "dpipe-header-id", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_HEADER_GLOBAL] = { .name = "dpipe-header-global", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_DPIPE_HEADER_FIELDS] = { .name = "dpipe-header-fields", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_header_fields_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_header_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_header_policy,
+};
+
struct ynl_policy_attr devlink_dl_reload_stats_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_RELOAD_ACTION_INFO] = { .name = "reload-action-info", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_info_nest, },
};
@@ -100,6 +534,33 @@ struct ynl_policy_nest devlink_dl_reload_stats_nest = {
.table = devlink_dl_reload_stats_policy,
};
+struct ynl_policy_attr devlink_dl_dpipe_tables_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_TABLE] = { .name = "dpipe-table", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_table_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_tables_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_tables_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_entries_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_ENTRY] = { .name = "dpipe-entry", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_entry_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_entries_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_entries_policy,
+};
+
+struct ynl_policy_attr devlink_dl_dpipe_headers_policy[DEVLINK_ATTR_MAX + 1] = {
+ [DEVLINK_ATTR_DPIPE_HEADER] = { .name = "dpipe-header", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_header_nest, },
+};
+
+struct ynl_policy_nest devlink_dl_dpipe_headers_nest = {
+ .max_attr = DEVLINK_ATTR_MAX,
+ .table = devlink_dl_dpipe_headers_policy,
+};
+
struct ynl_policy_attr devlink_dl_dev_stats_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_RELOAD_STATS] = { .name = "reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, },
[DEVLINK_ATTR_REMOTE_RELOAD_STATS] = { .name = "remote-reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, },
@@ -114,12 +575,75 @@ struct ynl_policy_attr devlink_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .name = "bus-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_DEV_NAME] = { .name = "dev-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_PORT_INDEX] = { .name = "port-index", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_PORT_TYPE] = { .name = "port-type", .type = YNL_PT_U16, },
+ [DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .name = "port-split-count", .type = YNL_PT_U32, },
[DEVLINK_ATTR_SB_INDEX] = { .name = "sb-index", .type = YNL_PT_U32, },
[DEVLINK_ATTR_SB_POOL_INDEX] = { .name = "sb-pool-index", .type = YNL_PT_U16, },
[DEVLINK_ATTR_SB_POOL_TYPE] = { .name = "sb-pool-type", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_SB_POOL_SIZE] = { .name = "sb-pool-size", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .name = "sb-pool-threshold-type", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_SB_THRESHOLD] = { .name = "sb-threshold", .type = YNL_PT_U32, },
[DEVLINK_ATTR_SB_TC_INDEX] = { .name = "sb-tc-index", .type = YNL_PT_U16, },
+ [DEVLINK_ATTR_ESWITCH_MODE] = { .name = "eswitch-mode", .type = YNL_PT_U16, },
+ [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .name = "eswitch-inline-mode", .type = YNL_PT_U16, },
+ [DEVLINK_ATTR_DPIPE_TABLES] = { .name = "dpipe-tables", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_tables_nest, },
+ [DEVLINK_ATTR_DPIPE_TABLE] = { .name = "dpipe-table", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_table_nest, },
+ [DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .name = "dpipe-table-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_DPIPE_TABLE_SIZE] = { .name = "dpipe-table-size", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_DPIPE_TABLE_MATCHES] = { .name = "dpipe-table-matches", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_table_matches_nest, },
+ [DEVLINK_ATTR_DPIPE_TABLE_ACTIONS] = { .name = "dpipe-table-actions", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_table_actions_nest, },
+ [DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .name = "dpipe-table-counters-enabled", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_DPIPE_ENTRIES] = { .name = "dpipe-entries", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_entries_nest, },
+ [DEVLINK_ATTR_DPIPE_ENTRY] = { .name = "dpipe-entry", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_entry_nest, },
+ [DEVLINK_ATTR_DPIPE_ENTRY_INDEX] = { .name = "dpipe-entry-index", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES] = { .name = "dpipe-entry-match-values", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_entry_match_values_nest, },
+ [DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES] = { .name = "dpipe-entry-action-values", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_entry_action_values_nest, },
+ [DEVLINK_ATTR_DPIPE_ENTRY_COUNTER] = { .name = "dpipe-entry-counter", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_DPIPE_MATCH] = { .name = "dpipe-match", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_match_nest, },
+ [DEVLINK_ATTR_DPIPE_MATCH_VALUE] = { .name = "dpipe-match-value", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_match_value_nest, },
+ [DEVLINK_ATTR_DPIPE_MATCH_TYPE] = { .name = "dpipe-match-type", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_ACTION] = { .name = "dpipe-action", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_action_nest, },
+ [DEVLINK_ATTR_DPIPE_ACTION_VALUE] = { .name = "dpipe-action-value", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_action_value_nest, },
+ [DEVLINK_ATTR_DPIPE_ACTION_TYPE] = { .name = "dpipe-action-type", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_VALUE] = { .name = "dpipe-value", .type = YNL_PT_BINARY,},
+ [DEVLINK_ATTR_DPIPE_VALUE_MASK] = { .name = "dpipe-value-mask", .type = YNL_PT_BINARY,},
+ [DEVLINK_ATTR_DPIPE_VALUE_MAPPING] = { .name = "dpipe-value-mapping", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_HEADERS] = { .name = "dpipe-headers", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_headers_nest, },
+ [DEVLINK_ATTR_DPIPE_HEADER] = { .name = "dpipe-header", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_header_nest, },
+ [DEVLINK_ATTR_DPIPE_HEADER_NAME] = { .name = "dpipe-header-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_DPIPE_HEADER_ID] = { .name = "dpipe-header-id", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_HEADER_FIELDS] = { .name = "dpipe-header-fields", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_header_fields_nest, },
+ [DEVLINK_ATTR_DPIPE_HEADER_GLOBAL] = { .name = "dpipe-header-global", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_DPIPE_HEADER_INDEX] = { .name = "dpipe-header-index", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_FIELD] = { .name = "dpipe-field", .type = YNL_PT_NEST, .nest = &devlink_dl_dpipe_field_nest, },
+ [DEVLINK_ATTR_DPIPE_FIELD_NAME] = { .name = "dpipe-field-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_DPIPE_FIELD_ID] = { .name = "dpipe-field-id", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH] = { .name = "dpipe-field-bitwidth", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE] = { .name = "dpipe-field-mapping-type", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
+ [DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .name = "eswitch-encap-mode", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_RESOURCE_LIST] = { .name = "resource-list", .type = YNL_PT_NEST, .nest = &devlink_dl_resource_list_nest, },
+ [DEVLINK_ATTR_RESOURCE] = { .name = "resource", .type = YNL_PT_NEST, .nest = &devlink_dl_resource_nest, },
+ [DEVLINK_ATTR_RESOURCE_NAME] = { .name = "resource-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_RESOURCE_ID] = { .name = "resource-id", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE] = { .name = "resource-size", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE_NEW] = { .name = "resource-size-new", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE_VALID] = { .name = "resource-size-valid", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_RESOURCE_SIZE_MIN] = { .name = "resource-size-min", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE_MAX] = { .name = "resource-size-max", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_SIZE_GRAN] = { .name = "resource-size-gran", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RESOURCE_UNIT] = { .name = "resource-unit", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_RESOURCE_OCC] = { .name = "resource-occ", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID] = { .name = "dpipe-table-resource-id", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS] = { .name = "dpipe-table-resource-units", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_PORT_FLAVOUR] = { .name = "port-flavour", .type = YNL_PT_U16, },
[DEVLINK_ATTR_PARAM_NAME] = { .name = "param-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_PARAM_TYPE] = { .name = "param-type", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .name = "param-value-cmode", .type = YNL_PT_U8, },
[DEVLINK_ATTR_REGION_NAME] = { .name = "region-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .name = "region-snapshot-id", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .name = "region-chunk-addr", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .name = "region-chunk-len", .type = YNL_PT_U64, },
[DEVLINK_ATTR_INFO_DRIVER_NAME] = { .name = "info-driver-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_INFO_SERIAL_NUMBER] = { .name = "info-serial-number", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_INFO_VERSION_FIXED] = { .name = "info-version-fixed", .type = YNL_PT_NEST, .nest = &devlink_dl_info_version_nest, },
@@ -127,12 +651,35 @@ struct ynl_policy_attr devlink_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_INFO_VERSION_STORED] = { .name = "info-version-stored", .type = YNL_PT_NEST, .nest = &devlink_dl_info_version_nest, },
[DEVLINK_ATTR_INFO_VERSION_NAME] = { .name = "info-version-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_INFO_VERSION_VALUE] = { .name = "info-version-value", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_FMSG] = { .name = "fmsg", .type = YNL_PT_NEST, .nest = &devlink_dl_fmsg_nest, },
+ [DEVLINK_ATTR_FMSG_OBJ_NEST_START] = { .name = "fmsg-obj-nest-start", .type = YNL_PT_FLAG, },
+ [DEVLINK_ATTR_FMSG_PAIR_NEST_START] = { .name = "fmsg-pair-nest-start", .type = YNL_PT_FLAG, },
+ [DEVLINK_ATTR_FMSG_ARR_NEST_START] = { .name = "fmsg-arr-nest-start", .type = YNL_PT_FLAG, },
+ [DEVLINK_ATTR_FMSG_NEST_END] = { .name = "fmsg-nest-end", .type = YNL_PT_FLAG, },
+ [DEVLINK_ATTR_FMSG_OBJ_NAME] = { .name = "fmsg-obj-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .name = "health-reporter-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .name = "health-reporter-graceful-period", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .name = "health-reporter-auto-recover", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .name = "flash-update-file-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .name = "flash-update-component", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_PORT_PCI_PF_NUMBER] = { .name = "port-pci-pf-number", .type = YNL_PT_U16, },
[DEVLINK_ATTR_TRAP_NAME] = { .name = "trap-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_TRAP_ACTION] = { .name = "trap-action", .type = YNL_PT_U8, },
[DEVLINK_ATTR_TRAP_GROUP_NAME] = { .name = "trap-group-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_RELOAD_FAILED] = { .name = "reload-failed", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_NETNS_FD] = { .name = "netns-fd", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_NETNS_PID] = { .name = "netns-pid", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_NETNS_ID] = { .name = "netns-id", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP] = { .name = "health-reporter-auto-dump", .type = YNL_PT_U8, },
[DEVLINK_ATTR_TRAP_POLICER_ID] = { .name = "trap-policer-id", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .name = "trap-policer-rate", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .name = "trap-policer-burst", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_PORT_FUNCTION] = { .name = "port-function", .type = YNL_PT_NEST, .nest = &devlink_dl_port_function_nest, },
+ [DEVLINK_ATTR_PORT_CONTROLLER_NUMBER] = { .name = "port-controller-number", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK] = { .name = "flash-update-overwrite-mask", .type = YNL_PT_BITFIELD32, },
[DEVLINK_ATTR_RELOAD_ACTION] = { .name = "reload-action", .type = YNL_PT_U8, },
+ [DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED] = { .name = "reload-actions-performed", .type = YNL_PT_BITFIELD32, },
+ [DEVLINK_ATTR_RELOAD_LIMITS] = { .name = "reload-limits", .type = YNL_PT_BITFIELD32, },
[DEVLINK_ATTR_DEV_STATS] = { .name = "dev-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_dev_stats_nest, },
[DEVLINK_ATTR_RELOAD_STATS] = { .name = "reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, },
[DEVLINK_ATTR_RELOAD_STATS_ENTRY] = { .name = "reload-stats-entry", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_entry_nest, },
@@ -141,8 +688,17 @@ struct ynl_policy_attr devlink_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_REMOTE_RELOAD_STATS] = { .name = "remote-reload-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_stats_nest, },
[DEVLINK_ATTR_RELOAD_ACTION_INFO] = { .name = "reload-action-info", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_info_nest, },
[DEVLINK_ATTR_RELOAD_ACTION_STATS] = { .name = "reload-action-stats", .type = YNL_PT_NEST, .nest = &devlink_dl_reload_act_stats_nest, },
+ [DEVLINK_ATTR_PORT_PCI_SF_NUMBER] = { .name = "port-pci-sf-number", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_RATE_TX_SHARE] = { .name = "rate-tx-share", .type = YNL_PT_U64, },
+ [DEVLINK_ATTR_RATE_TX_MAX] = { .name = "rate-tx-max", .type = YNL_PT_U64, },
[DEVLINK_ATTR_RATE_NODE_NAME] = { .name = "rate-node-name", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_RATE_PARENT_NODE_NAME] = { .name = "rate-parent-node-name", .type = YNL_PT_NUL_STR, },
[DEVLINK_ATTR_LINECARD_INDEX] = { .name = "linecard-index", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_LINECARD_TYPE] = { .name = "linecard-type", .type = YNL_PT_NUL_STR, },
+ [DEVLINK_ATTR_SELFTESTS] = { .name = "selftests", .type = YNL_PT_NEST, .nest = &devlink_dl_selftest_id_nest, },
+ [DEVLINK_ATTR_RATE_TX_PRIORITY] = { .name = "rate-tx-priority", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_RATE_TX_WEIGHT] = { .name = "rate-tx-weight", .type = YNL_PT_U32, },
+ [DEVLINK_ATTR_REGION_DIRECT] = { .name = "region-direct", .type = YNL_PT_FLAG, },
};
struct ynl_policy_nest devlink_nest = {
@@ -151,6 +707,370 @@ struct ynl_policy_nest devlink_nest = {
};
/* Common nested types */
+void devlink_dl_dpipe_match_free(struct devlink_dl_dpipe_match *obj)
+{
+}
+
+int devlink_dl_dpipe_match_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_match *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_MATCH_TYPE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_match_type = 1;
+ dst->dpipe_match_type = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_HEADER_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_header_id = 1;
+ dst->dpipe_header_id = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_HEADER_GLOBAL) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_header_global = 1;
+ dst->dpipe_header_global = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_HEADER_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_header_index = 1;
+ dst->dpipe_header_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_FIELD_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_field_id = 1;
+ dst->dpipe_field_id = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return 0;
+}
+
+void
+devlink_dl_dpipe_match_value_free(struct devlink_dl_dpipe_match_value *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_dpipe_match; i++)
+ devlink_dl_dpipe_match_free(&obj->dpipe_match[i]);
+ free(obj->dpipe_match);
+ free(obj->dpipe_value);
+ free(obj->dpipe_value_mask);
+}
+
+int devlink_dl_dpipe_match_value_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_match_value *dst = yarg->data;
+ unsigned int n_dpipe_match = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->dpipe_match)
+ return ynl_error_parse(yarg, "attribute already present (dl-dpipe-match-value.dpipe-match)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_MATCH) {
+ n_dpipe_match++;
+ } else if (type == DEVLINK_ATTR_DPIPE_VALUE) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.dpipe_value_len = len;
+ dst->dpipe_value = malloc(len);
+ memcpy(dst->dpipe_value, mnl_attr_get_payload(attr), len);
+ } else if (type == DEVLINK_ATTR_DPIPE_VALUE_MASK) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.dpipe_value_mask_len = len;
+ dst->dpipe_value_mask = malloc(len);
+ memcpy(dst->dpipe_value_mask, mnl_attr_get_payload(attr), len);
+ } else if (type == DEVLINK_ATTR_DPIPE_VALUE_MAPPING) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_value_mapping = 1;
+ dst->dpipe_value_mapping = mnl_attr_get_u32(attr);
+ }
+ }
+
+ if (n_dpipe_match) {
+ dst->dpipe_match = calloc(n_dpipe_match, sizeof(*dst->dpipe_match));
+ dst->n_dpipe_match = n_dpipe_match;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_dpipe_match_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_DPIPE_MATCH) {
+ parg.data = &dst->dpipe_match[i];
+ if (devlink_dl_dpipe_match_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
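
Note: the array-valued nests above are parsed in two passes: the first walk over the nest only counts members of the array attribute, then the array is calloc()'d and a second walk fills it in. A stand-alone sketch of that two-pass shape, with plain integers standing in for netlink attributes:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int attrs[] = { 7, 3, 7, 9, 7 };	/* pretend type 7 is the array member */
	size_t i, n = 0, j = 0;
	int *out;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)	/* pass 1: count */
		if (attrs[i] == 7)
			n++;

	out = calloc(n, sizeof(*out));				/* pass 2: fill */
	if (!out)
		return 1;
	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		if (attrs[i] == 7)
			out[j++] = attrs[i];

	printf("parsed %zu members\n", n);
	free(out);
	return 0;
}
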
+void devlink_dl_dpipe_action_free(struct devlink_dl_dpipe_action *obj)
+{
+}
+
+int devlink_dl_dpipe_action_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_action *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_ACTION_TYPE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_action_type = 1;
+ dst->dpipe_action_type = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_HEADER_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_header_id = 1;
+ dst->dpipe_header_id = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_HEADER_GLOBAL) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_header_global = 1;
+ dst->dpipe_header_global = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_HEADER_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_header_index = 1;
+ dst->dpipe_header_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_FIELD_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_field_id = 1;
+ dst->dpipe_field_id = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return 0;
+}
+
+void
+devlink_dl_dpipe_action_value_free(struct devlink_dl_dpipe_action_value *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_dpipe_action; i++)
+ devlink_dl_dpipe_action_free(&obj->dpipe_action[i]);
+ free(obj->dpipe_action);
+ free(obj->dpipe_value);
+ free(obj->dpipe_value_mask);
+}
+
+int devlink_dl_dpipe_action_value_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_action_value *dst = yarg->data;
+ unsigned int n_dpipe_action = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->dpipe_action)
+ return ynl_error_parse(yarg, "attribute already present (dl-dpipe-action-value.dpipe-action)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_ACTION) {
+ n_dpipe_action++;
+ } else if (type == DEVLINK_ATTR_DPIPE_VALUE) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.dpipe_value_len = len;
+ dst->dpipe_value = malloc(len);
+ memcpy(dst->dpipe_value, mnl_attr_get_payload(attr), len);
+ } else if (type == DEVLINK_ATTR_DPIPE_VALUE_MASK) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = mnl_attr_get_payload_len(attr);
+ dst->_present.dpipe_value_mask_len = len;
+ dst->dpipe_value_mask = malloc(len);
+ memcpy(dst->dpipe_value_mask, mnl_attr_get_payload(attr), len);
+ } else if (type == DEVLINK_ATTR_DPIPE_VALUE_MAPPING) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_value_mapping = 1;
+ dst->dpipe_value_mapping = mnl_attr_get_u32(attr);
+ }
+ }
+
+ if (n_dpipe_action) {
+ dst->dpipe_action = calloc(n_dpipe_action, sizeof(*dst->dpipe_action));
+ dst->n_dpipe_action = n_dpipe_action;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_dpipe_action_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_DPIPE_ACTION) {
+ parg.data = &dst->dpipe_action[i];
+ if (devlink_dl_dpipe_action_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_dpipe_field_free(struct devlink_dl_dpipe_field *obj)
+{
+ free(obj->dpipe_field_name);
+}
+
+int devlink_dl_dpipe_field_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_field *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_FIELD_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dpipe_field_name_len = len;
+ dst->dpipe_field_name = malloc(len + 1);
+ memcpy(dst->dpipe_field_name, mnl_attr_get_str(attr), len);
+ dst->dpipe_field_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DPIPE_FIELD_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_field_id = 1;
+ dst->dpipe_field_id = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_field_bitwidth = 1;
+ dst->dpipe_field_bitwidth = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_field_mapping_type = 1;
+ dst->dpipe_field_mapping_type = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_resource_free(struct devlink_dl_resource *obj)
+{
+ free(obj->resource_name);
+}
+
+int devlink_dl_resource_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_resource *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_RESOURCE_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.resource_name_len = len;
+ dst->resource_name = malloc(len + 1);
+ memcpy(dst->resource_name, mnl_attr_get_str(attr), len);
+ dst->resource_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_RESOURCE_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.resource_id = 1;
+ dst->resource_id = mnl_attr_get_u64(attr);
+ } else if (type == DEVLINK_ATTR_RESOURCE_SIZE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.resource_size = 1;
+ dst->resource_size = mnl_attr_get_u64(attr);
+ } else if (type == DEVLINK_ATTR_RESOURCE_SIZE_NEW) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.resource_size_new = 1;
+ dst->resource_size_new = mnl_attr_get_u64(attr);
+ } else if (type == DEVLINK_ATTR_RESOURCE_SIZE_VALID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.resource_size_valid = 1;
+ dst->resource_size_valid = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_RESOURCE_SIZE_MIN) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.resource_size_min = 1;
+ dst->resource_size_min = mnl_attr_get_u64(attr);
+ } else if (type == DEVLINK_ATTR_RESOURCE_SIZE_MAX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.resource_size_max = 1;
+ dst->resource_size_max = mnl_attr_get_u64(attr);
+ } else if (type == DEVLINK_ATTR_RESOURCE_SIZE_GRAN) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.resource_size_gran = 1;
+ dst->resource_size_gran = mnl_attr_get_u64(attr);
+ } else if (type == DEVLINK_ATTR_RESOURCE_UNIT) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.resource_unit = 1;
+ dst->resource_unit = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_RESOURCE_OCC) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.resource_occ = 1;
+ dst->resource_occ = mnl_attr_get_u64(attr);
+ }
+ }
+
+ return 0;
+}
+
void devlink_dl_info_version_free(struct devlink_dl_info_version *obj)
{
free(obj->info_version_name);
@@ -194,6 +1114,77 @@ int devlink_dl_info_version_parse(struct ynl_parse_arg *yarg,
return 0;
}
+void devlink_dl_fmsg_free(struct devlink_dl_fmsg *obj)
+{
+ free(obj->fmsg_obj_name);
+}
+
+int devlink_dl_fmsg_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_fmsg *dst = yarg->data;
+ const struct nlattr *attr;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_FMSG_OBJ_NEST_START) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.fmsg_obj_nest_start = 1;
+ } else if (type == DEVLINK_ATTR_FMSG_PAIR_NEST_START) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.fmsg_pair_nest_start = 1;
+ } else if (type == DEVLINK_ATTR_FMSG_ARR_NEST_START) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.fmsg_arr_nest_start = 1;
+ } else if (type == DEVLINK_ATTR_FMSG_NEST_END) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.fmsg_nest_end = 1;
+ } else if (type == DEVLINK_ATTR_FMSG_OBJ_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.fmsg_obj_name_len = len;
+ dst->fmsg_obj_name = malloc(len + 1);
+ memcpy(dst->fmsg_obj_name, mnl_attr_get_str(attr), len);
+ dst->fmsg_obj_name[len] = 0;
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_port_function_free(struct devlink_dl_port_function *obj)
+{
+ free(obj->hw_addr);
+}
+
+int devlink_dl_port_function_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct devlink_dl_port_function *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ if (obj->_present.hw_addr_len)
+ mnl_attr_put(nlh, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, obj->_present.hw_addr_len, obj->hw_addr);
+ if (obj->_present.state)
+ mnl_attr_put_u8(nlh, DEVLINK_PORT_FN_ATTR_STATE, obj->state);
+ if (obj->_present.opstate)
+ mnl_attr_put_u8(nlh, DEVLINK_PORT_FN_ATTR_OPSTATE, obj->opstate);
+ if (obj->_present.caps)
+ mnl_attr_put(nlh, DEVLINK_PORT_FN_ATTR_CAPS, sizeof(struct nla_bitfield32), &obj->caps);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
void
devlink_dl_reload_stats_entry_free(struct devlink_dl_reload_stats_entry *obj)
{
@@ -273,6 +1264,322 @@ int devlink_dl_reload_act_stats_parse(struct ynl_parse_arg *yarg,
return 0;
}
+void devlink_dl_selftest_id_free(struct devlink_dl_selftest_id *obj)
+{
+}
+
+int devlink_dl_selftest_id_put(struct nlmsghdr *nlh, unsigned int attr_type,
+ struct devlink_dl_selftest_id *obj)
+{
+ struct nlattr *nest;
+
+ nest = mnl_attr_nest_start(nlh, attr_type);
+ if (obj->_present.flash)
+ mnl_attr_put(nlh, DEVLINK_ATTR_SELFTEST_ID_FLASH, 0, NULL);
+ mnl_attr_nest_end(nlh, nest);
+
+ return 0;
+}
+
+void
+devlink_dl_dpipe_table_matches_free(struct devlink_dl_dpipe_table_matches *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_dpipe_match; i++)
+ devlink_dl_dpipe_match_free(&obj->dpipe_match[i]);
+ free(obj->dpipe_match);
+}
+
+int devlink_dl_dpipe_table_matches_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_table_matches *dst = yarg->data;
+ unsigned int n_dpipe_match = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->dpipe_match)
+ return ynl_error_parse(yarg, "attribute already present (dl-dpipe-table-matches.dpipe-match)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_MATCH) {
+ n_dpipe_match++;
+ }
+ }
+
+ if (n_dpipe_match) {
+ dst->dpipe_match = calloc(n_dpipe_match, sizeof(*dst->dpipe_match));
+ dst->n_dpipe_match = n_dpipe_match;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_dpipe_match_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_DPIPE_MATCH) {
+ parg.data = &dst->dpipe_match[i];
+ if (devlink_dl_dpipe_match_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+devlink_dl_dpipe_table_actions_free(struct devlink_dl_dpipe_table_actions *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_dpipe_action; i++)
+ devlink_dl_dpipe_action_free(&obj->dpipe_action[i]);
+ free(obj->dpipe_action);
+}
+
+int devlink_dl_dpipe_table_actions_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_table_actions *dst = yarg->data;
+ unsigned int n_dpipe_action = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->dpipe_action)
+ return ynl_error_parse(yarg, "attribute already present (dl-dpipe-table-actions.dpipe-action)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_ACTION) {
+ n_dpipe_action++;
+ }
+ }
+
+ if (n_dpipe_action) {
+ dst->dpipe_action = calloc(n_dpipe_action, sizeof(*dst->dpipe_action));
+ dst->n_dpipe_action = n_dpipe_action;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_dpipe_action_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_DPIPE_ACTION) {
+ parg.data = &dst->dpipe_action[i];
+ if (devlink_dl_dpipe_action_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+devlink_dl_dpipe_entry_match_values_free(struct devlink_dl_dpipe_entry_match_values *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_dpipe_match_value; i++)
+ devlink_dl_dpipe_match_value_free(&obj->dpipe_match_value[i]);
+ free(obj->dpipe_match_value);
+}
+
+int devlink_dl_dpipe_entry_match_values_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_entry_match_values *dst = yarg->data;
+ unsigned int n_dpipe_match_value = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->dpipe_match_value)
+ return ynl_error_parse(yarg, "attribute already present (dl-dpipe-entry-match-values.dpipe-match-value)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_MATCH_VALUE) {
+ n_dpipe_match_value++;
+ }
+ }
+
+ if (n_dpipe_match_value) {
+ dst->dpipe_match_value = calloc(n_dpipe_match_value, sizeof(*dst->dpipe_match_value));
+ dst->n_dpipe_match_value = n_dpipe_match_value;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_dpipe_match_value_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_DPIPE_MATCH_VALUE) {
+ parg.data = &dst->dpipe_match_value[i];
+ if (devlink_dl_dpipe_match_value_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+devlink_dl_dpipe_entry_action_values_free(struct devlink_dl_dpipe_entry_action_values *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_dpipe_action_value; i++)
+ devlink_dl_dpipe_action_value_free(&obj->dpipe_action_value[i]);
+ free(obj->dpipe_action_value);
+}
+
+int devlink_dl_dpipe_entry_action_values_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_entry_action_values *dst = yarg->data;
+ unsigned int n_dpipe_action_value = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->dpipe_action_value)
+ return ynl_error_parse(yarg, "attribute already present (dl-dpipe-entry-action-values.dpipe-action-value)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_ACTION_VALUE) {
+ n_dpipe_action_value++;
+ }
+ }
+
+ if (n_dpipe_action_value) {
+ dst->dpipe_action_value = calloc(n_dpipe_action_value, sizeof(*dst->dpipe_action_value));
+ dst->n_dpipe_action_value = n_dpipe_action_value;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_dpipe_action_value_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_DPIPE_ACTION_VALUE) {
+ parg.data = &dst->dpipe_action_value[i];
+ if (devlink_dl_dpipe_action_value_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+devlink_dl_dpipe_header_fields_free(struct devlink_dl_dpipe_header_fields *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_dpipe_field; i++)
+ devlink_dl_dpipe_field_free(&obj->dpipe_field[i]);
+ free(obj->dpipe_field);
+}
+
+int devlink_dl_dpipe_header_fields_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_header_fields *dst = yarg->data;
+ unsigned int n_dpipe_field = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->dpipe_field)
+ return ynl_error_parse(yarg, "attribute already present (dl-dpipe-header-fields.dpipe-field)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_FIELD) {
+ n_dpipe_field++;
+ }
+ }
+
+ if (n_dpipe_field) {
+ dst->dpipe_field = calloc(n_dpipe_field, sizeof(*dst->dpipe_field));
+ dst->n_dpipe_field = n_dpipe_field;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_dpipe_field_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_DPIPE_FIELD) {
+ parg.data = &dst->dpipe_field[i];
+ if (devlink_dl_dpipe_field_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_resource_list_free(struct devlink_dl_resource_list *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_resource; i++)
+ devlink_dl_resource_free(&obj->resource[i]);
+ free(obj->resource);
+}
+
+int devlink_dl_resource_list_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_resource_list *dst = yarg->data;
+ unsigned int n_resource = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->resource)
+ return ynl_error_parse(yarg, "attribute already present (dl-resource-list.resource)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_RESOURCE) {
+ n_resource++;
+ }
+ }
+
+ if (n_resource) {
+ dst->resource = calloc(n_resource, sizeof(*dst->resource));
+ dst->n_resource = n_resource;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_resource_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_RESOURCE) {
+ parg.data = &dst->resource[i];
+ if (devlink_dl_resource_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
void devlink_dl_reload_act_info_free(struct devlink_dl_reload_act_info *obj)
{
unsigned int i;
@@ -327,6 +1634,186 @@ int devlink_dl_reload_act_info_parse(struct ynl_parse_arg *yarg,
return 0;
}
+void devlink_dl_dpipe_table_free(struct devlink_dl_dpipe_table *obj)
+{
+ free(obj->dpipe_table_name);
+ devlink_dl_dpipe_table_matches_free(&obj->dpipe_table_matches);
+ devlink_dl_dpipe_table_actions_free(&obj->dpipe_table_actions);
+}
+
+int devlink_dl_dpipe_table_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_table *dst = yarg->data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_TABLE_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dpipe_table_name_len = len;
+ dst->dpipe_table_name = malloc(len + 1);
+ memcpy(dst->dpipe_table_name, mnl_attr_get_str(attr), len);
+ dst->dpipe_table_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DPIPE_TABLE_SIZE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_table_size = 1;
+ dst->dpipe_table_size = mnl_attr_get_u64(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_TABLE_MATCHES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_table_matches = 1;
+
+ parg.rsp_policy = &devlink_dl_dpipe_table_matches_nest;
+ parg.data = &dst->dpipe_table_matches;
+ if (devlink_dl_dpipe_table_matches_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == DEVLINK_ATTR_DPIPE_TABLE_ACTIONS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_table_actions = 1;
+
+ parg.rsp_policy = &devlink_dl_dpipe_table_actions_nest;
+ parg.data = &dst->dpipe_table_actions;
+ if (devlink_dl_dpipe_table_actions_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_table_counters_enabled = 1;
+ dst->dpipe_table_counters_enabled = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_table_resource_id = 1;
+ dst->dpipe_table_resource_id = mnl_attr_get_u64(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_table_resource_units = 1;
+ dst->dpipe_table_resource_units = mnl_attr_get_u64(attr);
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_dpipe_entry_free(struct devlink_dl_dpipe_entry *obj)
+{
+ devlink_dl_dpipe_entry_match_values_free(&obj->dpipe_entry_match_values);
+ devlink_dl_dpipe_entry_action_values_free(&obj->dpipe_entry_action_values);
+}
+
+int devlink_dl_dpipe_entry_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_entry *dst = yarg->data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_ENTRY_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_entry_index = 1;
+ dst->dpipe_entry_index = mnl_attr_get_u64(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_entry_match_values = 1;
+
+ parg.rsp_policy = &devlink_dl_dpipe_entry_match_values_nest;
+ parg.data = &dst->dpipe_entry_match_values;
+ if (devlink_dl_dpipe_entry_match_values_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_entry_action_values = 1;
+
+ parg.rsp_policy = &devlink_dl_dpipe_entry_action_values_nest;
+ parg.data = &dst->dpipe_entry_action_values;
+ if (devlink_dl_dpipe_entry_action_values_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ } else if (type == DEVLINK_ATTR_DPIPE_ENTRY_COUNTER) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_entry_counter = 1;
+ dst->dpipe_entry_counter = mnl_attr_get_u64(attr);
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_dpipe_header_free(struct devlink_dl_dpipe_header *obj)
+{
+ free(obj->dpipe_header_name);
+ devlink_dl_dpipe_header_fields_free(&obj->dpipe_header_fields);
+}
+
+int devlink_dl_dpipe_header_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_header *dst = yarg->data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_HEADER_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dpipe_header_name_len = len;
+ dst->dpipe_header_name = malloc(len + 1);
+ memcpy(dst->dpipe_header_name, mnl_attr_get_str(attr), len);
+ dst->dpipe_header_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DPIPE_HEADER_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_header_id = 1;
+ dst->dpipe_header_id = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_HEADER_GLOBAL) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_header_global = 1;
+ dst->dpipe_header_global = mnl_attr_get_u8(attr);
+ } else if (type == DEVLINK_ATTR_DPIPE_HEADER_FIELDS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_header_fields = 1;
+
+ parg.rsp_policy = &devlink_dl_dpipe_header_fields_nest;
+ parg.data = &dst->dpipe_header_fields;
+ if (devlink_dl_dpipe_header_fields_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return 0;
+}
+
void devlink_dl_reload_stats_free(struct devlink_dl_reload_stats *obj)
{
unsigned int i;
@@ -376,6 +1863,153 @@ int devlink_dl_reload_stats_parse(struct ynl_parse_arg *yarg,
return 0;
}
+void devlink_dl_dpipe_tables_free(struct devlink_dl_dpipe_tables *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_dpipe_table; i++)
+ devlink_dl_dpipe_table_free(&obj->dpipe_table[i]);
+ free(obj->dpipe_table);
+}
+
+int devlink_dl_dpipe_tables_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_tables *dst = yarg->data;
+ unsigned int n_dpipe_table = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->dpipe_table)
+ return ynl_error_parse(yarg, "attribute already present (dl-dpipe-tables.dpipe-table)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_TABLE) {
+ n_dpipe_table++;
+ }
+ }
+
+ if (n_dpipe_table) {
+ dst->dpipe_table = calloc(n_dpipe_table, sizeof(*dst->dpipe_table));
+ dst->n_dpipe_table = n_dpipe_table;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_dpipe_table_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_DPIPE_TABLE) {
+ parg.data = &dst->dpipe_table[i];
+ if (devlink_dl_dpipe_table_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_dpipe_entries_free(struct devlink_dl_dpipe_entries *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_dpipe_entry; i++)
+ devlink_dl_dpipe_entry_free(&obj->dpipe_entry[i]);
+ free(obj->dpipe_entry);
+}
+
+int devlink_dl_dpipe_entries_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_entries *dst = yarg->data;
+ unsigned int n_dpipe_entry = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->dpipe_entry)
+ return ynl_error_parse(yarg, "attribute already present (dl-dpipe-entries.dpipe-entry)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_ENTRY) {
+ n_dpipe_entry++;
+ }
+ }
+
+ if (n_dpipe_entry) {
+ dst->dpipe_entry = calloc(n_dpipe_entry, sizeof(*dst->dpipe_entry));
+ dst->n_dpipe_entry = n_dpipe_entry;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_dpipe_entry_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_DPIPE_ENTRY) {
+ parg.data = &dst->dpipe_entry[i];
+ if (devlink_dl_dpipe_entry_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void devlink_dl_dpipe_headers_free(struct devlink_dl_dpipe_headers *obj)
+{
+ unsigned int i;
+
+ for (i = 0; i < obj->n_dpipe_header; i++)
+ devlink_dl_dpipe_header_free(&obj->dpipe_header[i]);
+ free(obj->dpipe_header);
+}
+
+int devlink_dl_dpipe_headers_parse(struct ynl_parse_arg *yarg,
+ const struct nlattr *nested)
+{
+ struct devlink_dl_dpipe_headers *dst = yarg->data;
+ unsigned int n_dpipe_header = 0;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+ int i;
+
+ parg.ys = yarg->ys;
+
+ if (dst->dpipe_header)
+ return ynl_error_parse(yarg, "attribute already present (dl-dpipe-headers.dpipe-header)");
+
+ mnl_attr_for_each_nested(attr, nested) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_DPIPE_HEADER) {
+ n_dpipe_header++;
+ }
+ }
+
+ if (n_dpipe_header) {
+ dst->dpipe_header = calloc(n_dpipe_header, sizeof(*dst->dpipe_header));
+ dst->n_dpipe_header = n_dpipe_header;
+ i = 0;
+ parg.rsp_policy = &devlink_dl_dpipe_header_nest;
+ mnl_attr_for_each_nested(attr, nested) {
+ if (mnl_attr_get_type(attr) == DEVLINK_ATTR_DPIPE_HEADER) {
+ parg.data = &dst->dpipe_header[i];
+ if (devlink_dl_dpipe_header_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ i++;
+ }
+ }
+ }
+
+ return 0;
+}
+
void devlink_dl_dev_stats_free(struct devlink_dl_dev_stats *obj)
{
devlink_dl_reload_stats_free(&obj->reload_stats);
@@ -475,11 +2109,6 @@ int devlink_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
return MNL_CB_ERROR;
dst->_present.reload_failed = 1;
dst->reload_failed = mnl_attr_get_u8(attr);
- } else if (type == DEVLINK_ATTR_RELOAD_ACTION) {
- if (ynl_attr_validate(yarg, attr))
- return MNL_CB_ERROR;
- dst->_present.reload_action = 1;
- dst->reload_action = mnl_attr_get_u8(attr);
} else if (type == DEVLINK_ATTR_DEV_STATS) {
if (ynl_attr_validate(yarg, attr))
return MNL_CB_ERROR;
@@ -756,6 +2385,241 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_PORT_SET ============== */
+/* DEVLINK_CMD_PORT_SET - do */
+void devlink_port_set_req_free(struct devlink_port_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ devlink_dl_port_function_free(&req->port_function);
+ free(req);
+}
+
+int devlink_port_set(struct ynl_sock *ys, struct devlink_port_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PORT_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.port_type)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_PORT_TYPE, req->port_type);
+ if (req->_present.port_function)
+ devlink_dl_port_function_put(nlh, DEVLINK_ATTR_PORT_FUNCTION, &req->port_function);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
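Illustrative usage sketch (editor's addition, not part of this patch): the generated "do" requests are plain structs gated by _present flags and released with the matching *_req_free() helper. The sketch assumes ynl_sock_create()/ynl_sock_destroy(), struct ynl_error (with a msg buffer, as in the YNL samples) and the exported ynl_devlink_family object from the YNL C library, plus the "ynl.h"/"devlink-user.h" header names used under tools/net/ynl; it fills the request directly rather than via any inline _alloc()/_set_*() helpers the companion header may provide. The PCI address and MAC value are placeholders.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/if_ether.h>

#include "ynl.h"
#include "devlink-user.h"

/* Set the hardware address of port 1 through the nested port-function
 * attribute serialized by devlink_dl_port_function_put() above. */
static int set_port_hw_addr(struct ynl_sock *ys, const unsigned char *mac)
{
        struct devlink_port_set_req *req;
        int ret;

        req = calloc(1, sizeof(*req));
        if (!req)
                return -1;

        req->bus_name = strdup("pci");
        req->_present.bus_name_len = strlen(req->bus_name);
        req->dev_name = strdup("0000:01:00.0");
        req->_present.dev_name_len = strlen(req->dev_name);
        req->port_index = 1;
        req->_present.port_index = 1;

        req->port_function.hw_addr = malloc(ETH_ALEN);
        memcpy(req->port_function.hw_addr, mac, ETH_ALEN);
        req->port_function._present.hw_addr_len = ETH_ALEN;
        req->_present.port_function = 1;

        ret = devlink_port_set(ys, req);
        devlink_port_set_req_free(req);
        return ret;
}

int main(void)
{
        const unsigned char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        struct ynl_error yerr;
        struct ynl_sock *ys;

        ys = ynl_sock_create(&ynl_devlink_family, &yerr);
        if (!ys) {
                fprintf(stderr, "YNL: %s\n", yerr.msg);
                return 1;
        }
        if (set_port_hw_addr(ys, mac) < 0)
                fprintf(stderr, "devlink_port_set failed\n");
        ynl_sock_destroy(ys);
        return 0;
}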
+/* ============== DEVLINK_CMD_PORT_NEW ============== */
+/* DEVLINK_CMD_PORT_NEW - do */
+void devlink_port_new_req_free(struct devlink_port_new_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_port_new_rsp_free(struct devlink_port_new_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_port_new_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct devlink_port_new_rsp *dst;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_port_new_rsp *
+devlink_port_new(struct ynl_sock *ys, struct devlink_port_new_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_port_new_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PORT_NEW, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.port_flavour)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_PORT_FLAVOUR, req->port_flavour);
+ if (req->_present.port_pci_pf_number)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, req->port_pci_pf_number);
+ if (req->_present.port_pci_sf_number)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_PCI_SF_NUMBER, req->port_pci_sf_number);
+ if (req->_present.port_controller_number)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER, req->port_controller_number);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_port_new_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_PORT_NEW;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_port_new_rsp_free(rsp);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_PORT_DEL ============== */
+/* DEVLINK_CMD_PORT_DEL - do */
+void devlink_port_del_req_free(struct devlink_port_del_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_port_del(struct ynl_sock *ys, struct devlink_port_del_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PORT_DEL, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_PORT_SPLIT ============== */
+/* DEVLINK_CMD_PORT_SPLIT - do */
+void devlink_port_split_req_free(struct devlink_port_split_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_port_split(struct ynl_sock *ys, struct devlink_port_split_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PORT_SPLIT, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.port_split_count)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_SPLIT_COUNT, req->port_split_count);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_PORT_UNSPLIT ============== */
+/* DEVLINK_CMD_PORT_UNSPLIT - do */
+void devlink_port_unsplit_req_free(struct devlink_port_unsplit_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_port_unsplit(struct ynl_sock *ys,
+ struct devlink_port_unsplit_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PORT_UNSPLIT, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
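Another illustrative fragment (same assumptions as the sketch after devlink_port_set() above): every optional attribute is emitted only when its _present flag is set, so split/unsplit requests reduce to filling a few fields. Values are placeholders.

/* Split port 0 into two ports; devlink_port_unsplit() reverses this. */
static int split_port_in_two(struct ynl_sock *ys)
{
        struct devlink_port_split_req *req;
        int ret;

        req = calloc(1, sizeof(*req));
        req->bus_name = strdup("pci");
        req->_present.bus_name_len = strlen(req->bus_name);
        req->dev_name = strdup("0000:01:00.0");
        req->_present.dev_name_len = strlen(req->dev_name);
        req->port_index = 0;
        req->_present.port_index = 1;
        req->port_split_count = 2;      /* number of ports to split into */
        req->_present.port_split_count = 1;

        ret = devlink_port_split(ys, req);
        devlink_port_split_req_free(req);
        return ret;
}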
/* ============== DEVLINK_CMD_SB_GET ============== */
/* DEVLINK_CMD_SB_GET - do */
void devlink_sb_get_req_free(struct devlink_sb_get_req *req)
@@ -1048,6 +2912,44 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_SB_POOL_SET ============== */
+/* DEVLINK_CMD_SB_POOL_SET - do */
+void devlink_sb_pool_set_req_free(struct devlink_sb_pool_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_sb_pool_set(struct ynl_sock *ys,
+ struct devlink_sb_pool_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_POOL_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.sb_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
+ if (req->_present.sb_pool_index)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_SB_POOL_INDEX, req->sb_pool_index);
+ if (req->_present.sb_pool_threshold_type)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE, req->sb_pool_threshold_type);
+ if (req->_present.sb_pool_size)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_POOL_SIZE, req->sb_pool_size);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
/* ============== DEVLINK_CMD_SB_PORT_POOL_GET ============== */
/* DEVLINK_CMD_SB_PORT_POOL_GET - do */
void
@@ -1209,6 +3111,45 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_SB_PORT_POOL_SET ============== */
+/* DEVLINK_CMD_SB_PORT_POOL_SET - do */
+void
+devlink_sb_port_pool_set_req_free(struct devlink_sb_port_pool_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_sb_port_pool_set(struct ynl_sock *ys,
+ struct devlink_sb_port_pool_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_PORT_POOL_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.sb_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
+ if (req->_present.sb_pool_index)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_SB_POOL_INDEX, req->sb_pool_index);
+ if (req->_present.sb_threshold)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_THRESHOLD, req->sb_threshold);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
/* ============== DEVLINK_CMD_SB_TC_POOL_BIND_GET ============== */
/* DEVLINK_CMD_SB_TC_POOL_BIND_GET - do */
void
@@ -1378,6 +3319,840 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_SB_TC_POOL_BIND_SET ============== */
+/* DEVLINK_CMD_SB_TC_POOL_BIND_SET - do */
+void
+devlink_sb_tc_pool_bind_set_req_free(struct devlink_sb_tc_pool_bind_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_sb_tc_pool_bind_set(struct ynl_sock *ys,
+ struct devlink_sb_tc_pool_bind_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_TC_POOL_BIND_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.sb_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
+ if (req->_present.sb_pool_index)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_SB_POOL_INDEX, req->sb_pool_index);
+ if (req->_present.sb_pool_type)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_SB_POOL_TYPE, req->sb_pool_type);
+ if (req->_present.sb_tc_index)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_SB_TC_INDEX, req->sb_tc_index);
+ if (req->_present.sb_threshold)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_THRESHOLD, req->sb_threshold);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_SB_OCC_SNAPSHOT ============== */
+/* DEVLINK_CMD_SB_OCC_SNAPSHOT - do */
+void devlink_sb_occ_snapshot_req_free(struct devlink_sb_occ_snapshot_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_sb_occ_snapshot(struct ynl_sock *ys,
+ struct devlink_sb_occ_snapshot_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_OCC_SNAPSHOT, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.sb_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_SB_OCC_MAX_CLEAR ============== */
+/* DEVLINK_CMD_SB_OCC_MAX_CLEAR - do */
+void
+devlink_sb_occ_max_clear_req_free(struct devlink_sb_occ_max_clear_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_sb_occ_max_clear(struct ynl_sock *ys,
+ struct devlink_sb_occ_max_clear_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SB_OCC_MAX_CLEAR, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.sb_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_SB_INDEX, req->sb_index);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_ESWITCH_GET ============== */
+/* DEVLINK_CMD_ESWITCH_GET - do */
+void devlink_eswitch_get_req_free(struct devlink_eswitch_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_eswitch_get_rsp_free(struct devlink_eswitch_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_eswitch_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_eswitch_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_ESWITCH_MODE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.eswitch_mode = 1;
+ dst->eswitch_mode = mnl_attr_get_u16(attr);
+ } else if (type == DEVLINK_ATTR_ESWITCH_INLINE_MODE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.eswitch_inline_mode = 1;
+ dst->eswitch_inline_mode = mnl_attr_get_u16(attr);
+ } else if (type == DEVLINK_ATTR_ESWITCH_ENCAP_MODE) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.eswitch_encap_mode = 1;
+ dst->eswitch_encap_mode = mnl_attr_get_u8(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_eswitch_get_rsp *
+devlink_eswitch_get(struct ynl_sock *ys, struct devlink_eswitch_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_eswitch_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_ESWITCH_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_eswitch_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_ESWITCH_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_eswitch_get_rsp_free(rsp);
+ return NULL;
+}
+
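Getters hand back a heap-allocated response that the caller owns and releases with the matching *_rsp_free(); an illustrative fragment (same assumptions as the earlier sketches) reading back the eswitch mode:

static void print_eswitch_mode(struct ynl_sock *ys)
{
        struct devlink_eswitch_get_req *req;
        struct devlink_eswitch_get_rsp *rsp;

        req = calloc(1, sizeof(*req));
        req->bus_name = strdup("pci");
        req->_present.bus_name_len = strlen(req->bus_name);
        req->dev_name = strdup("0000:01:00.0");
        req->_present.dev_name_len = strlen(req->dev_name);

        rsp = devlink_eswitch_get(ys, req);
        devlink_eswitch_get_req_free(req);
        if (!rsp)
                return;

        if (rsp->_present.eswitch_mode)
                printf("eswitch mode: %u\n", (unsigned int)rsp->eswitch_mode);
        devlink_eswitch_get_rsp_free(rsp);
}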
+/* ============== DEVLINK_CMD_ESWITCH_SET ============== */
+/* DEVLINK_CMD_ESWITCH_SET - do */
+void devlink_eswitch_set_req_free(struct devlink_eswitch_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_eswitch_set(struct ynl_sock *ys,
+ struct devlink_eswitch_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_ESWITCH_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.eswitch_mode)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_ESWITCH_MODE, req->eswitch_mode);
+ if (req->_present.eswitch_inline_mode)
+ mnl_attr_put_u16(nlh, DEVLINK_ATTR_ESWITCH_INLINE_MODE, req->eswitch_inline_mode);
+ if (req->_present.eswitch_encap_mode)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, req->eswitch_encap_mode);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_DPIPE_TABLE_GET ============== */
+/* DEVLINK_CMD_DPIPE_TABLE_GET - do */
+void devlink_dpipe_table_get_req_free(struct devlink_dpipe_table_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->dpipe_table_name);
+ free(req);
+}
+
+void devlink_dpipe_table_get_rsp_free(struct devlink_dpipe_table_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ devlink_dl_dpipe_tables_free(&rsp->dpipe_tables);
+ free(rsp);
+}
+
+int devlink_dpipe_table_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_dpipe_table_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DPIPE_TABLES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_tables = 1;
+
+ parg.rsp_policy = &devlink_dl_dpipe_tables_nest;
+ parg.data = &dst->dpipe_tables;
+ if (devlink_dl_dpipe_tables_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_dpipe_table_get_rsp *
+devlink_dpipe_table_get(struct ynl_sock *ys,
+ struct devlink_dpipe_table_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_dpipe_table_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_DPIPE_TABLE_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.dpipe_table_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DPIPE_TABLE_NAME, req->dpipe_table_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_dpipe_table_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_DPIPE_TABLE_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_dpipe_table_get_rsp_free(rsp);
+ return NULL;
+}
+
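Nested lists such as DEVLINK_ATTR_DPIPE_TABLES are parsed into counted arrays (n_dpipe_table / dpipe_table[]) by the helpers earlier in the file; an illustrative fragment (same assumptions as above) walking the tables of one device:

static void list_dpipe_tables(struct ynl_sock *ys)
{
        struct devlink_dpipe_table_get_req *req;
        struct devlink_dpipe_table_get_rsp *rsp;
        unsigned int i;

        req = calloc(1, sizeof(*req));
        req->bus_name = strdup("pci");
        req->_present.bus_name_len = strlen(req->bus_name);
        req->dev_name = strdup("0000:01:00.0");
        req->_present.dev_name_len = strlen(req->dev_name);

        rsp = devlink_dpipe_table_get(ys, req);
        devlink_dpipe_table_get_req_free(req);
        if (!rsp)
                return;

        for (i = 0; i < rsp->dpipe_tables.n_dpipe_table; i++) {
                struct devlink_dl_dpipe_table *t =
                        &rsp->dpipe_tables.dpipe_table[i];

                if (t->_present.dpipe_table_name_len)
                        printf("table %s, size %llu\n", t->dpipe_table_name,
                               (unsigned long long)t->dpipe_table_size);
        }
        devlink_dpipe_table_get_rsp_free(rsp);
}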
+/* ============== DEVLINK_CMD_DPIPE_ENTRIES_GET ============== */
+/* DEVLINK_CMD_DPIPE_ENTRIES_GET - do */
+void
+devlink_dpipe_entries_get_req_free(struct devlink_dpipe_entries_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->dpipe_table_name);
+ free(req);
+}
+
+void
+devlink_dpipe_entries_get_rsp_free(struct devlink_dpipe_entries_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ devlink_dl_dpipe_entries_free(&rsp->dpipe_entries);
+ free(rsp);
+}
+
+int devlink_dpipe_entries_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_dpipe_entries_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DPIPE_ENTRIES) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_entries = 1;
+
+ parg.rsp_policy = &devlink_dl_dpipe_entries_nest;
+ parg.data = &dst->dpipe_entries;
+ if (devlink_dl_dpipe_entries_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_dpipe_entries_get_rsp *
+devlink_dpipe_entries_get(struct ynl_sock *ys,
+ struct devlink_dpipe_entries_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_dpipe_entries_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_DPIPE_ENTRIES_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.dpipe_table_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DPIPE_TABLE_NAME, req->dpipe_table_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_dpipe_entries_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_dpipe_entries_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_DPIPE_HEADERS_GET ============== */
+/* DEVLINK_CMD_DPIPE_HEADERS_GET - do */
+void
+devlink_dpipe_headers_get_req_free(struct devlink_dpipe_headers_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void
+devlink_dpipe_headers_get_rsp_free(struct devlink_dpipe_headers_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ devlink_dl_dpipe_headers_free(&rsp->dpipe_headers);
+ free(rsp);
+}
+
+int devlink_dpipe_headers_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_dpipe_headers_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DPIPE_HEADERS) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.dpipe_headers = 1;
+
+ parg.rsp_policy = &devlink_dl_dpipe_headers_nest;
+ parg.data = &dst->dpipe_headers;
+ if (devlink_dl_dpipe_headers_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_dpipe_headers_get_rsp *
+devlink_dpipe_headers_get(struct ynl_sock *ys,
+ struct devlink_dpipe_headers_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_dpipe_headers_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_DPIPE_HEADERS_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_dpipe_headers_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_DPIPE_HEADERS_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_dpipe_headers_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET ============== */
+/* DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET - do */
+void
+devlink_dpipe_table_counters_set_req_free(struct devlink_dpipe_table_counters_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->dpipe_table_name);
+ free(req);
+}
+
+int devlink_dpipe_table_counters_set(struct ynl_sock *ys,
+ struct devlink_dpipe_table_counters_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.dpipe_table_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DPIPE_TABLE_NAME, req->dpipe_table_name);
+ if (req->_present.dpipe_table_counters_enabled)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED, req->dpipe_table_counters_enabled);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_RESOURCE_SET ============== */
+/* DEVLINK_CMD_RESOURCE_SET - do */
+void devlink_resource_set_req_free(struct devlink_resource_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_resource_set(struct ynl_sock *ys,
+ struct devlink_resource_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_RESOURCE_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.resource_id)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_RESOURCE_ID, req->resource_id);
+ if (req->_present.resource_size)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_RESOURCE_SIZE, req->resource_size);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_RESOURCE_DUMP ============== */
+/* DEVLINK_CMD_RESOURCE_DUMP - do */
+void devlink_resource_dump_req_free(struct devlink_resource_dump_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_resource_dump_rsp_free(struct devlink_resource_dump_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ devlink_dl_resource_list_free(&rsp->resource_list);
+ free(rsp);
+}
+
+int devlink_resource_dump_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_resource_dump_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_RESOURCE_LIST) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.resource_list = 1;
+
+ parg.rsp_policy = &devlink_dl_resource_list_nest;
+ parg.data = &dst->resource_list;
+ if (devlink_dl_resource_list_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_resource_dump_rsp *
+devlink_resource_dump(struct ynl_sock *ys,
+ struct devlink_resource_dump_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_resource_dump_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_RESOURCE_DUMP, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_resource_dump_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_RESOURCE_DUMP;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_resource_dump_rsp_free(rsp);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_RELOAD ============== */
+/* DEVLINK_CMD_RELOAD - do */
+void devlink_reload_req_free(struct devlink_reload_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_reload_rsp_free(struct devlink_reload_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_reload_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct ynl_parse_arg *yarg = data;
+ struct devlink_reload_rsp *dst;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.reload_actions_performed = 1;
+ memcpy(&dst->reload_actions_performed, mnl_attr_get_payload(attr), sizeof(struct nla_bitfield32));
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_reload_rsp *
+devlink_reload(struct ynl_sock *ys, struct devlink_reload_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_reload_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_RELOAD, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.reload_action)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_RELOAD_ACTION, req->reload_action);
+ if (req->_present.reload_limits)
+ mnl_attr_put(nlh, DEVLINK_ATTR_RELOAD_LIMITS, sizeof(struct nla_bitfield32), &req->reload_limits);
+ if (req->_present.netns_pid)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_NETNS_PID, req->netns_pid);
+ if (req->_present.netns_fd)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_NETNS_FD, req->netns_fd);
+ if (req->_present.netns_id)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_NETNS_ID, req->netns_id);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_reload_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_RELOAD;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_reload_rsp_free(rsp);
+ return NULL;
+}
+
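devlink_reload() mixes a plain u8 action with bitfield32 attributes, and the kernel reports the actions it actually performed back in rsp->reload_actions_performed; an illustrative fragment (same assumptions as above; DEVLINK_RELOAD_ACTION_DRIVER_REINIT and struct nla_bitfield32 come from the uAPI headers):

static void reinit_driver(struct ynl_sock *ys)
{
        struct devlink_reload_req *req;
        struct devlink_reload_rsp *rsp;

        req = calloc(1, sizeof(*req));
        req->bus_name = strdup("pci");
        req->_present.bus_name_len = strlen(req->bus_name);
        req->dev_name = strdup("0000:01:00.0");
        req->_present.dev_name_len = strlen(req->dev_name);
        req->reload_action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT;
        req->_present.reload_action = 1;

        rsp = devlink_reload(ys, req);
        devlink_reload_req_free(req);
        if (!rsp)
                return;

        if (rsp->_present.reload_actions_performed)
                printf("actions performed: 0x%x\n",
                       rsp->reload_actions_performed.value);
        devlink_reload_rsp_free(rsp);
}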
/* ============== DEVLINK_CMD_PARAM_GET ============== */
/* DEVLINK_CMD_PARAM_GET - do */
void devlink_param_get_req_free(struct devlink_param_get_req *req)
@@ -1530,6 +4305,42 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_PARAM_SET ============== */
+/* DEVLINK_CMD_PARAM_SET - do */
+void devlink_param_set_req_free(struct devlink_param_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->param_name);
+ free(req);
+}
+
+int devlink_param_set(struct ynl_sock *ys, struct devlink_param_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PARAM_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.param_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_PARAM_NAME, req->param_name);
+ if (req->_present.param_type)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_PARAM_TYPE, req->param_type);
+ if (req->_present.param_value_cmode)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_PARAM_VALUE_CMODE, req->param_value_cmode);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
/* ============== DEVLINK_CMD_REGION_GET ============== */
/* DEVLINK_CMD_REGION_GET - do */
void devlink_region_get_req_free(struct devlink_region_get_req *req)
@@ -1689,6 +4500,446 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_REGION_NEW ============== */
+/* DEVLINK_CMD_REGION_NEW - do */
+void devlink_region_new_req_free(struct devlink_region_new_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->region_name);
+ free(req);
+}
+
+void devlink_region_new_rsp_free(struct devlink_region_new_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp->region_name);
+ free(rsp);
+}
+
+int devlink_region_new_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_region_new_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_REGION_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.region_name_len = len;
+ dst->region_name = malloc(len + 1);
+ memcpy(dst->region_name, mnl_attr_get_str(attr), len);
+ dst->region_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_REGION_SNAPSHOT_ID) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.region_snapshot_id = 1;
+ dst->region_snapshot_id = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_region_new_rsp *
+devlink_region_new(struct ynl_sock *ys, struct devlink_region_new_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_region_new_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_REGION_NEW, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.region_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_REGION_NAME, req->region_name);
+ if (req->_present.region_snapshot_id)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_REGION_SNAPSHOT_ID, req->region_snapshot_id);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_region_new_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_REGION_NEW;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_region_new_rsp_free(rsp);
+ return NULL;
+}
+
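For orientation, a minimal caller-side sketch of the do-request above. It assumes the devlink_region_new_req_alloc()/devlink_region_new_req_set_*() helpers declared elsewhere in devlink-user.h (they follow the generated setter pattern visible later in this patch) and an already-open devlink ynl socket; it is not part of the generated file.

/* Illustrative only; not part of the generated file. */
#include <stdio.h>

#include "devlink-user.h"

/* Take a new snapshot of a region and print the id the kernel allocated. */
static int region_snapshot(struct ynl_sock *ys, const char *bus,
                           const char *dev, const char *region)
{
        struct devlink_region_new_req *req;
        struct devlink_region_new_rsp *rsp;
        int ret = -1;

        req = devlink_region_new_req_alloc();   /* assumed generated helpers */
        if (!req)
                return -1;
        devlink_region_new_req_set_bus_name(req, bus);
        devlink_region_new_req_set_dev_name(req, dev);
        devlink_region_new_req_set_region_name(req, region);

        rsp = devlink_region_new(ys, req);
        devlink_region_new_req_free(req);
        if (rsp) {
                if (rsp->_present.region_snapshot_id)
                        printf("snapshot id: %u\n", rsp->region_snapshot_id);
                devlink_region_new_rsp_free(rsp);
                ret = 0;
        }
        return ret;
}
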
+/* ============== DEVLINK_CMD_REGION_DEL ============== */
+/* DEVLINK_CMD_REGION_DEL - do */
+void devlink_region_del_req_free(struct devlink_region_del_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->region_name);
+ free(req);
+}
+
+int devlink_region_del(struct ynl_sock *ys, struct devlink_region_del_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_REGION_DEL, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.region_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_REGION_NAME, req->region_name);
+ if (req->_present.region_snapshot_id)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_REGION_SNAPSHOT_ID, req->region_snapshot_id);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_REGION_READ ============== */
+/* DEVLINK_CMD_REGION_READ - dump */
+int devlink_region_read_rsp_dump_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_region_read_rsp_dump *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ } else if (type == DEVLINK_ATTR_REGION_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.region_name_len = len;
+ dst->region_name = malloc(len + 1);
+ memcpy(dst->region_name, mnl_attr_get_str(attr), len);
+ dst->region_name[len] = 0;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+void
+devlink_region_read_rsp_list_free(struct devlink_region_read_rsp_list *rsp)
+{
+ struct devlink_region_read_rsp_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp->obj.region_name);
+ free(rsp);
+ }
+}
+
+struct devlink_region_read_rsp_list *
+devlink_region_read_dump(struct ynl_sock *ys,
+ struct devlink_region_read_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_region_read_rsp_list);
+ yds.cb = devlink_region_read_rsp_dump_parse;
+ yds.rsp_cmd = DEVLINK_CMD_REGION_READ;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_REGION_READ, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.region_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_REGION_NAME, req->region_name);
+ if (req->_present.region_snapshot_id)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_REGION_SNAPSHOT_ID, req->region_snapshot_id);
+ if (req->_present.region_direct)
+ mnl_attr_put(nlh, DEVLINK_ATTR_REGION_DIRECT, 0, NULL);
+ if (req->_present.region_chunk_addr)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_REGION_CHUNK_ADDR, req->region_chunk_addr);
+ if (req->_present.region_chunk_len)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_REGION_CHUNK_LEN, req->region_chunk_len);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_region_read_rsp_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_PORT_PARAM_GET ============== */
+/* DEVLINK_CMD_PORT_PARAM_GET - do */
+void devlink_port_param_get_req_free(struct devlink_port_param_get_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+void devlink_port_param_get_rsp_free(struct devlink_port_param_get_rsp *rsp)
+{
+ free(rsp->bus_name);
+ free(rsp->dev_name);
+ free(rsp);
+}
+
+int devlink_port_param_get_rsp_parse(const struct nlmsghdr *nlh, void *data)
+{
+ struct devlink_port_param_get_rsp *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+
+ dst = yarg->data;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_BUS_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.bus_name_len = len;
+ dst->bus_name = malloc(len + 1);
+ memcpy(dst->bus_name, mnl_attr_get_str(attr), len);
+ dst->bus_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_DEV_NAME) {
+ unsigned int len;
+
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+
+ len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));
+ dst->_present.dev_name_len = len;
+ dst->dev_name = malloc(len + 1);
+ memcpy(dst->dev_name, mnl_attr_get_str(attr), len);
+ dst->dev_name[len] = 0;
+ } else if (type == DEVLINK_ATTR_PORT_INDEX) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.port_index = 1;
+ dst->port_index = mnl_attr_get_u32(attr);
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+struct devlink_port_param_get_rsp *
+devlink_port_param_get(struct ynl_sock *ys,
+ struct devlink_port_param_get_req *req)
+{
+ struct ynl_req_state yrs = { .yarg = { .ys = ys, }, };
+ struct devlink_port_param_get_rsp *rsp;
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PORT_PARAM_GET, 1);
+ ys->req_policy = &devlink_nest;
+ yrs.yarg.rsp_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+
+ rsp = calloc(1, sizeof(*rsp));
+ yrs.yarg.data = rsp;
+ yrs.cb = devlink_port_param_get_rsp_parse;
+ yrs.rsp_cmd = DEVLINK_CMD_PORT_PARAM_GET;
+
+ err = ynl_exec(ys, nlh, &yrs);
+ if (err < 0)
+ goto err_free;
+
+ return rsp;
+
+err_free:
+ devlink_port_param_get_rsp_free(rsp);
+ return NULL;
+}
+
+/* DEVLINK_CMD_PORT_PARAM_GET - dump */
+void devlink_port_param_get_list_free(struct devlink_port_param_get_list *rsp)
+{
+ struct devlink_port_param_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.bus_name);
+ free(rsp->obj.dev_name);
+ free(rsp);
+ }
+}
+
+struct devlink_port_param_get_list *
+devlink_port_param_get_dump(struct ynl_sock *ys)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_port_param_get_list);
+ yds.cb = devlink_port_param_get_rsp_parse;
+ yds.rsp_cmd = DEVLINK_CMD_PORT_PARAM_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_PORT_PARAM_GET, 1);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_port_param_get_list_free(yds.first);
+ return NULL;
+}
+
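As the list-free routine above shows, dump results come back as a YNL_LIST_END-terminated singly linked list. A minimal consumer sketch, assuming an already-open devlink ynl socket; not part of the generated file:

/* Illustrative only; not part of the generated file. */
#include <stdio.h>

#include "devlink-user.h"

/* Dump all port parameters and print the identity of each entry. */
static void print_port_params(struct ynl_sock *ys)
{
        struct devlink_port_param_get_list *list, *pp;

        list = devlink_port_param_get_dump(ys);
        if (!list)
                return;

        /* Same YNL_LIST_END-terminated walk the generated *_list_free() uses. */
        for (pp = list; pp && (void *)pp != YNL_LIST_END; pp = pp->next)
                if (pp->obj._present.bus_name_len && pp->obj._present.dev_name_len)
                        printf("%s/%s port %u\n", pp->obj.bus_name,
                               pp->obj.dev_name, pp->obj.port_index);

        devlink_port_param_get_list_free(list);
}
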
+/* ============== DEVLINK_CMD_PORT_PARAM_SET ============== */
+/* DEVLINK_CMD_PORT_PARAM_SET - do */
+void devlink_port_param_set_req_free(struct devlink_port_param_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_port_param_set(struct ynl_sock *ys,
+ struct devlink_port_param_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_PORT_PARAM_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
/* ============== DEVLINK_CMD_INFO_GET ============== */
/* DEVLINK_CMD_INFO_GET - do */
void devlink_info_get_req_free(struct devlink_info_get_req *req)
@@ -2093,6 +5344,276 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_SET ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_SET - do */
+void
+devlink_health_reporter_set_req_free(struct devlink_health_reporter_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->health_reporter_name);
+ free(req);
+}
+
+int devlink_health_reporter_set(struct ynl_sock *ys,
+ struct devlink_health_reporter_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_HEALTH_REPORTER_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.health_reporter_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
+ if (req->_present.health_reporter_graceful_period)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD, req->health_reporter_graceful_period);
+ if (req->_present.health_reporter_auto_recover)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER, req->health_reporter_auto_recover);
+ if (req->_present.health_reporter_auto_dump)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP, req->health_reporter_auto_dump);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_RECOVER ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_RECOVER - do */
+void
+devlink_health_reporter_recover_req_free(struct devlink_health_reporter_recover_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->health_reporter_name);
+ free(req);
+}
+
+int devlink_health_reporter_recover(struct ynl_sock *ys,
+ struct devlink_health_reporter_recover_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_HEALTH_REPORTER_RECOVER, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.health_reporter_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE - do */
+void
+devlink_health_reporter_diagnose_req_free(struct devlink_health_reporter_diagnose_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->health_reporter_name);
+ free(req);
+}
+
+int devlink_health_reporter_diagnose(struct ynl_sock *ys,
+ struct devlink_health_reporter_diagnose_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.health_reporter_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET - dump */
+int devlink_health_reporter_dump_get_rsp_dump_parse(const struct nlmsghdr *nlh,
+ void *data)
+{
+ struct devlink_health_reporter_dump_get_rsp_dump *dst;
+ struct ynl_parse_arg *yarg = data;
+ const struct nlattr *attr;
+ struct ynl_parse_arg parg;
+
+ dst = yarg->data;
+ parg.ys = yarg->ys;
+
+ mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+ unsigned int type = mnl_attr_get_type(attr);
+
+ if (type == DEVLINK_ATTR_FMSG) {
+ if (ynl_attr_validate(yarg, attr))
+ return MNL_CB_ERROR;
+ dst->_present.fmsg = 1;
+
+ parg.rsp_policy = &devlink_dl_fmsg_nest;
+ parg.data = &dst->fmsg;
+ if (devlink_dl_fmsg_parse(&parg, attr))
+ return MNL_CB_ERROR;
+ }
+ }
+
+ return MNL_CB_OK;
+}
+
+void
+devlink_health_reporter_dump_get_rsp_list_free(struct devlink_health_reporter_dump_get_rsp_list *rsp)
+{
+ struct devlink_health_reporter_dump_get_rsp_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ devlink_dl_fmsg_free(&rsp->obj.fmsg);
+ free(rsp);
+ }
+}
+
+struct devlink_health_reporter_dump_get_rsp_list *
+devlink_health_reporter_dump_get_dump(struct ynl_sock *ys,
+ struct devlink_health_reporter_dump_get_req_dump *req)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct devlink_health_reporter_dump_get_rsp_list);
+ yds.cb = devlink_health_reporter_dump_get_rsp_dump_parse;
+ yds.rsp_cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET;
+ yds.rsp_policy = &devlink_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.health_reporter_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ devlink_health_reporter_dump_get_rsp_list_free(yds.first);
+ return NULL;
+}
+
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR - do */
+void
+devlink_health_reporter_dump_clear_req_free(struct devlink_health_reporter_dump_clear_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->health_reporter_name);
+ free(req);
+}
+
+int devlink_health_reporter_dump_clear(struct ynl_sock *ys,
+ struct devlink_health_reporter_dump_clear_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.health_reporter_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_FLASH_UPDATE ============== */
+/* DEVLINK_CMD_FLASH_UPDATE - do */
+void devlink_flash_update_req_free(struct devlink_flash_update_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->flash_update_file_name);
+ free(req->flash_update_component);
+ free(req);
+}
+
+int devlink_flash_update(struct ynl_sock *ys,
+ struct devlink_flash_update_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_FLASH_UPDATE, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.flash_update_file_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME, req->flash_update_file_name);
+ if (req->_present.flash_update_component_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT, req->flash_update_component);
+ if (req->_present.flash_update_overwrite_mask)
+ mnl_attr_put(nlh, DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK, sizeof(struct nla_bitfield32), &req->flash_update_overwrite_mask);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
/* ============== DEVLINK_CMD_TRAP_GET ============== */
/* DEVLINK_CMD_TRAP_GET - do */
void devlink_trap_get_req_free(struct devlink_trap_get_req *req)
@@ -2245,6 +5766,40 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_TRAP_SET ============== */
+/* DEVLINK_CMD_TRAP_SET - do */
+void devlink_trap_set_req_free(struct devlink_trap_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->trap_name);
+ free(req);
+}
+
+int devlink_trap_set(struct ynl_sock *ys, struct devlink_trap_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_TRAP_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.trap_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_TRAP_NAME, req->trap_name);
+ if (req->_present.trap_action)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_TRAP_ACTION, req->trap_action);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
/* ============== DEVLINK_CMD_TRAP_GROUP_GET ============== */
/* DEVLINK_CMD_TRAP_GROUP_GET - do */
void devlink_trap_group_get_req_free(struct devlink_trap_group_get_req *req)
@@ -2398,6 +5953,43 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_TRAP_GROUP_SET ============== */
+/* DEVLINK_CMD_TRAP_GROUP_SET - do */
+void devlink_trap_group_set_req_free(struct devlink_trap_group_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->trap_group_name);
+ free(req);
+}
+
+int devlink_trap_group_set(struct ynl_sock *ys,
+ struct devlink_trap_group_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_TRAP_GROUP_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.trap_group_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_TRAP_GROUP_NAME, req->trap_group_name);
+ if (req->_present.trap_action)
+ mnl_attr_put_u8(nlh, DEVLINK_ATTR_TRAP_ACTION, req->trap_action);
+ if (req->_present.trap_policer_id)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_TRAP_POLICER_ID, req->trap_policer_id);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
/* ============== DEVLINK_CMD_TRAP_POLICER_GET ============== */
/* DEVLINK_CMD_TRAP_POLICER_GET - do */
void
@@ -2545,6 +6137,79 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_TRAP_POLICER_SET ============== */
+/* DEVLINK_CMD_TRAP_POLICER_SET - do */
+void
+devlink_trap_policer_set_req_free(struct devlink_trap_policer_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req);
+}
+
+int devlink_trap_policer_set(struct ynl_sock *ys,
+ struct devlink_trap_policer_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_TRAP_POLICER_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.trap_policer_id)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_TRAP_POLICER_ID, req->trap_policer_id);
+ if (req->_present.trap_policer_rate)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_TRAP_POLICER_RATE, req->trap_policer_rate);
+ if (req->_present.trap_policer_burst)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_TRAP_POLICER_BURST, req->trap_policer_burst);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_TEST ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_TEST - do */
+void
+devlink_health_reporter_test_req_free(struct devlink_health_reporter_test_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->health_reporter_name);
+ free(req);
+}
+
+int devlink_health_reporter_test(struct ynl_sock *ys,
+ struct devlink_health_reporter_test_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_HEALTH_REPORTER_TEST, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.port_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_PORT_INDEX, req->port_index);
+ if (req->_present.health_reporter_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_HEALTH_REPORTER_NAME, req->health_reporter_name);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
/* ============== DEVLINK_CMD_RATE_GET ============== */
/* DEVLINK_CMD_RATE_GET - do */
void devlink_rate_get_req_free(struct devlink_rate_get_req *req)
@@ -2704,6 +6369,124 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_RATE_SET ============== */
+/* DEVLINK_CMD_RATE_SET - do */
+void devlink_rate_set_req_free(struct devlink_rate_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->rate_node_name);
+ free(req->rate_parent_node_name);
+ free(req);
+}
+
+int devlink_rate_set(struct ynl_sock *ys, struct devlink_rate_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_RATE_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.rate_node_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_RATE_NODE_NAME, req->rate_node_name);
+ if (req->_present.rate_tx_share)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_RATE_TX_SHARE, req->rate_tx_share);
+ if (req->_present.rate_tx_max)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_RATE_TX_MAX, req->rate_tx_max);
+ if (req->_present.rate_tx_priority)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_RATE_TX_PRIORITY, req->rate_tx_priority);
+ if (req->_present.rate_tx_weight)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_RATE_TX_WEIGHT, req->rate_tx_weight);
+ if (req->_present.rate_parent_node_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_RATE_PARENT_NODE_NAME, req->rate_parent_node_name);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_RATE_NEW ============== */
+/* DEVLINK_CMD_RATE_NEW - do */
+void devlink_rate_new_req_free(struct devlink_rate_new_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->rate_node_name);
+ free(req->rate_parent_node_name);
+ free(req);
+}
+
+int devlink_rate_new(struct ynl_sock *ys, struct devlink_rate_new_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_RATE_NEW, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.rate_node_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_RATE_NODE_NAME, req->rate_node_name);
+ if (req->_present.rate_tx_share)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_RATE_TX_SHARE, req->rate_tx_share);
+ if (req->_present.rate_tx_max)
+ mnl_attr_put_u64(nlh, DEVLINK_ATTR_RATE_TX_MAX, req->rate_tx_max);
+ if (req->_present.rate_tx_priority)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_RATE_TX_PRIORITY, req->rate_tx_priority);
+ if (req->_present.rate_tx_weight)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_RATE_TX_WEIGHT, req->rate_tx_weight);
+ if (req->_present.rate_parent_node_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_RATE_PARENT_NODE_NAME, req->rate_parent_node_name);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
+/* ============== DEVLINK_CMD_RATE_DEL ============== */
+/* DEVLINK_CMD_RATE_DEL - do */
+void devlink_rate_del_req_free(struct devlink_rate_del_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->rate_node_name);
+ free(req);
+}
+
+int devlink_rate_del(struct ynl_sock *ys, struct devlink_rate_del_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_RATE_DEL, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.rate_node_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_RATE_NODE_NAME, req->rate_node_name);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
/* ============== DEVLINK_CMD_LINECARD_GET ============== */
/* DEVLINK_CMD_LINECARD_GET - do */
void devlink_linecard_get_req_free(struct devlink_linecard_get_req *req)
@@ -2847,6 +6630,41 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_LINECARD_SET ============== */
+/* DEVLINK_CMD_LINECARD_SET - do */
+void devlink_linecard_set_req_free(struct devlink_linecard_set_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ free(req->linecard_type);
+ free(req);
+}
+
+int devlink_linecard_set(struct ynl_sock *ys,
+ struct devlink_linecard_set_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_LINECARD_SET, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.linecard_index)
+ mnl_attr_put_u32(nlh, DEVLINK_ATTR_LINECARD_INDEX, req->linecard_index);
+ if (req->_present.linecard_type_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_LINECARD_TYPE, req->linecard_type);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
/* ============== DEVLINK_CMD_SELFTESTS_GET ============== */
/* DEVLINK_CMD_SELFTESTS_GET - do */
void devlink_selftests_get_req_free(struct devlink_selftests_get_req *req)
@@ -2977,6 +6795,39 @@ free_list:
return NULL;
}
+/* ============== DEVLINK_CMD_SELFTESTS_RUN ============== */
+/* DEVLINK_CMD_SELFTESTS_RUN - do */
+void devlink_selftests_run_req_free(struct devlink_selftests_run_req *req)
+{
+ free(req->bus_name);
+ free(req->dev_name);
+ devlink_dl_selftest_id_free(&req->selftests);
+ free(req);
+}
+
+int devlink_selftests_run(struct ynl_sock *ys,
+ struct devlink_selftests_run_req *req)
+{
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = ynl_gemsg_start_req(ys, ys->family_id, DEVLINK_CMD_SELFTESTS_RUN, 1);
+ ys->req_policy = &devlink_nest;
+
+ if (req->_present.bus_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_BUS_NAME, req->bus_name);
+ if (req->_present.dev_name_len)
+ mnl_attr_put_strz(nlh, DEVLINK_ATTR_DEV_NAME, req->dev_name);
+ if (req->_present.selftests)
+ devlink_dl_selftest_id_put(nlh, DEVLINK_ATTR_SELFTESTS, &req->selftests);
+
+ err = ynl_exec(ys, nlh, NULL);
+ if (err < 0)
+ return -1;
+
+ return 0;
+}
+
const struct ynl_family ynl_devlink_family = {
.name = "devlink",
};
diff --git a/tools/net/ynl/generated/devlink-user.h b/tools/net/ynl/generated/devlink-user.h
index 4b686d147613..1db4edc36eaa 100644
--- a/tools/net/ynl/generated/devlink-user.h
+++ b/tools/net/ynl/generated/devlink-user.h
@@ -9,6 +9,7 @@
#include <stdlib.h>
#include <string.h>
#include <linux/types.h>
+#include <linux/netlink.h>
#include <linux/devlink.h>
struct ynl_sock;
@@ -18,8 +19,130 @@ extern const struct ynl_family ynl_devlink_family;
/* Enums */
const char *devlink_op_str(int op);
const char *devlink_sb_pool_type_str(enum devlink_sb_pool_type value);
+const char *devlink_port_type_str(enum devlink_port_type value);
+const char *devlink_port_flavour_str(enum devlink_port_flavour value);
+const char *devlink_port_fn_state_str(enum devlink_port_fn_state value);
+const char *devlink_port_fn_opstate_str(enum devlink_port_fn_opstate value);
+const char *devlink_port_fn_attr_cap_str(enum devlink_port_fn_attr_cap value);
+const char *
+devlink_sb_threshold_type_str(enum devlink_sb_threshold_type value);
+const char *devlink_eswitch_mode_str(enum devlink_eswitch_mode value);
+const char *
+devlink_eswitch_inline_mode_str(enum devlink_eswitch_inline_mode value);
+const char *
+devlink_eswitch_encap_mode_str(enum devlink_eswitch_encap_mode value);
+const char *devlink_dpipe_match_type_str(enum devlink_dpipe_match_type value);
+const char *
+devlink_dpipe_action_type_str(enum devlink_dpipe_action_type value);
+const char *
+devlink_dpipe_field_mapping_type_str(enum devlink_dpipe_field_mapping_type value);
+const char *devlink_resource_unit_str(enum devlink_resource_unit value);
+const char *devlink_reload_action_str(enum devlink_reload_action value);
+const char *devlink_param_cmode_str(enum devlink_param_cmode value);
+const char *devlink_flash_overwrite_str(enum devlink_flash_overwrite value);
+const char *devlink_trap_action_str(enum devlink_trap_action value);
/* Common nested types */
+struct devlink_dl_dpipe_match {
+ struct {
+ __u32 dpipe_match_type:1;
+ __u32 dpipe_header_id:1;
+ __u32 dpipe_header_global:1;
+ __u32 dpipe_header_index:1;
+ __u32 dpipe_field_id:1;
+ } _present;
+
+ enum devlink_dpipe_match_type dpipe_match_type;
+ __u32 dpipe_header_id;
+ __u8 dpipe_header_global;
+ __u32 dpipe_header_index;
+ __u32 dpipe_field_id;
+};
+
+struct devlink_dl_dpipe_match_value {
+ struct {
+ __u32 dpipe_value_len;
+ __u32 dpipe_value_mask_len;
+ __u32 dpipe_value_mapping:1;
+ } _present;
+
+ unsigned int n_dpipe_match;
+ struct devlink_dl_dpipe_match *dpipe_match;
+ void *dpipe_value;
+ void *dpipe_value_mask;
+ __u32 dpipe_value_mapping;
+};
+
+struct devlink_dl_dpipe_action {
+ struct {
+ __u32 dpipe_action_type:1;
+ __u32 dpipe_header_id:1;
+ __u32 dpipe_header_global:1;
+ __u32 dpipe_header_index:1;
+ __u32 dpipe_field_id:1;
+ } _present;
+
+ enum devlink_dpipe_action_type dpipe_action_type;
+ __u32 dpipe_header_id;
+ __u8 dpipe_header_global;
+ __u32 dpipe_header_index;
+ __u32 dpipe_field_id;
+};
+
+struct devlink_dl_dpipe_action_value {
+ struct {
+ __u32 dpipe_value_len;
+ __u32 dpipe_value_mask_len;
+ __u32 dpipe_value_mapping:1;
+ } _present;
+
+ unsigned int n_dpipe_action;
+ struct devlink_dl_dpipe_action *dpipe_action;
+ void *dpipe_value;
+ void *dpipe_value_mask;
+ __u32 dpipe_value_mapping;
+};
+
+struct devlink_dl_dpipe_field {
+ struct {
+ __u32 dpipe_field_name_len;
+ __u32 dpipe_field_id:1;
+ __u32 dpipe_field_bitwidth:1;
+ __u32 dpipe_field_mapping_type:1;
+ } _present;
+
+ char *dpipe_field_name;
+ __u32 dpipe_field_id;
+ __u32 dpipe_field_bitwidth;
+ enum devlink_dpipe_field_mapping_type dpipe_field_mapping_type;
+};
+
+struct devlink_dl_resource {
+ struct {
+ __u32 resource_name_len;
+ __u32 resource_id:1;
+ __u32 resource_size:1;
+ __u32 resource_size_new:1;
+ __u32 resource_size_valid:1;
+ __u32 resource_size_min:1;
+ __u32 resource_size_max:1;
+ __u32 resource_size_gran:1;
+ __u32 resource_unit:1;
+ __u32 resource_occ:1;
+ } _present;
+
+ char *resource_name;
+ __u64 resource_id;
+ __u64 resource_size;
+ __u64 resource_size_new;
+ __u8 resource_size_valid;
+ __u64 resource_size_min;
+ __u64 resource_size_max;
+ __u64 resource_size_gran;
+ enum devlink_resource_unit resource_unit;
+ __u64 resource_occ;
+};
+
struct devlink_dl_info_version {
struct {
__u32 info_version_name_len;
@@ -30,6 +153,32 @@ struct devlink_dl_info_version {
char *info_version_value;
};
+struct devlink_dl_fmsg {
+ struct {
+ __u32 fmsg_obj_nest_start:1;
+ __u32 fmsg_pair_nest_start:1;
+ __u32 fmsg_arr_nest_start:1;
+ __u32 fmsg_nest_end:1;
+ __u32 fmsg_obj_name_len;
+ } _present;
+
+ char *fmsg_obj_name;
+};
+
+struct devlink_dl_port_function {
+ struct {
+ __u32 hw_addr_len;
+ __u32 state:1;
+ __u32 opstate:1;
+ __u32 caps:1;
+ } _present;
+
+ void *hw_addr;
+ enum devlink_port_fn_state state;
+ enum devlink_port_fn_opstate opstate;
+ struct nla_bitfield32 caps;
+};
+
struct devlink_dl_reload_stats_entry {
struct {
__u32 reload_stats_limit:1;
@@ -45,21 +194,120 @@ struct devlink_dl_reload_act_stats {
struct devlink_dl_reload_stats_entry *reload_stats_entry;
};
+struct devlink_dl_selftest_id {
+ struct {
+ __u32 flash:1;
+ } _present;
+};
+
+struct devlink_dl_dpipe_table_matches {
+ unsigned int n_dpipe_match;
+ struct devlink_dl_dpipe_match *dpipe_match;
+};
+
+struct devlink_dl_dpipe_table_actions {
+ unsigned int n_dpipe_action;
+ struct devlink_dl_dpipe_action *dpipe_action;
+};
+
+struct devlink_dl_dpipe_entry_match_values {
+ unsigned int n_dpipe_match_value;
+ struct devlink_dl_dpipe_match_value *dpipe_match_value;
+};
+
+struct devlink_dl_dpipe_entry_action_values {
+ unsigned int n_dpipe_action_value;
+ struct devlink_dl_dpipe_action_value *dpipe_action_value;
+};
+
+struct devlink_dl_dpipe_header_fields {
+ unsigned int n_dpipe_field;
+ struct devlink_dl_dpipe_field *dpipe_field;
+};
+
+struct devlink_dl_resource_list {
+ unsigned int n_resource;
+ struct devlink_dl_resource *resource;
+};
+
struct devlink_dl_reload_act_info {
struct {
__u32 reload_action:1;
} _present;
- __u8 reload_action;
+ enum devlink_reload_action reload_action;
unsigned int n_reload_action_stats;
struct devlink_dl_reload_act_stats *reload_action_stats;
};
+struct devlink_dl_dpipe_table {
+ struct {
+ __u32 dpipe_table_name_len;
+ __u32 dpipe_table_size:1;
+ __u32 dpipe_table_matches:1;
+ __u32 dpipe_table_actions:1;
+ __u32 dpipe_table_counters_enabled:1;
+ __u32 dpipe_table_resource_id:1;
+ __u32 dpipe_table_resource_units:1;
+ } _present;
+
+ char *dpipe_table_name;
+ __u64 dpipe_table_size;
+ struct devlink_dl_dpipe_table_matches dpipe_table_matches;
+ struct devlink_dl_dpipe_table_actions dpipe_table_actions;
+ __u8 dpipe_table_counters_enabled;
+ __u64 dpipe_table_resource_id;
+ __u64 dpipe_table_resource_units;
+};
+
+struct devlink_dl_dpipe_entry {
+ struct {
+ __u32 dpipe_entry_index:1;
+ __u32 dpipe_entry_match_values:1;
+ __u32 dpipe_entry_action_values:1;
+ __u32 dpipe_entry_counter:1;
+ } _present;
+
+ __u64 dpipe_entry_index;
+ struct devlink_dl_dpipe_entry_match_values dpipe_entry_match_values;
+ struct devlink_dl_dpipe_entry_action_values dpipe_entry_action_values;
+ __u64 dpipe_entry_counter;
+};
+
+struct devlink_dl_dpipe_header {
+ struct {
+ __u32 dpipe_header_name_len;
+ __u32 dpipe_header_id:1;
+ __u32 dpipe_header_global:1;
+ __u32 dpipe_header_fields:1;
+ } _present;
+
+ char *dpipe_header_name;
+ __u32 dpipe_header_id;
+ __u8 dpipe_header_global;
+ struct devlink_dl_dpipe_header_fields dpipe_header_fields;
+};
+
struct devlink_dl_reload_stats {
unsigned int n_reload_action_info;
struct devlink_dl_reload_act_info *reload_action_info;
};
+struct devlink_dl_dpipe_tables {
+ unsigned int n_dpipe_table;
+ struct devlink_dl_dpipe_table *dpipe_table;
+};
+
+struct devlink_dl_dpipe_entries {
+ unsigned int n_dpipe_entry;
+ struct devlink_dl_dpipe_entry *dpipe_entry;
+};
+
+struct devlink_dl_dpipe_headers {
+ unsigned int n_dpipe_header;
+ struct devlink_dl_dpipe_header *dpipe_header;
+};
+
struct devlink_dl_dev_stats {
struct {
__u32 reload_stats:1;
@@ -112,14 +360,12 @@ struct devlink_get_rsp {
__u32 bus_name_len;
__u32 dev_name_len;
__u32 reload_failed:1;
- __u32 reload_action:1;
__u32 dev_stats:1;
} _present;
char *bus_name;
char *dev_name;
__u8 reload_failed;
- __u8 reload_action;
struct devlink_dl_dev_stats dev_stats;
};
@@ -134,7 +380,7 @@ devlink_get(struct ynl_sock *ys, struct devlink_get_req *req);
/* DEVLINK_CMD_GET - dump */
struct devlink_get_list {
struct devlink_get_list *next;
- struct devlink_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_get_rsp obj __attribute__((aligned(8)));
};
void devlink_get_list_free(struct devlink_get_list *rsp);
@@ -262,7 +508,7 @@ struct devlink_port_get_rsp_dump {
struct devlink_port_get_rsp_list {
struct devlink_port_get_rsp_list *next;
- struct devlink_port_get_rsp_dump obj __attribute__ ((aligned (8)));
+ struct devlink_port_get_rsp_dump obj __attribute__((aligned(8)));
};
void devlink_port_get_rsp_list_free(struct devlink_port_get_rsp_list *rsp);
@@ -271,6 +517,377 @@ struct devlink_port_get_rsp_list *
devlink_port_get_dump(struct ynl_sock *ys,
struct devlink_port_get_req_dump *req);
+/* ============== DEVLINK_CMD_PORT_SET ============== */
+/* DEVLINK_CMD_PORT_SET - do */
+struct devlink_port_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 port_type:1;
+ __u32 port_function:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ enum devlink_port_type port_type;
+ struct devlink_dl_port_function port_function;
+};
+
+static inline struct devlink_port_set_req *devlink_port_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_port_set_req));
+}
+void devlink_port_set_req_free(struct devlink_port_set_req *req);
+
+static inline void
+devlink_port_set_req_set_bus_name(struct devlink_port_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_port_set_req_set_dev_name(struct devlink_port_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_port_set_req_set_port_index(struct devlink_port_set_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_port_set_req_set_port_type(struct devlink_port_set_req *req,
+ enum devlink_port_type port_type)
+{
+ req->_present.port_type = 1;
+ req->port_type = port_type;
+}
+static inline void
+devlink_port_set_req_set_port_function_hw_addr(struct devlink_port_set_req *req,
+ const void *hw_addr, size_t len)
+{
+ free(req->port_function.hw_addr);
+ req->port_function._present.hw_addr_len = len;
+ req->port_function.hw_addr = malloc(req->port_function._present.hw_addr_len);
+ memcpy(req->port_function.hw_addr, hw_addr, req->port_function._present.hw_addr_len);
+}
+static inline void
+devlink_port_set_req_set_port_function_state(struct devlink_port_set_req *req,
+ enum devlink_port_fn_state state)
+{
+ req->_present.port_function = 1;
+ req->port_function._present.state = 1;
+ req->port_function.state = state;
+}
+static inline void
+devlink_port_set_req_set_port_function_opstate(struct devlink_port_set_req *req,
+ enum devlink_port_fn_opstate opstate)
+{
+ req->_present.port_function = 1;
+ req->port_function._present.opstate = 1;
+ req->port_function.opstate = opstate;
+}
+static inline void
+devlink_port_set_req_set_port_function_caps(struct devlink_port_set_req *req,
+ struct nla_bitfield32 *caps)
+{
+ req->_present.port_function = 1;
+ req->port_function._present.caps = 1;
+ memcpy(&req->port_function.caps, caps, sizeof(struct nla_bitfield32));
+}
+
+/*
+ * Set devlink port instances.
+ */
+int devlink_port_set(struct ynl_sock *ys, struct devlink_port_set_req *req);
+
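A hedged usage sketch of the request API above. Note that, as generated, the state/opstate/caps setters are the ones that flag the port_function nest as present. The "pci"/"0000:01:00.0" identifiers are placeholders, DEVLINK_PORT_FN_STATE_ACTIVE comes from the uapi linux/devlink.h enum, and ys is an already-open devlink ynl socket; not part of the generated file:

/* Illustrative only; not part of the generated file. */
#include "devlink-user.h"

/* Assign a MAC address to a port function and activate it. */
static int port_fn_activate(struct ynl_sock *ys, __u32 port_index)
{
        const unsigned char mac[] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
        struct devlink_port_set_req *req;
        int err;

        req = devlink_port_set_req_alloc();
        if (!req)
                return -1;
        devlink_port_set_req_set_bus_name(req, "pci");          /* placeholder */
        devlink_port_set_req_set_dev_name(req, "0000:01:00.0"); /* placeholder */
        devlink_port_set_req_set_port_index(req, port_index);
        devlink_port_set_req_set_port_function_hw_addr(req, mac, sizeof(mac));
        devlink_port_set_req_set_port_function_state(req, DEVLINK_PORT_FN_STATE_ACTIVE);

        err = devlink_port_set(ys, req);
        devlink_port_set_req_free(req);
        return err;
}
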
+/* ============== DEVLINK_CMD_PORT_NEW ============== */
+/* DEVLINK_CMD_PORT_NEW - do */
+struct devlink_port_new_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 port_flavour:1;
+ __u32 port_pci_pf_number:1;
+ __u32 port_pci_sf_number:1;
+ __u32 port_controller_number:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ enum devlink_port_flavour port_flavour;
+ __u16 port_pci_pf_number;
+ __u32 port_pci_sf_number;
+ __u32 port_controller_number;
+};
+
+static inline struct devlink_port_new_req *devlink_port_new_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_port_new_req));
+}
+void devlink_port_new_req_free(struct devlink_port_new_req *req);
+
+static inline void
+devlink_port_new_req_set_bus_name(struct devlink_port_new_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_port_new_req_set_dev_name(struct devlink_port_new_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_port_new_req_set_port_index(struct devlink_port_new_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_port_new_req_set_port_flavour(struct devlink_port_new_req *req,
+ enum devlink_port_flavour port_flavour)
+{
+ req->_present.port_flavour = 1;
+ req->port_flavour = port_flavour;
+}
+static inline void
+devlink_port_new_req_set_port_pci_pf_number(struct devlink_port_new_req *req,
+ __u16 port_pci_pf_number)
+{
+ req->_present.port_pci_pf_number = 1;
+ req->port_pci_pf_number = port_pci_pf_number;
+}
+static inline void
+devlink_port_new_req_set_port_pci_sf_number(struct devlink_port_new_req *req,
+ __u32 port_pci_sf_number)
+{
+ req->_present.port_pci_sf_number = 1;
+ req->port_pci_sf_number = port_pci_sf_number;
+}
+static inline void
+devlink_port_new_req_set_port_controller_number(struct devlink_port_new_req *req,
+ __u32 port_controller_number)
+{
+ req->_present.port_controller_number = 1;
+ req->port_controller_number = port_controller_number;
+}
+
+struct devlink_port_new_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+};
+
+void devlink_port_new_rsp_free(struct devlink_port_new_rsp *rsp);
+
+/*
+ * Create devlink port instances.
+ */
+struct devlink_port_new_rsp *
+devlink_port_new(struct ynl_sock *ys, struct devlink_port_new_req *req);
+
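A sketch of driving the request/response pair above to add a PCI SF port. DEVLINK_PORT_FLAVOUR_PCI_SF comes from the uapi linux/devlink.h enum, the PF/SF numbers are placeholders, and ys is an already-open devlink ynl socket; not part of the generated file:

/* Illustrative only; not part of the generated file. */
#include <stdio.h>

#include "devlink-user.h"

/* Add a PCI SF port and report the port index chosen by the kernel. */
static int sf_port_add(struct ynl_sock *ys, const char *bus, const char *dev)
{
        struct devlink_port_new_req *req;
        struct devlink_port_new_rsp *rsp;
        int ret = -1;

        req = devlink_port_new_req_alloc();
        if (!req)
                return -1;
        devlink_port_new_req_set_bus_name(req, bus);
        devlink_port_new_req_set_dev_name(req, dev);
        devlink_port_new_req_set_port_flavour(req, DEVLINK_PORT_FLAVOUR_PCI_SF);
        devlink_port_new_req_set_port_pci_pf_number(req, 0);   /* placeholder */
        devlink_port_new_req_set_port_pci_sf_number(req, 44);  /* placeholder */

        rsp = devlink_port_new(ys, req);
        devlink_port_new_req_free(req);
        if (rsp) {
                if (rsp->_present.port_index)
                        printf("new port index: %u\n", rsp->port_index);
                devlink_port_new_rsp_free(rsp);
                ret = 0;
        }
        return ret;
}
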
+/* ============== DEVLINK_CMD_PORT_DEL ============== */
+/* DEVLINK_CMD_PORT_DEL - do */
+struct devlink_port_del_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+};
+
+static inline struct devlink_port_del_req *devlink_port_del_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_port_del_req));
+}
+void devlink_port_del_req_free(struct devlink_port_del_req *req);
+
+static inline void
+devlink_port_del_req_set_bus_name(struct devlink_port_del_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_port_del_req_set_dev_name(struct devlink_port_del_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_port_del_req_set_port_index(struct devlink_port_del_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+
+/*
+ * Delete devlink port instances.
+ */
+int devlink_port_del(struct ynl_sock *ys, struct devlink_port_del_req *req);
+
+/* ============== DEVLINK_CMD_PORT_SPLIT ============== */
+/* DEVLINK_CMD_PORT_SPLIT - do */
+struct devlink_port_split_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 port_split_count:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ __u32 port_split_count;
+};
+
+static inline struct devlink_port_split_req *devlink_port_split_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_port_split_req));
+}
+void devlink_port_split_req_free(struct devlink_port_split_req *req);
+
+static inline void
+devlink_port_split_req_set_bus_name(struct devlink_port_split_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_port_split_req_set_dev_name(struct devlink_port_split_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_port_split_req_set_port_index(struct devlink_port_split_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_port_split_req_set_port_split_count(struct devlink_port_split_req *req,
+ __u32 port_split_count)
+{
+ req->_present.port_split_count = 1;
+ req->port_split_count = port_split_count;
+}
+
+/*
+ * Split devlink port instances.
+ */
+int devlink_port_split(struct ynl_sock *ys, struct devlink_port_split_req *req);
+
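And the simplest do-request shape, using only the helpers declared above; the split count is illustrative and ys is an already-open devlink ynl socket; not part of the generated file:

/* Illustrative only; not part of the generated file. */
#include "devlink-user.h"

/* Split one physical port into four. */
static int port_split4(struct ynl_sock *ys, const char *bus, const char *dev,
                       __u32 port_index)
{
        struct devlink_port_split_req *req;
        int err;

        req = devlink_port_split_req_alloc();
        if (!req)
                return -1;
        devlink_port_split_req_set_bus_name(req, bus);
        devlink_port_split_req_set_dev_name(req, dev);
        devlink_port_split_req_set_port_index(req, port_index);
        devlink_port_split_req_set_port_split_count(req, 4);

        err = devlink_port_split(ys, req);      /* 0 on success, negative on error */
        devlink_port_split_req_free(req);
        return err;
}
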
+/* ============== DEVLINK_CMD_PORT_UNSPLIT ============== */
+/* DEVLINK_CMD_PORT_UNSPLIT - do */
+struct devlink_port_unsplit_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+};
+
+static inline struct devlink_port_unsplit_req *
+devlink_port_unsplit_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_port_unsplit_req));
+}
+void devlink_port_unsplit_req_free(struct devlink_port_unsplit_req *req);
+
+static inline void
+devlink_port_unsplit_req_set_bus_name(struct devlink_port_unsplit_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_port_unsplit_req_set_dev_name(struct devlink_port_unsplit_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_port_unsplit_req_set_port_index(struct devlink_port_unsplit_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+
+/*
+ * Unsplit devlink port instances.
+ */
+int devlink_port_unsplit(struct ynl_sock *ys,
+ struct devlink_port_unsplit_req *req);
+
/* ============== DEVLINK_CMD_SB_GET ============== */
/* DEVLINK_CMD_SB_GET - do */
struct devlink_sb_get_req {
@@ -379,7 +996,7 @@ devlink_sb_get_req_dump_set_dev_name(struct devlink_sb_get_req_dump *req,
struct devlink_sb_get_list {
struct devlink_sb_get_list *next;
- struct devlink_sb_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_sb_get_rsp obj __attribute__((aligned(8)));
};
void devlink_sb_get_list_free(struct devlink_sb_get_list *rsp);
@@ -509,7 +1126,7 @@ devlink_sb_pool_get_req_dump_set_dev_name(struct devlink_sb_pool_get_req_dump *r
struct devlink_sb_pool_get_list {
struct devlink_sb_pool_get_list *next;
- struct devlink_sb_pool_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_sb_pool_get_rsp obj __attribute__((aligned(8)));
};
void devlink_sb_pool_get_list_free(struct devlink_sb_pool_get_list *rsp);
@@ -518,6 +1135,88 @@ struct devlink_sb_pool_get_list *
devlink_sb_pool_get_dump(struct ynl_sock *ys,
struct devlink_sb_pool_get_req_dump *req);
+/* ============== DEVLINK_CMD_SB_POOL_SET ============== */
+/* DEVLINK_CMD_SB_POOL_SET - do */
+struct devlink_sb_pool_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 sb_index:1;
+ __u32 sb_pool_index:1;
+ __u32 sb_pool_threshold_type:1;
+ __u32 sb_pool_size:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 sb_index;
+ __u16 sb_pool_index;
+ enum devlink_sb_threshold_type sb_pool_threshold_type;
+ __u32 sb_pool_size;
+};
+
+static inline struct devlink_sb_pool_set_req *
+devlink_sb_pool_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_pool_set_req));
+}
+void devlink_sb_pool_set_req_free(struct devlink_sb_pool_set_req *req);
+
+static inline void
+devlink_sb_pool_set_req_set_bus_name(struct devlink_sb_pool_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_pool_set_req_set_dev_name(struct devlink_sb_pool_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_sb_pool_set_req_set_sb_index(struct devlink_sb_pool_set_req *req,
+ __u32 sb_index)
+{
+ req->_present.sb_index = 1;
+ req->sb_index = sb_index;
+}
+static inline void
+devlink_sb_pool_set_req_set_sb_pool_index(struct devlink_sb_pool_set_req *req,
+ __u16 sb_pool_index)
+{
+ req->_present.sb_pool_index = 1;
+ req->sb_pool_index = sb_pool_index;
+}
+static inline void
+devlink_sb_pool_set_req_set_sb_pool_threshold_type(struct devlink_sb_pool_set_req *req,
+ enum devlink_sb_threshold_type sb_pool_threshold_type)
+{
+ req->_present.sb_pool_threshold_type = 1;
+ req->sb_pool_threshold_type = sb_pool_threshold_type;
+}
+static inline void
+devlink_sb_pool_set_req_set_sb_pool_size(struct devlink_sb_pool_set_req *req,
+ __u32 sb_pool_size)
+{
+ req->_present.sb_pool_size = 1;
+ req->sb_pool_size = sb_pool_size;
+}
+
+/*
+ * Set shared buffer pool instances.
+ */
+int devlink_sb_pool_set(struct ynl_sock *ys,
+ struct devlink_sb_pool_set_req *req);
+
/* ============== DEVLINK_CMD_SB_PORT_POOL_GET ============== */
/* DEVLINK_CMD_SB_PORT_POOL_GET - do */
struct devlink_sb_port_pool_get_req {
@@ -654,7 +1353,7 @@ devlink_sb_port_pool_get_req_dump_set_dev_name(struct devlink_sb_port_pool_get_r
struct devlink_sb_port_pool_get_list {
struct devlink_sb_port_pool_get_list *next;
- struct devlink_sb_port_pool_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_sb_port_pool_get_rsp obj __attribute__((aligned(8)));
};
void
@@ -664,6 +1363,89 @@ struct devlink_sb_port_pool_get_list *
devlink_sb_port_pool_get_dump(struct ynl_sock *ys,
struct devlink_sb_port_pool_get_req_dump *req);
+/* ============== DEVLINK_CMD_SB_PORT_POOL_SET ============== */
+/* DEVLINK_CMD_SB_PORT_POOL_SET - do */
+struct devlink_sb_port_pool_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 sb_index:1;
+ __u32 sb_pool_index:1;
+ __u32 sb_threshold:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ __u32 sb_index;
+ __u16 sb_pool_index;
+ __u32 sb_threshold;
+};
+
+static inline struct devlink_sb_port_pool_set_req *
+devlink_sb_port_pool_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_port_pool_set_req));
+}
+void
+devlink_sb_port_pool_set_req_free(struct devlink_sb_port_pool_set_req *req);
+
+static inline void
+devlink_sb_port_pool_set_req_set_bus_name(struct devlink_sb_port_pool_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_port_pool_set_req_set_dev_name(struct devlink_sb_port_pool_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_sb_port_pool_set_req_set_port_index(struct devlink_sb_port_pool_set_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_sb_port_pool_set_req_set_sb_index(struct devlink_sb_port_pool_set_req *req,
+ __u32 sb_index)
+{
+ req->_present.sb_index = 1;
+ req->sb_index = sb_index;
+}
+static inline void
+devlink_sb_port_pool_set_req_set_sb_pool_index(struct devlink_sb_port_pool_set_req *req,
+ __u16 sb_pool_index)
+{
+ req->_present.sb_pool_index = 1;
+ req->sb_pool_index = sb_pool_index;
+}
+static inline void
+devlink_sb_port_pool_set_req_set_sb_threshold(struct devlink_sb_port_pool_set_req *req,
+ __u32 sb_threshold)
+{
+ req->_present.sb_threshold = 1;
+ req->sb_threshold = sb_threshold;
+}
+
+/*
+ * Set shared buffer port-pool combinations and threshold.
+ */
+int devlink_sb_port_pool_set(struct ynl_sock *ys,
+ struct devlink_sb_port_pool_set_req *req);
+
/* ============== DEVLINK_CMD_SB_TC_POOL_BIND_GET ============== */
/* DEVLINK_CMD_SB_TC_POOL_BIND_GET - do */
struct devlink_sb_tc_pool_bind_get_req {
@@ -811,7 +1593,7 @@ devlink_sb_tc_pool_bind_get_req_dump_set_dev_name(struct devlink_sb_tc_pool_bind
struct devlink_sb_tc_pool_bind_get_list {
struct devlink_sb_tc_pool_bind_get_list *next;
- struct devlink_sb_tc_pool_bind_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_sb_tc_pool_bind_get_rsp obj __attribute__((aligned(8)));
};
void
@@ -821,6 +1603,861 @@ struct devlink_sb_tc_pool_bind_get_list *
devlink_sb_tc_pool_bind_get_dump(struct ynl_sock *ys,
struct devlink_sb_tc_pool_bind_get_req_dump *req);
+/* ============== DEVLINK_CMD_SB_TC_POOL_BIND_SET ============== */
+/* DEVLINK_CMD_SB_TC_POOL_BIND_SET - do */
+struct devlink_sb_tc_pool_bind_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 sb_index:1;
+ __u32 sb_pool_index:1;
+ __u32 sb_pool_type:1;
+ __u32 sb_tc_index:1;
+ __u32 sb_threshold:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ __u32 sb_index;
+ __u16 sb_pool_index;
+ enum devlink_sb_pool_type sb_pool_type;
+ __u16 sb_tc_index;
+ __u32 sb_threshold;
+};
+
+static inline struct devlink_sb_tc_pool_bind_set_req *
+devlink_sb_tc_pool_bind_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_tc_pool_bind_set_req));
+}
+void
+devlink_sb_tc_pool_bind_set_req_free(struct devlink_sb_tc_pool_bind_set_req *req);
+
+static inline void
+devlink_sb_tc_pool_bind_set_req_set_bus_name(struct devlink_sb_tc_pool_bind_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_tc_pool_bind_set_req_set_dev_name(struct devlink_sb_tc_pool_bind_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_sb_tc_pool_bind_set_req_set_port_index(struct devlink_sb_tc_pool_bind_set_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_sb_tc_pool_bind_set_req_set_sb_index(struct devlink_sb_tc_pool_bind_set_req *req,
+ __u32 sb_index)
+{
+ req->_present.sb_index = 1;
+ req->sb_index = sb_index;
+}
+static inline void
+devlink_sb_tc_pool_bind_set_req_set_sb_pool_index(struct devlink_sb_tc_pool_bind_set_req *req,
+ __u16 sb_pool_index)
+{
+ req->_present.sb_pool_index = 1;
+ req->sb_pool_index = sb_pool_index;
+}
+static inline void
+devlink_sb_tc_pool_bind_set_req_set_sb_pool_type(struct devlink_sb_tc_pool_bind_set_req *req,
+ enum devlink_sb_pool_type sb_pool_type)
+{
+ req->_present.sb_pool_type = 1;
+ req->sb_pool_type = sb_pool_type;
+}
+static inline void
+devlink_sb_tc_pool_bind_set_req_set_sb_tc_index(struct devlink_sb_tc_pool_bind_set_req *req,
+ __u16 sb_tc_index)
+{
+ req->_present.sb_tc_index = 1;
+ req->sb_tc_index = sb_tc_index;
+}
+static inline void
+devlink_sb_tc_pool_bind_set_req_set_sb_threshold(struct devlink_sb_tc_pool_bind_set_req *req,
+ __u32 sb_threshold)
+{
+ req->_present.sb_threshold = 1;
+ req->sb_threshold = sb_threshold;
+}
+
+/*
+ * Set shared buffer port-TC to pool bindings and threshold.
+ */
+int devlink_sb_tc_pool_bind_set(struct ynl_sock *ys,
+ struct devlink_sb_tc_pool_bind_set_req *req);
+
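The same flow applies to the TC-to-pool binding op above; the only new element is the pool type enum from <linux/devlink.h>. A sketch under the same assumptions as the previous one:

static int example_sb_tc_bind(struct ynl_sock *ys)
{
	struct devlink_sb_tc_pool_bind_set_req *req;
	int ret;

	req = devlink_sb_tc_pool_bind_set_req_alloc();
	if (!req)
		return -1;

	devlink_sb_tc_pool_bind_set_req_set_bus_name(req, "pci");
	devlink_sb_tc_pool_bind_set_req_set_dev_name(req, "0000:01:00.0");
	devlink_sb_tc_pool_bind_set_req_set_port_index(req, 1);
	devlink_sb_tc_pool_bind_set_req_set_sb_index(req, 0);
	/* bind ingress TC 3 of port 1 to pool 4 with threshold 16 */
	devlink_sb_tc_pool_bind_set_req_set_sb_tc_index(req, 3);
	devlink_sb_tc_pool_bind_set_req_set_sb_pool_type(req, DEVLINK_SB_POOL_TYPE_INGRESS);
	devlink_sb_tc_pool_bind_set_req_set_sb_pool_index(req, 4);
	devlink_sb_tc_pool_bind_set_req_set_sb_threshold(req, 16);

	ret = devlink_sb_tc_pool_bind_set(ys, req);
	devlink_sb_tc_pool_bind_set_req_free(req);
	return ret;
}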
+/* ============== DEVLINK_CMD_SB_OCC_SNAPSHOT ============== */
+/* DEVLINK_CMD_SB_OCC_SNAPSHOT - do */
+struct devlink_sb_occ_snapshot_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 sb_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 sb_index;
+};
+
+static inline struct devlink_sb_occ_snapshot_req *
+devlink_sb_occ_snapshot_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_occ_snapshot_req));
+}
+void devlink_sb_occ_snapshot_req_free(struct devlink_sb_occ_snapshot_req *req);
+
+static inline void
+devlink_sb_occ_snapshot_req_set_bus_name(struct devlink_sb_occ_snapshot_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_occ_snapshot_req_set_dev_name(struct devlink_sb_occ_snapshot_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_sb_occ_snapshot_req_set_sb_index(struct devlink_sb_occ_snapshot_req *req,
+ __u32 sb_index)
+{
+ req->_present.sb_index = 1;
+ req->sb_index = sb_index;
+}
+
+/*
+ * Take occupancy snapshot of shared buffer.
+ */
+int devlink_sb_occ_snapshot(struct ynl_sock *ys,
+ struct devlink_sb_occ_snapshot_req *req);
+
+/* ============== DEVLINK_CMD_SB_OCC_MAX_CLEAR ============== */
+/* DEVLINK_CMD_SB_OCC_MAX_CLEAR - do */
+struct devlink_sb_occ_max_clear_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 sb_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 sb_index;
+};
+
+static inline struct devlink_sb_occ_max_clear_req *
+devlink_sb_occ_max_clear_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_sb_occ_max_clear_req));
+}
+void
+devlink_sb_occ_max_clear_req_free(struct devlink_sb_occ_max_clear_req *req);
+
+static inline void
+devlink_sb_occ_max_clear_req_set_bus_name(struct devlink_sb_occ_max_clear_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_sb_occ_max_clear_req_set_dev_name(struct devlink_sb_occ_max_clear_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_sb_occ_max_clear_req_set_sb_index(struct devlink_sb_occ_max_clear_req *req,
+ __u32 sb_index)
+{
+ req->_present.sb_index = 1;
+ req->sb_index = sb_index;
+}
+
+/*
+ * Clear occupancy watermarks of shared buffer.
+ */
+int devlink_sb_occ_max_clear(struct ynl_sock *ys,
+ struct devlink_sb_occ_max_clear_req *req);
+
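Occupancy snapshot and watermark clear are typically used together when polling shared-buffer usage. A sketch of that pairing, under the same placeholder and return-value assumptions as the earlier sketches:

static int example_sb_occ_refresh(struct ynl_sock *ys)
{
	struct devlink_sb_occ_snapshot_req *snap;
	struct devlink_sb_occ_max_clear_req *clear;
	int ret = -1;

	snap = devlink_sb_occ_snapshot_req_alloc();
	clear = devlink_sb_occ_max_clear_req_alloc();
	if (!snap || !clear)
		goto out;

	devlink_sb_occ_snapshot_req_set_bus_name(snap, "pci");
	devlink_sb_occ_snapshot_req_set_dev_name(snap, "0000:01:00.0");
	devlink_sb_occ_snapshot_req_set_sb_index(snap, 0);

	devlink_sb_occ_max_clear_req_set_bus_name(clear, "pci");
	devlink_sb_occ_max_clear_req_set_dev_name(clear, "0000:01:00.0");
	devlink_sb_occ_max_clear_req_set_sb_index(clear, 0);

	/* take a fresh snapshot, then reset the max watermarks */
	ret = devlink_sb_occ_snapshot(ys, snap);
	if (!ret)
		ret = devlink_sb_occ_max_clear(ys, clear);
out:
	if (snap)
		devlink_sb_occ_snapshot_req_free(snap);
	if (clear)
		devlink_sb_occ_max_clear_req_free(clear);
	return ret;
}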
+/* ============== DEVLINK_CMD_ESWITCH_GET ============== */
+/* DEVLINK_CMD_ESWITCH_GET - do */
+struct devlink_eswitch_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_eswitch_get_req *
+devlink_eswitch_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_eswitch_get_req));
+}
+void devlink_eswitch_get_req_free(struct devlink_eswitch_get_req *req);
+
+static inline void
+devlink_eswitch_get_req_set_bus_name(struct devlink_eswitch_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_eswitch_get_req_set_dev_name(struct devlink_eswitch_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_eswitch_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 eswitch_mode:1;
+ __u32 eswitch_inline_mode:1;
+ __u32 eswitch_encap_mode:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ enum devlink_eswitch_mode eswitch_mode;
+ enum devlink_eswitch_inline_mode eswitch_inline_mode;
+ enum devlink_eswitch_encap_mode eswitch_encap_mode;
+};
+
+void devlink_eswitch_get_rsp_free(struct devlink_eswitch_get_rsp *rsp);
+
+/*
+ * Get eswitch attributes.
+ */
+struct devlink_eswitch_get_rsp *
+devlink_eswitch_get(struct ynl_sock *ys, struct devlink_eswitch_get_req *req);
+
+/* ============== DEVLINK_CMD_ESWITCH_SET ============== */
+/* DEVLINK_CMD_ESWITCH_SET - do */
+struct devlink_eswitch_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 eswitch_mode:1;
+ __u32 eswitch_inline_mode:1;
+ __u32 eswitch_encap_mode:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ enum devlink_eswitch_mode eswitch_mode;
+ enum devlink_eswitch_inline_mode eswitch_inline_mode;
+ enum devlink_eswitch_encap_mode eswitch_encap_mode;
+};
+
+static inline struct devlink_eswitch_set_req *
+devlink_eswitch_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_eswitch_set_req));
+}
+void devlink_eswitch_set_req_free(struct devlink_eswitch_set_req *req);
+
+static inline void
+devlink_eswitch_set_req_set_bus_name(struct devlink_eswitch_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_eswitch_set_req_set_dev_name(struct devlink_eswitch_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_eswitch_set_req_set_eswitch_mode(struct devlink_eswitch_set_req *req,
+ enum devlink_eswitch_mode eswitch_mode)
+{
+ req->_present.eswitch_mode = 1;
+ req->eswitch_mode = eswitch_mode;
+}
+static inline void
+devlink_eswitch_set_req_set_eswitch_inline_mode(struct devlink_eswitch_set_req *req,
+ enum devlink_eswitch_inline_mode eswitch_inline_mode)
+{
+ req->_present.eswitch_inline_mode = 1;
+ req->eswitch_inline_mode = eswitch_inline_mode;
+}
+static inline void
+devlink_eswitch_set_req_set_eswitch_encap_mode(struct devlink_eswitch_set_req *req,
+ enum devlink_eswitch_encap_mode eswitch_encap_mode)
+{
+ req->_present.eswitch_encap_mode = 1;
+ req->eswitch_encap_mode = eswitch_encap_mode;
+}
+
+/*
+ * Set eswitch attributes.
+ */
+int devlink_eswitch_set(struct ynl_sock *ys,
+ struct devlink_eswitch_set_req *req);
+
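The get/set pair above is the typical way to flip a device into switchdev mode: read the current mode, then set it only if needed. A sketch using the same placeholder identity; the mode constants come from <linux/devlink.h>:

static int example_eswitch_to_switchdev(struct ynl_sock *ys)
{
	struct devlink_eswitch_get_req *get_req;
	struct devlink_eswitch_get_rsp *rsp;
	struct devlink_eswitch_set_req *set_req;
	int ret;

	get_req = devlink_eswitch_get_req_alloc();
	if (!get_req)
		return -1;
	devlink_eswitch_get_req_set_bus_name(get_req, "pci");
	devlink_eswitch_get_req_set_dev_name(get_req, "0000:01:00.0");

	rsp = devlink_eswitch_get(ys, get_req);
	devlink_eswitch_get_req_free(get_req);
	if (!rsp)
		return -1;

	if (rsp->_present.eswitch_mode &&
	    rsp->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
		devlink_eswitch_get_rsp_free(rsp);
		return 0;	/* already in switchdev mode */
	}
	devlink_eswitch_get_rsp_free(rsp);

	set_req = devlink_eswitch_set_req_alloc();
	if (!set_req)
		return -1;
	devlink_eswitch_set_req_set_bus_name(set_req, "pci");
	devlink_eswitch_set_req_set_dev_name(set_req, "0000:01:00.0");
	devlink_eswitch_set_req_set_eswitch_mode(set_req,
						 DEVLINK_ESWITCH_MODE_SWITCHDEV);

	ret = devlink_eswitch_set(ys, set_req);
	devlink_eswitch_set_req_free(set_req);
	return ret;
}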
+/* ============== DEVLINK_CMD_DPIPE_TABLE_GET ============== */
+/* DEVLINK_CMD_DPIPE_TABLE_GET - do */
+struct devlink_dpipe_table_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 dpipe_table_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *dpipe_table_name;
+};
+
+static inline struct devlink_dpipe_table_get_req *
+devlink_dpipe_table_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_dpipe_table_get_req));
+}
+void devlink_dpipe_table_get_req_free(struct devlink_dpipe_table_get_req *req);
+
+static inline void
+devlink_dpipe_table_get_req_set_bus_name(struct devlink_dpipe_table_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_dpipe_table_get_req_set_dev_name(struct devlink_dpipe_table_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_dpipe_table_get_req_set_dpipe_table_name(struct devlink_dpipe_table_get_req *req,
+ const char *dpipe_table_name)
+{
+ free(req->dpipe_table_name);
+ req->_present.dpipe_table_name_len = strlen(dpipe_table_name);
+ req->dpipe_table_name = malloc(req->_present.dpipe_table_name_len + 1);
+ memcpy(req->dpipe_table_name, dpipe_table_name, req->_present.dpipe_table_name_len);
+ req->dpipe_table_name[req->_present.dpipe_table_name_len] = 0;
+}
+
+struct devlink_dpipe_table_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 dpipe_tables:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ struct devlink_dl_dpipe_tables dpipe_tables;
+};
+
+void devlink_dpipe_table_get_rsp_free(struct devlink_dpipe_table_get_rsp *rsp);
+
+/*
+ * Get dpipe table attributes.
+ */
+struct devlink_dpipe_table_get_rsp *
+devlink_dpipe_table_get(struct ynl_sock *ys,
+ struct devlink_dpipe_table_get_req *req);
+
+/* ============== DEVLINK_CMD_DPIPE_ENTRIES_GET ============== */
+/* DEVLINK_CMD_DPIPE_ENTRIES_GET - do */
+struct devlink_dpipe_entries_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 dpipe_table_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *dpipe_table_name;
+};
+
+static inline struct devlink_dpipe_entries_get_req *
+devlink_dpipe_entries_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_dpipe_entries_get_req));
+}
+void
+devlink_dpipe_entries_get_req_free(struct devlink_dpipe_entries_get_req *req);
+
+static inline void
+devlink_dpipe_entries_get_req_set_bus_name(struct devlink_dpipe_entries_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_dpipe_entries_get_req_set_dev_name(struct devlink_dpipe_entries_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_dpipe_entries_get_req_set_dpipe_table_name(struct devlink_dpipe_entries_get_req *req,
+ const char *dpipe_table_name)
+{
+ free(req->dpipe_table_name);
+ req->_present.dpipe_table_name_len = strlen(dpipe_table_name);
+ req->dpipe_table_name = malloc(req->_present.dpipe_table_name_len + 1);
+ memcpy(req->dpipe_table_name, dpipe_table_name, req->_present.dpipe_table_name_len);
+ req->dpipe_table_name[req->_present.dpipe_table_name_len] = 0;
+}
+
+struct devlink_dpipe_entries_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 dpipe_entries:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ struct devlink_dl_dpipe_entries dpipe_entries;
+};
+
+void
+devlink_dpipe_entries_get_rsp_free(struct devlink_dpipe_entries_get_rsp *rsp);
+
+/*
+ * Get dpipe entries attributes.
+ */
+struct devlink_dpipe_entries_get_rsp *
+devlink_dpipe_entries_get(struct ynl_sock *ys,
+ struct devlink_dpipe_entries_get_req *req);
+
+/* ============== DEVLINK_CMD_DPIPE_HEADERS_GET ============== */
+/* DEVLINK_CMD_DPIPE_HEADERS_GET - do */
+struct devlink_dpipe_headers_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_dpipe_headers_get_req *
+devlink_dpipe_headers_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_dpipe_headers_get_req));
+}
+void
+devlink_dpipe_headers_get_req_free(struct devlink_dpipe_headers_get_req *req);
+
+static inline void
+devlink_dpipe_headers_get_req_set_bus_name(struct devlink_dpipe_headers_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_dpipe_headers_get_req_set_dev_name(struct devlink_dpipe_headers_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_dpipe_headers_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 dpipe_headers:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ struct devlink_dl_dpipe_headers dpipe_headers;
+};
+
+void
+devlink_dpipe_headers_get_rsp_free(struct devlink_dpipe_headers_get_rsp *rsp);
+
+/*
+ * Get dpipe headers attributes.
+ */
+struct devlink_dpipe_headers_get_rsp *
+devlink_dpipe_headers_get(struct ynl_sock *ys,
+ struct devlink_dpipe_headers_get_req *req);
+
+/* ============== DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET ============== */
+/* DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET - do */
+struct devlink_dpipe_table_counters_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 dpipe_table_name_len;
+ __u32 dpipe_table_counters_enabled:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *dpipe_table_name;
+ __u8 dpipe_table_counters_enabled;
+};
+
+static inline struct devlink_dpipe_table_counters_set_req *
+devlink_dpipe_table_counters_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_dpipe_table_counters_set_req));
+}
+void
+devlink_dpipe_table_counters_set_req_free(struct devlink_dpipe_table_counters_set_req *req);
+
+static inline void
+devlink_dpipe_table_counters_set_req_set_bus_name(struct devlink_dpipe_table_counters_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_dpipe_table_counters_set_req_set_dev_name(struct devlink_dpipe_table_counters_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_dpipe_table_counters_set_req_set_dpipe_table_name(struct devlink_dpipe_table_counters_set_req *req,
+ const char *dpipe_table_name)
+{
+ free(req->dpipe_table_name);
+ req->_present.dpipe_table_name_len = strlen(dpipe_table_name);
+ req->dpipe_table_name = malloc(req->_present.dpipe_table_name_len + 1);
+ memcpy(req->dpipe_table_name, dpipe_table_name, req->_present.dpipe_table_name_len);
+ req->dpipe_table_name[req->_present.dpipe_table_name_len] = 0;
+}
+static inline void
+devlink_dpipe_table_counters_set_req_set_dpipe_table_counters_enabled(struct devlink_dpipe_table_counters_set_req *req,
+ __u8 dpipe_table_counters_enabled)
+{
+ req->_present.dpipe_table_counters_enabled = 1;
+ req->dpipe_table_counters_enabled = dpipe_table_counters_enabled;
+}
+
+/*
+ * Set dpipe counter attributes.
+ */
+int devlink_dpipe_table_counters_set(struct ynl_sock *ys,
+ struct devlink_dpipe_table_counters_set_req *req);
+
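A brief sketch of enabling counters on a dpipe table; the table name is passed in by the caller since valid names are driver specific (they can be discovered with the dpipe table get op above). Same assumptions as the earlier sketches:

static int example_dpipe_counters_on(struct ynl_sock *ys, const char *table)
{
	struct devlink_dpipe_table_counters_set_req *req;
	int ret;

	req = devlink_dpipe_table_counters_set_req_alloc();
	if (!req)
		return -1;

	devlink_dpipe_table_counters_set_req_set_bus_name(req, "pci");
	devlink_dpipe_table_counters_set_req_set_dev_name(req, "0000:01:00.0");
	devlink_dpipe_table_counters_set_req_set_dpipe_table_name(req, table);
	devlink_dpipe_table_counters_set_req_set_dpipe_table_counters_enabled(req, 1);

	ret = devlink_dpipe_table_counters_set(ys, req);
	devlink_dpipe_table_counters_set_req_free(req);
	return ret;
}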
+/* ============== DEVLINK_CMD_RESOURCE_SET ============== */
+/* DEVLINK_CMD_RESOURCE_SET - do */
+struct devlink_resource_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 resource_id:1;
+ __u32 resource_size:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u64 resource_id;
+ __u64 resource_size;
+};
+
+static inline struct devlink_resource_set_req *
+devlink_resource_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_resource_set_req));
+}
+void devlink_resource_set_req_free(struct devlink_resource_set_req *req);
+
+static inline void
+devlink_resource_set_req_set_bus_name(struct devlink_resource_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_resource_set_req_set_dev_name(struct devlink_resource_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_resource_set_req_set_resource_id(struct devlink_resource_set_req *req,
+ __u64 resource_id)
+{
+ req->_present.resource_id = 1;
+ req->resource_id = resource_id;
+}
+static inline void
+devlink_resource_set_req_set_resource_size(struct devlink_resource_set_req *req,
+ __u64 resource_size)
+{
+ req->_present.resource_size = 1;
+ req->resource_size = resource_size;
+}
+
+/*
+ * Set resource attributes.
+ */
+int devlink_resource_set(struct ynl_sock *ys,
+ struct devlink_resource_set_req *req);
+
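Resource IDs and valid size ranges are device specific and are normally discovered via DEVLINK_CMD_RESOURCE_DUMP below, so this sketch takes both as caller-provided values; identity placeholders and return convention are assumed as before:

static int example_resource_resize(struct ynl_sock *ys, __u64 id, __u64 size)
{
	struct devlink_resource_set_req *req;
	int ret;

	req = devlink_resource_set_req_alloc();
	if (!req)
		return -1;

	devlink_resource_set_req_set_bus_name(req, "pci");
	devlink_resource_set_req_set_dev_name(req, "0000:01:00.0");
	devlink_resource_set_req_set_resource_id(req, id);
	devlink_resource_set_req_set_resource_size(req, size);

	ret = devlink_resource_set(ys, req);
	devlink_resource_set_req_free(req);
	return ret;
}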
+/* ============== DEVLINK_CMD_RESOURCE_DUMP ============== */
+/* DEVLINK_CMD_RESOURCE_DUMP - do */
+struct devlink_resource_dump_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+};
+
+static inline struct devlink_resource_dump_req *
+devlink_resource_dump_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_resource_dump_req));
+}
+void devlink_resource_dump_req_free(struct devlink_resource_dump_req *req);
+
+static inline void
+devlink_resource_dump_req_set_bus_name(struct devlink_resource_dump_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_resource_dump_req_set_dev_name(struct devlink_resource_dump_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+
+struct devlink_resource_dump_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 resource_list:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ struct devlink_dl_resource_list resource_list;
+};
+
+void devlink_resource_dump_rsp_free(struct devlink_resource_dump_rsp *rsp);
+
+/*
+ * Get resource attributes.
+ */
+struct devlink_resource_dump_rsp *
+devlink_resource_dump(struct ynl_sock *ys,
+ struct devlink_resource_dump_req *req);
+
+/* ============== DEVLINK_CMD_RELOAD ============== */
+/* DEVLINK_CMD_RELOAD - do */
+struct devlink_reload_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 reload_action:1;
+ __u32 reload_limits:1;
+ __u32 netns_pid:1;
+ __u32 netns_fd:1;
+ __u32 netns_id:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ enum devlink_reload_action reload_action;
+ struct nla_bitfield32 reload_limits;
+ __u32 netns_pid;
+ __u32 netns_fd;
+ __u32 netns_id;
+};
+
+static inline struct devlink_reload_req *devlink_reload_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_reload_req));
+}
+void devlink_reload_req_free(struct devlink_reload_req *req);
+
+static inline void
+devlink_reload_req_set_bus_name(struct devlink_reload_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_reload_req_set_dev_name(struct devlink_reload_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_reload_req_set_reload_action(struct devlink_reload_req *req,
+ enum devlink_reload_action reload_action)
+{
+ req->_present.reload_action = 1;
+ req->reload_action = reload_action;
+}
+static inline void
+devlink_reload_req_set_reload_limits(struct devlink_reload_req *req,
+ struct nla_bitfield32 *reload_limits)
+{
+ req->_present.reload_limits = 1;
+ memcpy(&req->reload_limits, reload_limits, sizeof(struct nla_bitfield32));
+}
+static inline void
+devlink_reload_req_set_netns_pid(struct devlink_reload_req *req,
+ __u32 netns_pid)
+{
+ req->_present.netns_pid = 1;
+ req->netns_pid = netns_pid;
+}
+static inline void
+devlink_reload_req_set_netns_fd(struct devlink_reload_req *req, __u32 netns_fd)
+{
+ req->_present.netns_fd = 1;
+ req->netns_fd = netns_fd;
+}
+static inline void
+devlink_reload_req_set_netns_id(struct devlink_reload_req *req, __u32 netns_id)
+{
+ req->_present.netns_id = 1;
+ req->netns_id = netns_id;
+}
+
+struct devlink_reload_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 reload_actions_performed:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ struct nla_bitfield32 reload_actions_performed;
+};
+
+void devlink_reload_rsp_free(struct devlink_reload_rsp *rsp);
+
+/*
+ * Reload devlink.
+ */
+struct devlink_reload_rsp *
+devlink_reload(struct ynl_sock *ys, struct devlink_reload_req *req);
+
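The reload op returns a response carrying a bitfield32 of the actions actually performed. A sketch that requests a driver re-init and reports that bitfield's raw value; the action constant is from <linux/devlink.h> and the usual placeholder assumptions apply:

static int example_reload_reinit(struct ynl_sock *ys)
{
	struct devlink_reload_req *req;
	struct devlink_reload_rsp *rsp;

	req = devlink_reload_req_alloc();
	if (!req)
		return -1;

	devlink_reload_req_set_bus_name(req, "pci");
	devlink_reload_req_set_dev_name(req, "0000:01:00.0");
	devlink_reload_req_set_reload_action(req, DEVLINK_RELOAD_ACTION_DRIVER_REINIT);

	rsp = devlink_reload(ys, req);
	devlink_reload_req_free(req);
	if (!rsp)
		return -1;

	if (rsp->_present.reload_actions_performed)
		printf("actions performed bitfield: 0x%x\n",
		       rsp->reload_actions_performed.value);

	devlink_reload_rsp_free(rsp);
	return 0;
}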
/* ============== DEVLINK_CMD_PARAM_GET ============== */
/* DEVLINK_CMD_PARAM_GET - do */
struct devlink_param_get_req {
@@ -933,7 +2570,7 @@ devlink_param_get_req_dump_set_dev_name(struct devlink_param_get_req_dump *req,
struct devlink_param_get_list {
struct devlink_param_get_list *next;
- struct devlink_param_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_param_get_rsp obj __attribute__((aligned(8)));
};
void devlink_param_get_list_free(struct devlink_param_get_list *rsp);
@@ -942,6 +2579,80 @@ struct devlink_param_get_list *
devlink_param_get_dump(struct ynl_sock *ys,
struct devlink_param_get_req_dump *req);
+/* ============== DEVLINK_CMD_PARAM_SET ============== */
+/* DEVLINK_CMD_PARAM_SET - do */
+struct devlink_param_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 param_name_len;
+ __u32 param_type:1;
+ __u32 param_value_cmode:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *param_name;
+ __u8 param_type;
+ enum devlink_param_cmode param_value_cmode;
+};
+
+static inline struct devlink_param_set_req *devlink_param_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_param_set_req));
+}
+void devlink_param_set_req_free(struct devlink_param_set_req *req);
+
+static inline void
+devlink_param_set_req_set_bus_name(struct devlink_param_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_param_set_req_set_dev_name(struct devlink_param_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_param_set_req_set_param_name(struct devlink_param_set_req *req,
+ const char *param_name)
+{
+ free(req->param_name);
+ req->_present.param_name_len = strlen(param_name);
+ req->param_name = malloc(req->_present.param_name_len + 1);
+ memcpy(req->param_name, param_name, req->_present.param_name_len);
+ req->param_name[req->_present.param_name_len] = 0;
+}
+static inline void
+devlink_param_set_req_set_param_type(struct devlink_param_set_req *req,
+ __u8 param_type)
+{
+ req->_present.param_type = 1;
+ req->param_type = param_type;
+}
+static inline void
+devlink_param_set_req_set_param_value_cmode(struct devlink_param_set_req *req,
+ enum devlink_param_cmode param_value_cmode)
+{
+ req->_present.param_value_cmode = 1;
+ req->param_value_cmode = param_value_cmode;
+}
+
+/*
+ * Set param instances.
+ */
+int devlink_param_set(struct ynl_sock *ys, struct devlink_param_set_req *req);
+
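Note that this request struct only models the parameter name, type and configuration mode; the value payload attribute does not appear here. A sketch that switches a named parameter's cmode, with the parameter name supplied by the caller and the cmode constant taken from <linux/devlink.h>:

static int example_param_cmode(struct ynl_sock *ys, const char *name)
{
	struct devlink_param_set_req *req;
	int ret;

	req = devlink_param_set_req_alloc();
	if (!req)
		return -1;

	devlink_param_set_req_set_bus_name(req, "pci");
	devlink_param_set_req_set_dev_name(req, "0000:01:00.0");
	devlink_param_set_req_set_param_name(req, name);
	devlink_param_set_req_set_param_value_cmode(req, DEVLINK_PARAM_CMODE_RUNTIME);

	ret = devlink_param_set(ys, req);
	devlink_param_set_req_free(req);
	return ret;
}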
/* ============== DEVLINK_CMD_REGION_GET ============== */
/* DEVLINK_CMD_REGION_GET - do */
struct devlink_region_get_req {
@@ -1065,7 +2776,7 @@ devlink_region_get_req_dump_set_dev_name(struct devlink_region_get_req_dump *req
struct devlink_region_get_list {
struct devlink_region_get_list *next;
- struct devlink_region_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_region_get_rsp obj __attribute__((aligned(8)));
};
void devlink_region_get_list_free(struct devlink_region_get_list *rsp);
@@ -1074,6 +2785,430 @@ struct devlink_region_get_list *
devlink_region_get_dump(struct ynl_sock *ys,
struct devlink_region_get_req_dump *req);
+/* ============== DEVLINK_CMD_REGION_NEW ============== */
+/* DEVLINK_CMD_REGION_NEW - do */
+struct devlink_region_new_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 region_name_len;
+ __u32 region_snapshot_id:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *region_name;
+ __u32 region_snapshot_id;
+};
+
+static inline struct devlink_region_new_req *devlink_region_new_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_region_new_req));
+}
+void devlink_region_new_req_free(struct devlink_region_new_req *req);
+
+static inline void
+devlink_region_new_req_set_bus_name(struct devlink_region_new_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_region_new_req_set_dev_name(struct devlink_region_new_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_region_new_req_set_port_index(struct devlink_region_new_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_region_new_req_set_region_name(struct devlink_region_new_req *req,
+ const char *region_name)
+{
+ free(req->region_name);
+ req->_present.region_name_len = strlen(region_name);
+ req->region_name = malloc(req->_present.region_name_len + 1);
+ memcpy(req->region_name, region_name, req->_present.region_name_len);
+ req->region_name[req->_present.region_name_len] = 0;
+}
+static inline void
+devlink_region_new_req_set_region_snapshot_id(struct devlink_region_new_req *req,
+ __u32 region_snapshot_id)
+{
+ req->_present.region_snapshot_id = 1;
+ req->region_snapshot_id = region_snapshot_id;
+}
+
+struct devlink_region_new_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 region_name_len;
+ __u32 region_snapshot_id:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *region_name;
+ __u32 region_snapshot_id;
+};
+
+void devlink_region_new_rsp_free(struct devlink_region_new_rsp *rsp);
+
+/*
+ * Create region snapshot.
+ */
+struct devlink_region_new_rsp *
+devlink_region_new(struct ynl_sock *ys, struct devlink_region_new_req *req);
+
+/* ============== DEVLINK_CMD_REGION_DEL ============== */
+/* DEVLINK_CMD_REGION_DEL - do */
+struct devlink_region_del_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 region_name_len;
+ __u32 region_snapshot_id:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *region_name;
+ __u32 region_snapshot_id;
+};
+
+static inline struct devlink_region_del_req *devlink_region_del_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_region_del_req));
+}
+void devlink_region_del_req_free(struct devlink_region_del_req *req);
+
+static inline void
+devlink_region_del_req_set_bus_name(struct devlink_region_del_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_region_del_req_set_dev_name(struct devlink_region_del_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_region_del_req_set_port_index(struct devlink_region_del_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_region_del_req_set_region_name(struct devlink_region_del_req *req,
+ const char *region_name)
+{
+ free(req->region_name);
+ req->_present.region_name_len = strlen(region_name);
+ req->region_name = malloc(req->_present.region_name_len + 1);
+ memcpy(req->region_name, region_name, req->_present.region_name_len);
+ req->region_name[req->_present.region_name_len] = 0;
+}
+static inline void
+devlink_region_del_req_set_region_snapshot_id(struct devlink_region_del_req *req,
+ __u32 region_snapshot_id)
+{
+ req->_present.region_snapshot_id = 1;
+ req->region_snapshot_id = region_snapshot_id;
+}
+
+/*
+ * Delete region snapshot.
+ */
+int devlink_region_del(struct ynl_sock *ys, struct devlink_region_del_req *req);
+
+/* ============== DEVLINK_CMD_REGION_READ ============== */
+/* DEVLINK_CMD_REGION_READ - dump */
+struct devlink_region_read_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 region_name_len;
+ __u32 region_snapshot_id:1;
+ __u32 region_direct:1;
+ __u32 region_chunk_addr:1;
+ __u32 region_chunk_len:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *region_name;
+ __u32 region_snapshot_id;
+ __u64 region_chunk_addr;
+ __u64 region_chunk_len;
+};
+
+static inline struct devlink_region_read_req_dump *
+devlink_region_read_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_region_read_req_dump));
+}
+void
+devlink_region_read_req_dump_free(struct devlink_region_read_req_dump *req);
+
+static inline void
+devlink_region_read_req_dump_set_bus_name(struct devlink_region_read_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_region_read_req_dump_set_dev_name(struct devlink_region_read_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_region_read_req_dump_set_port_index(struct devlink_region_read_req_dump *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_region_read_req_dump_set_region_name(struct devlink_region_read_req_dump *req,
+ const char *region_name)
+{
+ free(req->region_name);
+ req->_present.region_name_len = strlen(region_name);
+ req->region_name = malloc(req->_present.region_name_len + 1);
+ memcpy(req->region_name, region_name, req->_present.region_name_len);
+ req->region_name[req->_present.region_name_len] = 0;
+}
+static inline void
+devlink_region_read_req_dump_set_region_snapshot_id(struct devlink_region_read_req_dump *req,
+ __u32 region_snapshot_id)
+{
+ req->_present.region_snapshot_id = 1;
+ req->region_snapshot_id = region_snapshot_id;
+}
+static inline void
+devlink_region_read_req_dump_set_region_direct(struct devlink_region_read_req_dump *req)
+{
+ req->_present.region_direct = 1;
+}
+static inline void
+devlink_region_read_req_dump_set_region_chunk_addr(struct devlink_region_read_req_dump *req,
+ __u64 region_chunk_addr)
+{
+ req->_present.region_chunk_addr = 1;
+ req->region_chunk_addr = region_chunk_addr;
+}
+static inline void
+devlink_region_read_req_dump_set_region_chunk_len(struct devlink_region_read_req_dump *req,
+ __u64 region_chunk_len)
+{
+ req->_present.region_chunk_len = 1;
+ req->region_chunk_len = region_chunk_len;
+}
+
+struct devlink_region_read_rsp_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 region_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *region_name;
+};
+
+struct devlink_region_read_rsp_list {
+ struct devlink_region_read_rsp_list *next;
+ struct devlink_region_read_rsp_dump obj __attribute__((aligned(8)));
+};
+
+void
+devlink_region_read_rsp_list_free(struct devlink_region_read_rsp_list *rsp);
+
+struct devlink_region_read_rsp_list *
+devlink_region_read_dump(struct ynl_sock *ys,
+ struct devlink_region_read_req_dump *req);
+
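A sketch tying the region ops together: create a snapshot with region new, then read part of it back with the region read dump op and walk the returned list via the ->next/obj members shown above. The region name is caller supplied, the 256-byte chunk length is arbitrary, and the snapshot should eventually be released with region del (not shown):

static int example_region_snapshot_read(struct ynl_sock *ys, const char *region)
{
	struct devlink_region_new_req *new_req;
	struct devlink_region_new_rsp *new_rsp;
	struct devlink_region_read_req_dump *read_req;
	struct devlink_region_read_rsp_list *chunks, *iter;
	__u32 snap_id;
	int n = 0;

	new_req = devlink_region_new_req_alloc();
	if (!new_req)
		return -1;
	devlink_region_new_req_set_bus_name(new_req, "pci");
	devlink_region_new_req_set_dev_name(new_req, "0000:01:00.0");
	devlink_region_new_req_set_region_name(new_req, region);

	new_rsp = devlink_region_new(ys, new_req);
	devlink_region_new_req_free(new_req);
	if (!new_rsp || !new_rsp->_present.region_snapshot_id) {
		if (new_rsp)
			devlink_region_new_rsp_free(new_rsp);
		return -1;
	}
	snap_id = new_rsp->region_snapshot_id;
	devlink_region_new_rsp_free(new_rsp);

	read_req = devlink_region_read_req_dump_alloc();
	if (!read_req)
		return -1;
	devlink_region_read_req_dump_set_bus_name(read_req, "pci");
	devlink_region_read_req_dump_set_dev_name(read_req, "0000:01:00.0");
	devlink_region_read_req_dump_set_region_name(read_req, region);
	devlink_region_read_req_dump_set_region_snapshot_id(read_req, snap_id);
	devlink_region_read_req_dump_set_region_chunk_addr(read_req, 0);
	devlink_region_read_req_dump_set_region_chunk_len(read_req, 256);

	chunks = devlink_region_read_dump(ys, read_req);
	devlink_region_read_req_dump_free(read_req);

	/* each list entry echoes the region identity; the chunk payload
	 * attribute is not modeled in this response struct */
	for (iter = chunks; iter; iter = iter->next)
		n++;
	printf("received %d read chunks\n", n);

	if (chunks)
		devlink_region_read_rsp_list_free(chunks);
	return 0;
}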
+/* ============== DEVLINK_CMD_PORT_PARAM_GET ============== */
+/* DEVLINK_CMD_PORT_PARAM_GET - do */
+struct devlink_port_param_get_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+};
+
+static inline struct devlink_port_param_get_req *
+devlink_port_param_get_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_port_param_get_req));
+}
+void devlink_port_param_get_req_free(struct devlink_port_param_get_req *req);
+
+static inline void
+devlink_port_param_get_req_set_bus_name(struct devlink_port_param_get_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_port_param_get_req_set_dev_name(struct devlink_port_param_get_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_port_param_get_req_set_port_index(struct devlink_port_param_get_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+
+struct devlink_port_param_get_rsp {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+};
+
+void devlink_port_param_get_rsp_free(struct devlink_port_param_get_rsp *rsp);
+
+/*
+ * Get port param instances.
+ */
+struct devlink_port_param_get_rsp *
+devlink_port_param_get(struct ynl_sock *ys,
+ struct devlink_port_param_get_req *req);
+
+/* DEVLINK_CMD_PORT_PARAM_GET - dump */
+struct devlink_port_param_get_list {
+ struct devlink_port_param_get_list *next;
+ struct devlink_port_param_get_rsp obj __attribute__((aligned(8)));
+};
+
+void devlink_port_param_get_list_free(struct devlink_port_param_get_list *rsp);
+
+struct devlink_port_param_get_list *
+devlink_port_param_get_dump(struct ynl_sock *ys);
+
+/* ============== DEVLINK_CMD_PORT_PARAM_SET ============== */
+/* DEVLINK_CMD_PORT_PARAM_SET - do */
+struct devlink_port_param_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+};
+
+static inline struct devlink_port_param_set_req *
+devlink_port_param_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_port_param_set_req));
+}
+void devlink_port_param_set_req_free(struct devlink_port_param_set_req *req);
+
+static inline void
+devlink_port_param_set_req_set_bus_name(struct devlink_port_param_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_port_param_set_req_set_dev_name(struct devlink_port_param_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_port_param_set_req_set_port_index(struct devlink_port_param_set_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+
+/*
+ * Set port param instances.
+ */
+int devlink_port_param_set(struct ynl_sock *ys,
+ struct devlink_port_param_set_req *req);
+
/* ============== DEVLINK_CMD_INFO_GET ============== */
/* DEVLINK_CMD_INFO_GET - do */
struct devlink_info_get_req {
@@ -1144,7 +3279,7 @@ devlink_info_get(struct ynl_sock *ys, struct devlink_info_get_req *req);
/* DEVLINK_CMD_INFO_GET - dump */
struct devlink_info_get_list {
struct devlink_info_get_list *next;
- struct devlink_info_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_info_get_rsp obj __attribute__((aligned(8)));
};
void devlink_info_get_list_free(struct devlink_info_get_list *rsp);
@@ -1288,7 +3423,7 @@ devlink_health_reporter_get_req_dump_set_port_index(struct devlink_health_report
struct devlink_health_reporter_get_list {
struct devlink_health_reporter_get_list *next;
- struct devlink_health_reporter_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_health_reporter_get_rsp obj __attribute__((aligned(8)));
};
void
@@ -1298,6 +3433,466 @@ struct devlink_health_reporter_get_list *
devlink_health_reporter_get_dump(struct ynl_sock *ys,
struct devlink_health_reporter_get_req_dump *req);
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_SET ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_SET - do */
+struct devlink_health_reporter_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 health_reporter_name_len;
+ __u32 health_reporter_graceful_period:1;
+ __u32 health_reporter_auto_recover:1;
+ __u32 health_reporter_auto_dump:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *health_reporter_name;
+ __u64 health_reporter_graceful_period;
+ __u8 health_reporter_auto_recover;
+ __u8 health_reporter_auto_dump;
+};
+
+static inline struct devlink_health_reporter_set_req *
+devlink_health_reporter_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_health_reporter_set_req));
+}
+void
+devlink_health_reporter_set_req_free(struct devlink_health_reporter_set_req *req);
+
+static inline void
+devlink_health_reporter_set_req_set_bus_name(struct devlink_health_reporter_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_set_req_set_dev_name(struct devlink_health_reporter_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_set_req_set_port_index(struct devlink_health_reporter_set_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_health_reporter_set_req_set_health_reporter_name(struct devlink_health_reporter_set_req *req,
+ const char *health_reporter_name)
+{
+ free(req->health_reporter_name);
+ req->_present.health_reporter_name_len = strlen(health_reporter_name);
+ req->health_reporter_name = malloc(req->_present.health_reporter_name_len + 1);
+ memcpy(req->health_reporter_name, health_reporter_name, req->_present.health_reporter_name_len);
+ req->health_reporter_name[req->_present.health_reporter_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_set_req_set_health_reporter_graceful_period(struct devlink_health_reporter_set_req *req,
+ __u64 health_reporter_graceful_period)
+{
+ req->_present.health_reporter_graceful_period = 1;
+ req->health_reporter_graceful_period = health_reporter_graceful_period;
+}
+static inline void
+devlink_health_reporter_set_req_set_health_reporter_auto_recover(struct devlink_health_reporter_set_req *req,
+ __u8 health_reporter_auto_recover)
+{
+ req->_present.health_reporter_auto_recover = 1;
+ req->health_reporter_auto_recover = health_reporter_auto_recover;
+}
+static inline void
+devlink_health_reporter_set_req_set_health_reporter_auto_dump(struct devlink_health_reporter_set_req *req,
+ __u8 health_reporter_auto_dump)
+{
+ req->_present.health_reporter_auto_dump = 1;
+ req->health_reporter_auto_dump = health_reporter_auto_dump;
+}
+
+/*
+ * Set health reporter instances.
+ */
+int devlink_health_reporter_set(struct ynl_sock *ys,
+ struct devlink_health_reporter_set_req *req);
+
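A sketch that tunes a named health reporter: the reporter name is caller supplied, and the graceful period is assumed to be interpreted in milliseconds by the kernel. The usual placeholder and return-value assumptions apply:

static int example_reporter_tune(struct ynl_sock *ys, const char *reporter)
{
	struct devlink_health_reporter_set_req *req;
	int ret;

	req = devlink_health_reporter_set_req_alloc();
	if (!req)
		return -1;

	devlink_health_reporter_set_req_set_bus_name(req, "pci");
	devlink_health_reporter_set_req_set_dev_name(req, "0000:01:00.0");
	devlink_health_reporter_set_req_set_health_reporter_name(req, reporter);
	/* 5000 ms between automatic recoveries, auto-recover enabled */
	devlink_health_reporter_set_req_set_health_reporter_graceful_period(req, 5000);
	devlink_health_reporter_set_req_set_health_reporter_auto_recover(req, 1);

	ret = devlink_health_reporter_set(ys, req);
	devlink_health_reporter_set_req_free(req);
	return ret;
}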
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_RECOVER ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_RECOVER - do */
+struct devlink_health_reporter_recover_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 health_reporter_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *health_reporter_name;
+};
+
+static inline struct devlink_health_reporter_recover_req *
+devlink_health_reporter_recover_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_health_reporter_recover_req));
+}
+void
+devlink_health_reporter_recover_req_free(struct devlink_health_reporter_recover_req *req);
+
+static inline void
+devlink_health_reporter_recover_req_set_bus_name(struct devlink_health_reporter_recover_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_recover_req_set_dev_name(struct devlink_health_reporter_recover_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_recover_req_set_port_index(struct devlink_health_reporter_recover_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_health_reporter_recover_req_set_health_reporter_name(struct devlink_health_reporter_recover_req *req,
+ const char *health_reporter_name)
+{
+ free(req->health_reporter_name);
+ req->_present.health_reporter_name_len = strlen(health_reporter_name);
+ req->health_reporter_name = malloc(req->_present.health_reporter_name_len + 1);
+ memcpy(req->health_reporter_name, health_reporter_name, req->_present.health_reporter_name_len);
+ req->health_reporter_name[req->_present.health_reporter_name_len] = 0;
+}
+
+/*
+ * Recover health reporter instances.
+ */
+int devlink_health_reporter_recover(struct ynl_sock *ys,
+ struct devlink_health_reporter_recover_req *req);
+
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE - do */
+struct devlink_health_reporter_diagnose_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 health_reporter_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *health_reporter_name;
+};
+
+static inline struct devlink_health_reporter_diagnose_req *
+devlink_health_reporter_diagnose_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_health_reporter_diagnose_req));
+}
+void
+devlink_health_reporter_diagnose_req_free(struct devlink_health_reporter_diagnose_req *req);
+
+static inline void
+devlink_health_reporter_diagnose_req_set_bus_name(struct devlink_health_reporter_diagnose_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_diagnose_req_set_dev_name(struct devlink_health_reporter_diagnose_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_diagnose_req_set_port_index(struct devlink_health_reporter_diagnose_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_health_reporter_diagnose_req_set_health_reporter_name(struct devlink_health_reporter_diagnose_req *req,
+ const char *health_reporter_name)
+{
+ free(req->health_reporter_name);
+ req->_present.health_reporter_name_len = strlen(health_reporter_name);
+ req->health_reporter_name = malloc(req->_present.health_reporter_name_len + 1);
+ memcpy(req->health_reporter_name, health_reporter_name, req->_present.health_reporter_name_len);
+ req->health_reporter_name[req->_present.health_reporter_name_len] = 0;
+}
+
+/*
+ * Diagnose health reporter instances.
+ */
+int devlink_health_reporter_diagnose(struct ynl_sock *ys,
+ struct devlink_health_reporter_diagnose_req *req);
+
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET - dump */
+struct devlink_health_reporter_dump_get_req_dump {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 health_reporter_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *health_reporter_name;
+};
+
+static inline struct devlink_health_reporter_dump_get_req_dump *
+devlink_health_reporter_dump_get_req_dump_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_health_reporter_dump_get_req_dump));
+}
+void
+devlink_health_reporter_dump_get_req_dump_free(struct devlink_health_reporter_dump_get_req_dump *req);
+
+static inline void
+devlink_health_reporter_dump_get_req_dump_set_bus_name(struct devlink_health_reporter_dump_get_req_dump *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_dump_get_req_dump_set_dev_name(struct devlink_health_reporter_dump_get_req_dump *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_dump_get_req_dump_set_port_index(struct devlink_health_reporter_dump_get_req_dump *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_health_reporter_dump_get_req_dump_set_health_reporter_name(struct devlink_health_reporter_dump_get_req_dump *req,
+ const char *health_reporter_name)
+{
+ free(req->health_reporter_name);
+ req->_present.health_reporter_name_len = strlen(health_reporter_name);
+ req->health_reporter_name = malloc(req->_present.health_reporter_name_len + 1);
+ memcpy(req->health_reporter_name, health_reporter_name, req->_present.health_reporter_name_len);
+ req->health_reporter_name[req->_present.health_reporter_name_len] = 0;
+}
+
+struct devlink_health_reporter_dump_get_rsp_dump {
+ struct {
+ __u32 fmsg:1;
+ } _present;
+
+ struct devlink_dl_fmsg fmsg;
+};
+
+struct devlink_health_reporter_dump_get_rsp_list {
+ struct devlink_health_reporter_dump_get_rsp_list *next;
+ struct devlink_health_reporter_dump_get_rsp_dump obj __attribute__((aligned(8)));
+};
+
+void
+devlink_health_reporter_dump_get_rsp_list_free(struct devlink_health_reporter_dump_get_rsp_list *rsp);
+
+struct devlink_health_reporter_dump_get_rsp_list *
+devlink_health_reporter_dump_get_dump(struct ynl_sock *ys,
+ struct devlink_health_reporter_dump_get_req_dump *req);
+
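A sketch of fetching a reporter's dump: the response list entries carry a formatted-message (fmsg) nest when present, and decoding struct devlink_dl_fmsg is beyond the scope of this sketch, so the example only counts the nests. Reporter name is caller supplied; other assumptions as before:

static int example_reporter_dump(struct ynl_sock *ys, const char *reporter)
{
	struct devlink_health_reporter_dump_get_req_dump *req;
	struct devlink_health_reporter_dump_get_rsp_list *rsp, *iter;
	int msgs = 0;

	req = devlink_health_reporter_dump_get_req_dump_alloc();
	if (!req)
		return -1;

	devlink_health_reporter_dump_get_req_dump_set_bus_name(req, "pci");
	devlink_health_reporter_dump_get_req_dump_set_dev_name(req, "0000:01:00.0");
	devlink_health_reporter_dump_get_req_dump_set_health_reporter_name(req, reporter);

	rsp = devlink_health_reporter_dump_get_dump(ys, req);
	devlink_health_reporter_dump_get_req_dump_free(req);
	if (!rsp)
		return -1;

	for (iter = rsp; iter; iter = iter->next)
		if (iter->obj._present.fmsg)
			msgs++;
	printf("dump contains %d fmsg nests\n", msgs);

	devlink_health_reporter_dump_get_rsp_list_free(rsp);
	return 0;
}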
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR - do */
+struct devlink_health_reporter_dump_clear_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 health_reporter_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *health_reporter_name;
+};
+
+static inline struct devlink_health_reporter_dump_clear_req *
+devlink_health_reporter_dump_clear_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_health_reporter_dump_clear_req));
+}
+void
+devlink_health_reporter_dump_clear_req_free(struct devlink_health_reporter_dump_clear_req *req);
+
+static inline void
+devlink_health_reporter_dump_clear_req_set_bus_name(struct devlink_health_reporter_dump_clear_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_dump_clear_req_set_dev_name(struct devlink_health_reporter_dump_clear_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_dump_clear_req_set_port_index(struct devlink_health_reporter_dump_clear_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_health_reporter_dump_clear_req_set_health_reporter_name(struct devlink_health_reporter_dump_clear_req *req,
+ const char *health_reporter_name)
+{
+ free(req->health_reporter_name);
+ req->_present.health_reporter_name_len = strlen(health_reporter_name);
+ req->health_reporter_name = malloc(req->_present.health_reporter_name_len + 1);
+ memcpy(req->health_reporter_name, health_reporter_name, req->_present.health_reporter_name_len);
+ req->health_reporter_name[req->_present.health_reporter_name_len] = 0;
+}
+
+/*
+ * Clear dump of health reporter instances.
+ */
+int devlink_health_reporter_dump_clear(struct ynl_sock *ys,
+ struct devlink_health_reporter_dump_clear_req *req);
+
+/* ============== DEVLINK_CMD_FLASH_UPDATE ============== */
+/* DEVLINK_CMD_FLASH_UPDATE - do */
+struct devlink_flash_update_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 flash_update_file_name_len;
+ __u32 flash_update_component_len;
+ __u32 flash_update_overwrite_mask:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *flash_update_file_name;
+ char *flash_update_component;
+ struct nla_bitfield32 flash_update_overwrite_mask;
+};
+
+static inline struct devlink_flash_update_req *
+devlink_flash_update_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_flash_update_req));
+}
+void devlink_flash_update_req_free(struct devlink_flash_update_req *req);
+
+static inline void
+devlink_flash_update_req_set_bus_name(struct devlink_flash_update_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_flash_update_req_set_dev_name(struct devlink_flash_update_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_flash_update_req_set_flash_update_file_name(struct devlink_flash_update_req *req,
+ const char *flash_update_file_name)
+{
+ free(req->flash_update_file_name);
+ req->_present.flash_update_file_name_len = strlen(flash_update_file_name);
+ req->flash_update_file_name = malloc(req->_present.flash_update_file_name_len + 1);
+ memcpy(req->flash_update_file_name, flash_update_file_name, req->_present.flash_update_file_name_len);
+ req->flash_update_file_name[req->_present.flash_update_file_name_len] = 0;
+}
+static inline void
+devlink_flash_update_req_set_flash_update_component(struct devlink_flash_update_req *req,
+ const char *flash_update_component)
+{
+ free(req->flash_update_component);
+ req->_present.flash_update_component_len = strlen(flash_update_component);
+ req->flash_update_component = malloc(req->_present.flash_update_component_len + 1);
+ memcpy(req->flash_update_component, flash_update_component, req->_present.flash_update_component_len);
+ req->flash_update_component[req->_present.flash_update_component_len] = 0;
+}
+static inline void
+devlink_flash_update_req_set_flash_update_overwrite_mask(struct devlink_flash_update_req *req,
+ struct nla_bitfield32 *flash_update_overwrite_mask)
+{
+ req->_present.flash_update_overwrite_mask = 1;
+ memcpy(&req->flash_update_overwrite_mask, flash_update_overwrite_mask, sizeof(struct nla_bitfield32));
+}
+
+/*
+ * Flash update devlink instances.
+ */
+int devlink_flash_update(struct ynl_sock *ys,
+ struct devlink_flash_update_req *req);
+
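/*
 * Illustrative usage sketch (not generated code, not part of this patch):
 * roughly how the flash-update request setters above combine with the new
 * bitfield32 overwrite mask.  Assumes the ynl user library API used by
 * tools/net/ynl/samples (ynl.h, ynl_sock_create(), ynl_sock_destroy(),
 * ynl_devlink_family); the bus/dev/file names are placeholders.
 */
#include <stdio.h>
#include <linux/devlink.h>
#include <linux/netlink.h>
#include <ynl.h>
#include "devlink-user.h"

int main(void)
{
	struct nla_bitfield32 ovr = {
		/* select and enable the "overwrite settings" bit */
		.selector = DEVLINK_FLASH_OVERWRITE_SETTINGS,
		.value = DEVLINK_FLASH_OVERWRITE_SETTINGS,
	};
	struct devlink_flash_update_req *req;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret;

	ys = ynl_sock_create(&ynl_devlink_family, &yerr);
	if (!ys) {
		fprintf(stderr, "socket: %s\n", yerr.msg);
		return 1;
	}

	req = devlink_flash_update_req_alloc();
	devlink_flash_update_req_set_bus_name(req, "pci");           /* placeholder */
	devlink_flash_update_req_set_dev_name(req, "0000:01:00.0");  /* placeholder */
	devlink_flash_update_req_set_flash_update_file_name(req, "fw.bin");
	devlink_flash_update_req_set_flash_update_overwrite_mask(req, &ovr);

	ret = devlink_flash_update(ys, req);
	if (ret)
		fprintf(stderr, "flash update: %s\n", yerr.msg);

	devlink_flash_update_req_free(req);
	ynl_sock_destroy(ys);
	return ret ? 1 : 0;
}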
/* ============== DEVLINK_CMD_TRAP_GET ============== */
/* DEVLINK_CMD_TRAP_GET - do */
struct devlink_trap_get_req {
@@ -1410,7 +4005,7 @@ devlink_trap_get_req_dump_set_dev_name(struct devlink_trap_get_req_dump *req,
struct devlink_trap_get_list {
struct devlink_trap_get_list *next;
- struct devlink_trap_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_trap_get_rsp obj __attribute__((aligned(8)));
};
void devlink_trap_get_list_free(struct devlink_trap_get_list *rsp);
@@ -1419,6 +4014,71 @@ struct devlink_trap_get_list *
devlink_trap_get_dump(struct ynl_sock *ys,
struct devlink_trap_get_req_dump *req);
+/* ============== DEVLINK_CMD_TRAP_SET ============== */
+/* DEVLINK_CMD_TRAP_SET - do */
+struct devlink_trap_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 trap_name_len;
+ __u32 trap_action:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *trap_name;
+ enum devlink_trap_action trap_action;
+};
+
+static inline struct devlink_trap_set_req *devlink_trap_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_trap_set_req));
+}
+void devlink_trap_set_req_free(struct devlink_trap_set_req *req);
+
+static inline void
+devlink_trap_set_req_set_bus_name(struct devlink_trap_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_trap_set_req_set_dev_name(struct devlink_trap_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_trap_set_req_set_trap_name(struct devlink_trap_set_req *req,
+ const char *trap_name)
+{
+ free(req->trap_name);
+ req->_present.trap_name_len = strlen(trap_name);
+ req->trap_name = malloc(req->_present.trap_name_len + 1);
+ memcpy(req->trap_name, trap_name, req->_present.trap_name_len);
+ req->trap_name[req->_present.trap_name_len] = 0;
+}
+static inline void
+devlink_trap_set_req_set_trap_action(struct devlink_trap_set_req *req,
+ enum devlink_trap_action trap_action)
+{
+ req->_present.trap_action = 1;
+ req->trap_action = trap_action;
+}
+
+/*
+ * Set trap instances.
+ */
+int devlink_trap_set(struct ynl_sock *ys, struct devlink_trap_set_req *req);
+
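/*
 * Illustrative usage sketch (not generated code): changing the action of a
 * single trap by name.  "ys" comes from ynl_sock_create() as in the
 * flash-update sketch above; the bus/dev and trap names are placeholders.
 */
#include <linux/devlink.h>
#include "devlink-user.h"

static int trap_drop(struct ynl_sock *ys)
{
	struct devlink_trap_set_req *req;
	int ret;

	req = devlink_trap_set_req_alloc();
	devlink_trap_set_req_set_bus_name(req, "netdevsim");   /* placeholder */
	devlink_trap_set_req_set_dev_name(req, "netdevsim1");  /* placeholder */
	devlink_trap_set_req_set_trap_name(req, "source_mac_is_multicast");
	devlink_trap_set_req_set_trap_action(req, DEVLINK_TRAP_ACTION_DROP);

	ret = devlink_trap_set(ys, req);
	devlink_trap_set_req_free(req);
	return ret;
}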
/* ============== DEVLINK_CMD_TRAP_GROUP_GET ============== */
/* DEVLINK_CMD_TRAP_GROUP_GET - do */
struct devlink_trap_group_get_req {
@@ -1534,7 +4194,7 @@ devlink_trap_group_get_req_dump_set_dev_name(struct devlink_trap_group_get_req_d
struct devlink_trap_group_get_list {
struct devlink_trap_group_get_list *next;
- struct devlink_trap_group_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_trap_group_get_rsp obj __attribute__((aligned(8)));
};
void devlink_trap_group_get_list_free(struct devlink_trap_group_get_list *rsp);
@@ -1543,6 +4203,82 @@ struct devlink_trap_group_get_list *
devlink_trap_group_get_dump(struct ynl_sock *ys,
struct devlink_trap_group_get_req_dump *req);
+/* ============== DEVLINK_CMD_TRAP_GROUP_SET ============== */
+/* DEVLINK_CMD_TRAP_GROUP_SET - do */
+struct devlink_trap_group_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 trap_group_name_len;
+ __u32 trap_action:1;
+ __u32 trap_policer_id:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *trap_group_name;
+ enum devlink_trap_action trap_action;
+ __u32 trap_policer_id;
+};
+
+static inline struct devlink_trap_group_set_req *
+devlink_trap_group_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_trap_group_set_req));
+}
+void devlink_trap_group_set_req_free(struct devlink_trap_group_set_req *req);
+
+static inline void
+devlink_trap_group_set_req_set_bus_name(struct devlink_trap_group_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_trap_group_set_req_set_dev_name(struct devlink_trap_group_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_trap_group_set_req_set_trap_group_name(struct devlink_trap_group_set_req *req,
+ const char *trap_group_name)
+{
+ free(req->trap_group_name);
+ req->_present.trap_group_name_len = strlen(trap_group_name);
+ req->trap_group_name = malloc(req->_present.trap_group_name_len + 1);
+ memcpy(req->trap_group_name, trap_group_name, req->_present.trap_group_name_len);
+ req->trap_group_name[req->_present.trap_group_name_len] = 0;
+}
+static inline void
+devlink_trap_group_set_req_set_trap_action(struct devlink_trap_group_set_req *req,
+ enum devlink_trap_action trap_action)
+{
+ req->_present.trap_action = 1;
+ req->trap_action = trap_action;
+}
+static inline void
+devlink_trap_group_set_req_set_trap_policer_id(struct devlink_trap_group_set_req *req,
+ __u32 trap_policer_id)
+{
+ req->_present.trap_policer_id = 1;
+ req->trap_policer_id = trap_policer_id;
+}
+
+/*
+ * Set trap group instances.
+ */
+int devlink_trap_group_set(struct ynl_sock *ys,
+ struct devlink_trap_group_set_req *req);
+
/* ============== DEVLINK_CMD_TRAP_POLICER_GET ============== */
/* DEVLINK_CMD_TRAP_POLICER_GET - do */
struct devlink_trap_policer_get_req {
@@ -1657,7 +4393,7 @@ devlink_trap_policer_get_req_dump_set_dev_name(struct devlink_trap_policer_get_r
struct devlink_trap_policer_get_list {
struct devlink_trap_policer_get_list *next;
- struct devlink_trap_policer_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_trap_policer_get_rsp obj __attribute__((aligned(8)));
};
void
@@ -1667,6 +4403,148 @@ struct devlink_trap_policer_get_list *
devlink_trap_policer_get_dump(struct ynl_sock *ys,
struct devlink_trap_policer_get_req_dump *req);
+/* ============== DEVLINK_CMD_TRAP_POLICER_SET ============== */
+/* DEVLINK_CMD_TRAP_POLICER_SET - do */
+struct devlink_trap_policer_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 trap_policer_id:1;
+ __u32 trap_policer_rate:1;
+ __u32 trap_policer_burst:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 trap_policer_id;
+ __u64 trap_policer_rate;
+ __u64 trap_policer_burst;
+};
+
+static inline struct devlink_trap_policer_set_req *
+devlink_trap_policer_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_trap_policer_set_req));
+}
+void
+devlink_trap_policer_set_req_free(struct devlink_trap_policer_set_req *req);
+
+static inline void
+devlink_trap_policer_set_req_set_bus_name(struct devlink_trap_policer_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_trap_policer_set_req_set_dev_name(struct devlink_trap_policer_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_trap_policer_set_req_set_trap_policer_id(struct devlink_trap_policer_set_req *req,
+ __u32 trap_policer_id)
+{
+ req->_present.trap_policer_id = 1;
+ req->trap_policer_id = trap_policer_id;
+}
+static inline void
+devlink_trap_policer_set_req_set_trap_policer_rate(struct devlink_trap_policer_set_req *req,
+ __u64 trap_policer_rate)
+{
+ req->_present.trap_policer_rate = 1;
+ req->trap_policer_rate = trap_policer_rate;
+}
+static inline void
+devlink_trap_policer_set_req_set_trap_policer_burst(struct devlink_trap_policer_set_req *req,
+ __u64 trap_policer_burst)
+{
+ req->_present.trap_policer_burst = 1;
+ req->trap_policer_burst = trap_policer_burst;
+}
+
+/*
+ * Set trap policer instances.
+ */
+int devlink_trap_policer_set(struct ynl_sock *ys,
+ struct devlink_trap_policer_set_req *req);
+
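/*
 * Illustrative usage sketch (not generated code): updating a trap policer.
 * Per the devlink trap documentation, rate is in packets per second and
 * burst in packets; "ys" is set up as in the flash-update sketch and the
 * bus/dev names and policer id are placeholders.
 */
#include "devlink-user.h"

static int policer_tune(struct ynl_sock *ys)
{
	struct devlink_trap_policer_set_req *req;
	int ret;

	req = devlink_trap_policer_set_req_alloc();
	devlink_trap_policer_set_req_set_bus_name(req, "pci");          /* placeholder */
	devlink_trap_policer_set_req_set_dev_name(req, "0000:01:00.0"); /* placeholder */
	devlink_trap_policer_set_req_set_trap_policer_id(req, 1);
	devlink_trap_policer_set_req_set_trap_policer_rate(req, 1000);  /* pps */
	devlink_trap_policer_set_req_set_trap_policer_burst(req, 128);  /* packets */

	ret = devlink_trap_policer_set(ys, req);
	devlink_trap_policer_set_req_free(req);
	return ret;
}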
+/* ============== DEVLINK_CMD_HEALTH_REPORTER_TEST ============== */
+/* DEVLINK_CMD_HEALTH_REPORTER_TEST - do */
+struct devlink_health_reporter_test_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 port_index:1;
+ __u32 health_reporter_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 port_index;
+ char *health_reporter_name;
+};
+
+static inline struct devlink_health_reporter_test_req *
+devlink_health_reporter_test_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_health_reporter_test_req));
+}
+void
+devlink_health_reporter_test_req_free(struct devlink_health_reporter_test_req *req);
+
+static inline void
+devlink_health_reporter_test_req_set_bus_name(struct devlink_health_reporter_test_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_test_req_set_dev_name(struct devlink_health_reporter_test_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_health_reporter_test_req_set_port_index(struct devlink_health_reporter_test_req *req,
+ __u32 port_index)
+{
+ req->_present.port_index = 1;
+ req->port_index = port_index;
+}
+static inline void
+devlink_health_reporter_test_req_set_health_reporter_name(struct devlink_health_reporter_test_req *req,
+ const char *health_reporter_name)
+{
+ free(req->health_reporter_name);
+ req->_present.health_reporter_name_len = strlen(health_reporter_name);
+ req->health_reporter_name = malloc(req->_present.health_reporter_name_len + 1);
+ memcpy(req->health_reporter_name, health_reporter_name, req->_present.health_reporter_name_len);
+ req->health_reporter_name[req->_present.health_reporter_name_len] = 0;
+}
+
+/*
+ * Test health reporter instances.
+ */
+int devlink_health_reporter_test(struct ynl_sock *ys,
+ struct devlink_health_reporter_test_req *req);
+
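/*
 * Illustrative usage sketch (not generated code): triggering a health
 * reporter self test.  The port index setter is only needed for per-port
 * reporters; "ys" is set up as in the flash-update sketch, and the bus/dev
 * and reporter names are placeholders.
 */
#include <stdbool.h>
#include "devlink-user.h"

static int reporter_test(struct ynl_sock *ys, bool per_port)
{
	struct devlink_health_reporter_test_req *req;
	int ret;

	req = devlink_health_reporter_test_req_alloc();
	devlink_health_reporter_test_req_set_bus_name(req, "pci");          /* placeholder */
	devlink_health_reporter_test_req_set_dev_name(req, "0000:01:00.0"); /* placeholder */
	if (per_port)
		devlink_health_reporter_test_req_set_port_index(req, 0);    /* placeholder */
	devlink_health_reporter_test_req_set_health_reporter_name(req, "dummy"); /* placeholder */

	ret = devlink_health_reporter_test(ys, req);
	devlink_health_reporter_test_req_free(req);
	return ret;
}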
/* ============== DEVLINK_CMD_RATE_GET ============== */
/* DEVLINK_CMD_RATE_GET - do */
struct devlink_rate_get_req {
@@ -1790,7 +4668,7 @@ devlink_rate_get_req_dump_set_dev_name(struct devlink_rate_get_req_dump *req,
struct devlink_rate_get_list {
struct devlink_rate_get_list *next;
- struct devlink_rate_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_rate_get_rsp obj __attribute__((aligned(8)));
};
void devlink_rate_get_list_free(struct devlink_rate_get_list *rsp);
@@ -1799,6 +4677,270 @@ struct devlink_rate_get_list *
devlink_rate_get_dump(struct ynl_sock *ys,
struct devlink_rate_get_req_dump *req);
+/* ============== DEVLINK_CMD_RATE_SET ============== */
+/* DEVLINK_CMD_RATE_SET - do */
+struct devlink_rate_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 rate_node_name_len;
+ __u32 rate_tx_share:1;
+ __u32 rate_tx_max:1;
+ __u32 rate_tx_priority:1;
+ __u32 rate_tx_weight:1;
+ __u32 rate_parent_node_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *rate_node_name;
+ __u64 rate_tx_share;
+ __u64 rate_tx_max;
+ __u32 rate_tx_priority;
+ __u32 rate_tx_weight;
+ char *rate_parent_node_name;
+};
+
+static inline struct devlink_rate_set_req *devlink_rate_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_rate_set_req));
+}
+void devlink_rate_set_req_free(struct devlink_rate_set_req *req);
+
+static inline void
+devlink_rate_set_req_set_bus_name(struct devlink_rate_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_rate_set_req_set_dev_name(struct devlink_rate_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_rate_set_req_set_rate_node_name(struct devlink_rate_set_req *req,
+ const char *rate_node_name)
+{
+ free(req->rate_node_name);
+ req->_present.rate_node_name_len = strlen(rate_node_name);
+ req->rate_node_name = malloc(req->_present.rate_node_name_len + 1);
+ memcpy(req->rate_node_name, rate_node_name, req->_present.rate_node_name_len);
+ req->rate_node_name[req->_present.rate_node_name_len] = 0;
+}
+static inline void
+devlink_rate_set_req_set_rate_tx_share(struct devlink_rate_set_req *req,
+ __u64 rate_tx_share)
+{
+ req->_present.rate_tx_share = 1;
+ req->rate_tx_share = rate_tx_share;
+}
+static inline void
+devlink_rate_set_req_set_rate_tx_max(struct devlink_rate_set_req *req,
+ __u64 rate_tx_max)
+{
+ req->_present.rate_tx_max = 1;
+ req->rate_tx_max = rate_tx_max;
+}
+static inline void
+devlink_rate_set_req_set_rate_tx_priority(struct devlink_rate_set_req *req,
+ __u32 rate_tx_priority)
+{
+ req->_present.rate_tx_priority = 1;
+ req->rate_tx_priority = rate_tx_priority;
+}
+static inline void
+devlink_rate_set_req_set_rate_tx_weight(struct devlink_rate_set_req *req,
+ __u32 rate_tx_weight)
+{
+ req->_present.rate_tx_weight = 1;
+ req->rate_tx_weight = rate_tx_weight;
+}
+static inline void
+devlink_rate_set_req_set_rate_parent_node_name(struct devlink_rate_set_req *req,
+ const char *rate_parent_node_name)
+{
+ free(req->rate_parent_node_name);
+ req->_present.rate_parent_node_name_len = strlen(rate_parent_node_name);
+ req->rate_parent_node_name = malloc(req->_present.rate_parent_node_name_len + 1);
+ memcpy(req->rate_parent_node_name, rate_parent_node_name, req->_present.rate_parent_node_name_len);
+ req->rate_parent_node_name[req->_present.rate_parent_node_name_len] = 0;
+}
+
+/*
+ * Set rate instances.
+ */
+int devlink_rate_set(struct ynl_sock *ys, struct devlink_rate_set_req *req);
+
+/* ============== DEVLINK_CMD_RATE_NEW ============== */
+/* DEVLINK_CMD_RATE_NEW - do */
+struct devlink_rate_new_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 rate_node_name_len;
+ __u32 rate_tx_share:1;
+ __u32 rate_tx_max:1;
+ __u32 rate_tx_priority:1;
+ __u32 rate_tx_weight:1;
+ __u32 rate_parent_node_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *rate_node_name;
+ __u64 rate_tx_share;
+ __u64 rate_tx_max;
+ __u32 rate_tx_priority;
+ __u32 rate_tx_weight;
+ char *rate_parent_node_name;
+};
+
+static inline struct devlink_rate_new_req *devlink_rate_new_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_rate_new_req));
+}
+void devlink_rate_new_req_free(struct devlink_rate_new_req *req);
+
+static inline void
+devlink_rate_new_req_set_bus_name(struct devlink_rate_new_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_rate_new_req_set_dev_name(struct devlink_rate_new_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_rate_new_req_set_rate_node_name(struct devlink_rate_new_req *req,
+ const char *rate_node_name)
+{
+ free(req->rate_node_name);
+ req->_present.rate_node_name_len = strlen(rate_node_name);
+ req->rate_node_name = malloc(req->_present.rate_node_name_len + 1);
+ memcpy(req->rate_node_name, rate_node_name, req->_present.rate_node_name_len);
+ req->rate_node_name[req->_present.rate_node_name_len] = 0;
+}
+static inline void
+devlink_rate_new_req_set_rate_tx_share(struct devlink_rate_new_req *req,
+ __u64 rate_tx_share)
+{
+ req->_present.rate_tx_share = 1;
+ req->rate_tx_share = rate_tx_share;
+}
+static inline void
+devlink_rate_new_req_set_rate_tx_max(struct devlink_rate_new_req *req,
+ __u64 rate_tx_max)
+{
+ req->_present.rate_tx_max = 1;
+ req->rate_tx_max = rate_tx_max;
+}
+static inline void
+devlink_rate_new_req_set_rate_tx_priority(struct devlink_rate_new_req *req,
+ __u32 rate_tx_priority)
+{
+ req->_present.rate_tx_priority = 1;
+ req->rate_tx_priority = rate_tx_priority;
+}
+static inline void
+devlink_rate_new_req_set_rate_tx_weight(struct devlink_rate_new_req *req,
+ __u32 rate_tx_weight)
+{
+ req->_present.rate_tx_weight = 1;
+ req->rate_tx_weight = rate_tx_weight;
+}
+static inline void
+devlink_rate_new_req_set_rate_parent_node_name(struct devlink_rate_new_req *req,
+ const char *rate_parent_node_name)
+{
+ free(req->rate_parent_node_name);
+ req->_present.rate_parent_node_name_len = strlen(rate_parent_node_name);
+ req->rate_parent_node_name = malloc(req->_present.rate_parent_node_name_len + 1);
+ memcpy(req->rate_parent_node_name, rate_parent_node_name, req->_present.rate_parent_node_name_len);
+ req->rate_parent_node_name[req->_present.rate_parent_node_name_len] = 0;
+}
+
+/*
+ * Create rate instances.
+ */
+int devlink_rate_new(struct ynl_sock *ys, struct devlink_rate_new_req *req);
+
+/* ============== DEVLINK_CMD_RATE_DEL ============== */
+/* DEVLINK_CMD_RATE_DEL - do */
+struct devlink_rate_del_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 rate_node_name_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ char *rate_node_name;
+};
+
+static inline struct devlink_rate_del_req *devlink_rate_del_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_rate_del_req));
+}
+void devlink_rate_del_req_free(struct devlink_rate_del_req *req);
+
+static inline void
+devlink_rate_del_req_set_bus_name(struct devlink_rate_del_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_rate_del_req_set_dev_name(struct devlink_rate_del_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_rate_del_req_set_rate_node_name(struct devlink_rate_del_req *req,
+ const char *rate_node_name)
+{
+ free(req->rate_node_name);
+ req->_present.rate_node_name_len = strlen(rate_node_name);
+ req->rate_node_name = malloc(req->_present.rate_node_name_len + 1);
+ memcpy(req->rate_node_name, rate_node_name, req->_present.rate_node_name_len);
+ req->rate_node_name[req->_present.rate_node_name_len] = 0;
+}
+
+/*
+ * Delete rate instances.
+ */
+int devlink_rate_del(struct ynl_sock *ys, struct devlink_rate_del_req *req);
+
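/*
 * Illustrative usage sketch (not generated code): the typical rate-node
 * life cycle with the three requests above -- create a named node, adjust
 * it, then delete it.  "ys" is set up as in the flash-update sketch; the
 * bus/dev names, node name and byte-per-second limits are placeholders.
 */
#include "devlink-user.h"

static int rate_node_demo(struct ynl_sock *ys)
{
	struct devlink_rate_new_req *new_req;
	struct devlink_rate_set_req *set_req;
	struct devlink_rate_del_req *del_req;
	int ret, del_ret;

	new_req = devlink_rate_new_req_alloc();
	devlink_rate_new_req_set_bus_name(new_req, "pci");          /* placeholder */
	devlink_rate_new_req_set_dev_name(new_req, "0000:01:00.0"); /* placeholder */
	devlink_rate_new_req_set_rate_node_name(new_req, "group1");
	devlink_rate_new_req_set_rate_tx_max(new_req, 100000000);   /* placeholder limit */
	ret = devlink_rate_new(ys, new_req);
	devlink_rate_new_req_free(new_req);
	if (ret)
		return ret;

	set_req = devlink_rate_set_req_alloc();
	devlink_rate_set_req_set_bus_name(set_req, "pci");
	devlink_rate_set_req_set_dev_name(set_req, "0000:01:00.0");
	devlink_rate_set_req_set_rate_node_name(set_req, "group1");
	devlink_rate_set_req_set_rate_tx_share(set_req, 50000000);  /* placeholder limit */
	ret = devlink_rate_set(ys, set_req);
	devlink_rate_set_req_free(set_req);

	del_req = devlink_rate_del_req_alloc();
	devlink_rate_del_req_set_bus_name(del_req, "pci");
	devlink_rate_del_req_set_dev_name(del_req, "0000:01:00.0");
	devlink_rate_del_req_set_rate_node_name(del_req, "group1");
	del_ret = devlink_rate_del(ys, del_req);
	devlink_rate_del_req_free(del_req);

	return ret ? ret : del_ret;
}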
/* ============== DEVLINK_CMD_LINECARD_GET ============== */
/* DEVLINK_CMD_LINECARD_GET - do */
struct devlink_linecard_get_req {
@@ -1910,7 +5052,7 @@ devlink_linecard_get_req_dump_set_dev_name(struct devlink_linecard_get_req_dump
struct devlink_linecard_get_list {
struct devlink_linecard_get_list *next;
- struct devlink_linecard_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_linecard_get_rsp obj __attribute__((aligned(8)));
};
void devlink_linecard_get_list_free(struct devlink_linecard_get_list *rsp);
@@ -1919,6 +5061,73 @@ struct devlink_linecard_get_list *
devlink_linecard_get_dump(struct ynl_sock *ys,
struct devlink_linecard_get_req_dump *req);
+/* ============== DEVLINK_CMD_LINECARD_SET ============== */
+/* DEVLINK_CMD_LINECARD_SET - do */
+struct devlink_linecard_set_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 linecard_index:1;
+ __u32 linecard_type_len;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ __u32 linecard_index;
+ char *linecard_type;
+};
+
+static inline struct devlink_linecard_set_req *
+devlink_linecard_set_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_linecard_set_req));
+}
+void devlink_linecard_set_req_free(struct devlink_linecard_set_req *req);
+
+static inline void
+devlink_linecard_set_req_set_bus_name(struct devlink_linecard_set_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_linecard_set_req_set_dev_name(struct devlink_linecard_set_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_linecard_set_req_set_linecard_index(struct devlink_linecard_set_req *req,
+ __u32 linecard_index)
+{
+ req->_present.linecard_index = 1;
+ req->linecard_index = linecard_index;
+}
+static inline void
+devlink_linecard_set_req_set_linecard_type(struct devlink_linecard_set_req *req,
+ const char *linecard_type)
+{
+ free(req->linecard_type);
+ req->_present.linecard_type_len = strlen(linecard_type);
+ req->linecard_type = malloc(req->_present.linecard_type_len + 1);
+ memcpy(req->linecard_type, linecard_type, req->_present.linecard_type_len);
+ req->linecard_type[req->_present.linecard_type_len] = 0;
+}
+
+/*
+ * Set line card instances.
+ */
+int devlink_linecard_set(struct ynl_sock *ys,
+ struct devlink_linecard_set_req *req);
+
/* ============== DEVLINK_CMD_SELFTESTS_GET ============== */
/* DEVLINK_CMD_SELFTESTS_GET - do */
struct devlink_selftests_get_req {
@@ -1981,7 +5190,7 @@ devlink_selftests_get(struct ynl_sock *ys,
/* DEVLINK_CMD_SELFTESTS_GET - dump */
struct devlink_selftests_get_list {
struct devlink_selftests_get_list *next;
- struct devlink_selftests_get_rsp obj __attribute__ ((aligned (8)));
+ struct devlink_selftests_get_rsp obj __attribute__((aligned(8)));
};
void devlink_selftests_get_list_free(struct devlink_selftests_get_list *rsp);
@@ -1989,4 +5198,58 @@ void devlink_selftests_get_list_free(struct devlink_selftests_get_list *rsp);
struct devlink_selftests_get_list *
devlink_selftests_get_dump(struct ynl_sock *ys);
+/* ============== DEVLINK_CMD_SELFTESTS_RUN ============== */
+/* DEVLINK_CMD_SELFTESTS_RUN - do */
+struct devlink_selftests_run_req {
+ struct {
+ __u32 bus_name_len;
+ __u32 dev_name_len;
+ __u32 selftests:1;
+ } _present;
+
+ char *bus_name;
+ char *dev_name;
+ struct devlink_dl_selftest_id selftests;
+};
+
+static inline struct devlink_selftests_run_req *
+devlink_selftests_run_req_alloc(void)
+{
+ return calloc(1, sizeof(struct devlink_selftests_run_req));
+}
+void devlink_selftests_run_req_free(struct devlink_selftests_run_req *req);
+
+static inline void
+devlink_selftests_run_req_set_bus_name(struct devlink_selftests_run_req *req,
+ const char *bus_name)
+{
+ free(req->bus_name);
+ req->_present.bus_name_len = strlen(bus_name);
+ req->bus_name = malloc(req->_present.bus_name_len + 1);
+ memcpy(req->bus_name, bus_name, req->_present.bus_name_len);
+ req->bus_name[req->_present.bus_name_len] = 0;
+}
+static inline void
+devlink_selftests_run_req_set_dev_name(struct devlink_selftests_run_req *req,
+ const char *dev_name)
+{
+ free(req->dev_name);
+ req->_present.dev_name_len = strlen(dev_name);
+ req->dev_name = malloc(req->_present.dev_name_len + 1);
+ memcpy(req->dev_name, dev_name, req->_present.dev_name_len);
+ req->dev_name[req->_present.dev_name_len] = 0;
+}
+static inline void
+devlink_selftests_run_req_set_selftests_flash(struct devlink_selftests_run_req *req)
+{
+ req->_present.selftests = 1;
+ req->selftests._present.flash = 1;
+}
+
+/*
+ * Run device selftest instances.
+ */
+int devlink_selftests_run(struct ynl_sock *ys,
+ struct devlink_selftests_run_req *req);
+
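/*
 * Illustrative usage sketch (not generated code): requesting the flash
 * selftest through the nested selftest-id flag setter above.  "ys" is set
 * up as in the flash-update sketch; bus/dev names are placeholders.
 */
#include "devlink-user.h"

static int run_flash_selftest(struct ynl_sock *ys)
{
	struct devlink_selftests_run_req *req;
	int ret;

	req = devlink_selftests_run_req_alloc();
	devlink_selftests_run_req_set_bus_name(req, "pci");          /* placeholder */
	devlink_selftests_run_req_set_dev_name(req, "0000:01:00.0"); /* placeholder */
	devlink_selftests_run_req_set_selftests_flash(req);

	ret = devlink_selftests_run(ys, req);
	devlink_selftests_run_req_free(req);
	return ret;
}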
#endif /* _LINUX_DEVLINK_GEN_H */
diff --git a/tools/net/ynl/generated/ethtool-user.h b/tools/net/ynl/generated/ethtool-user.h
index ddc1a5209992..ca0ec5fd7798 100644
--- a/tools/net/ynl/generated/ethtool-user.h
+++ b/tools/net/ynl/generated/ethtool-user.h
@@ -347,7 +347,7 @@ ethtool_strset_get_req_dump_set_counts_only(struct ethtool_strset_get_req_dump *
struct ethtool_strset_get_list {
struct ethtool_strset_get_list *next;
- struct ethtool_strset_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_strset_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_strset_get_list_free(struct ethtool_strset_get_list *rsp);
@@ -472,7 +472,7 @@ ethtool_linkinfo_get_req_dump_set_header_flags(struct ethtool_linkinfo_get_req_d
struct ethtool_linkinfo_get_list {
struct ethtool_linkinfo_get_list *next;
- struct ethtool_linkinfo_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_linkinfo_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_linkinfo_get_list_free(struct ethtool_linkinfo_get_list *rsp);
@@ -487,7 +487,7 @@ struct ethtool_linkinfo_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_linkinfo_get_ntf *ntf);
- struct ethtool_linkinfo_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_linkinfo_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_linkinfo_get_ntf_free(struct ethtool_linkinfo_get_ntf *rsp);
@@ -712,7 +712,7 @@ ethtool_linkmodes_get_req_dump_set_header_flags(struct ethtool_linkmodes_get_req
struct ethtool_linkmodes_get_list {
struct ethtool_linkmodes_get_list *next;
- struct ethtool_linkmodes_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_linkmodes_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_linkmodes_get_list_free(struct ethtool_linkmodes_get_list *rsp);
@@ -727,7 +727,7 @@ struct ethtool_linkmodes_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_linkmodes_get_ntf *ntf);
- struct ethtool_linkmodes_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_linkmodes_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_linkmodes_get_ntf_free(struct ethtool_linkmodes_get_ntf *rsp);
@@ -1014,7 +1014,7 @@ ethtool_linkstate_get_req_dump_set_header_flags(struct ethtool_linkstate_get_req
struct ethtool_linkstate_get_list {
struct ethtool_linkstate_get_list *next;
- struct ethtool_linkstate_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_linkstate_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_linkstate_get_list_free(struct ethtool_linkstate_get_list *rsp);
@@ -1129,7 +1129,7 @@ ethtool_debug_get_req_dump_set_header_flags(struct ethtool_debug_get_req_dump *r
struct ethtool_debug_get_list {
struct ethtool_debug_get_list *next;
- struct ethtool_debug_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_debug_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_debug_get_list_free(struct ethtool_debug_get_list *rsp);
@@ -1144,7 +1144,7 @@ struct ethtool_debug_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_debug_get_ntf *ntf);
- struct ethtool_debug_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_debug_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_debug_get_ntf_free(struct ethtool_debug_get_ntf *rsp);
@@ -1330,7 +1330,7 @@ ethtool_wol_get_req_dump_set_header_flags(struct ethtool_wol_get_req_dump *req,
struct ethtool_wol_get_list {
struct ethtool_wol_get_list *next;
- struct ethtool_wol_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_wol_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_wol_get_list_free(struct ethtool_wol_get_list *rsp);
@@ -1344,7 +1344,7 @@ struct ethtool_wol_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_wol_get_ntf *ntf);
- struct ethtool_wol_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_wol_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_wol_get_ntf_free(struct ethtool_wol_get_ntf *rsp);
@@ -1546,7 +1546,7 @@ ethtool_features_get_req_dump_set_header_flags(struct ethtool_features_get_req_d
struct ethtool_features_get_list {
struct ethtool_features_get_list *next;
- struct ethtool_features_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_features_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_features_get_list_free(struct ethtool_features_get_list *rsp);
@@ -1561,7 +1561,7 @@ struct ethtool_features_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_features_get_ntf *ntf);
- struct ethtool_features_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_features_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_features_get_ntf_free(struct ethtool_features_get_ntf *rsp);
@@ -1843,7 +1843,7 @@ ethtool_privflags_get_req_dump_set_header_flags(struct ethtool_privflags_get_req
struct ethtool_privflags_get_list {
struct ethtool_privflags_get_list *next;
- struct ethtool_privflags_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_privflags_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_privflags_get_list_free(struct ethtool_privflags_get_list *rsp);
@@ -1858,7 +1858,7 @@ struct ethtool_privflags_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_privflags_get_ntf *ntf);
- struct ethtool_privflags_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_privflags_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_privflags_get_ntf_free(struct ethtool_privflags_get_ntf *rsp);
@@ -2072,7 +2072,7 @@ ethtool_rings_get_req_dump_set_header_flags(struct ethtool_rings_get_req_dump *r
struct ethtool_rings_get_list {
struct ethtool_rings_get_list *next;
- struct ethtool_rings_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_rings_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_rings_get_list_free(struct ethtool_rings_get_list *rsp);
@@ -2087,7 +2087,7 @@ struct ethtool_rings_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_rings_get_ntf *ntf);
- struct ethtool_rings_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_rings_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_rings_get_ntf_free(struct ethtool_rings_get_ntf *rsp);
@@ -2395,7 +2395,7 @@ ethtool_channels_get_req_dump_set_header_flags(struct ethtool_channels_get_req_d
struct ethtool_channels_get_list {
struct ethtool_channels_get_list *next;
- struct ethtool_channels_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_channels_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_channels_get_list_free(struct ethtool_channels_get_list *rsp);
@@ -2410,7 +2410,7 @@ struct ethtool_channels_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_channels_get_ntf *ntf);
- struct ethtool_channels_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_channels_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_channels_get_ntf_free(struct ethtool_channels_get_ntf *rsp);
@@ -2697,7 +2697,7 @@ ethtool_coalesce_get_req_dump_set_header_flags(struct ethtool_coalesce_get_req_d
struct ethtool_coalesce_get_list {
struct ethtool_coalesce_get_list *next;
- struct ethtool_coalesce_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_coalesce_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_coalesce_get_list_free(struct ethtool_coalesce_get_list *rsp);
@@ -2712,7 +2712,7 @@ struct ethtool_coalesce_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_coalesce_get_ntf *ntf);
- struct ethtool_coalesce_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_coalesce_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_coalesce_get_ntf_free(struct ethtool_coalesce_get_ntf *rsp);
@@ -3124,7 +3124,7 @@ ethtool_pause_get_req_dump_set_header_flags(struct ethtool_pause_get_req_dump *r
struct ethtool_pause_get_list {
struct ethtool_pause_get_list *next;
- struct ethtool_pause_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_pause_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_pause_get_list_free(struct ethtool_pause_get_list *rsp);
@@ -3139,7 +3139,7 @@ struct ethtool_pause_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_pause_get_ntf *ntf);
- struct ethtool_pause_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_pause_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_pause_get_ntf_free(struct ethtool_pause_get_ntf *rsp);
@@ -3360,7 +3360,7 @@ ethtool_eee_get_req_dump_set_header_flags(struct ethtool_eee_get_req_dump *req,
struct ethtool_eee_get_list {
struct ethtool_eee_get_list *next;
- struct ethtool_eee_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_eee_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_eee_get_list_free(struct ethtool_eee_get_list *rsp);
@@ -3374,7 +3374,7 @@ struct ethtool_eee_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_eee_get_ntf *ntf);
- struct ethtool_eee_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_eee_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_eee_get_ntf_free(struct ethtool_eee_get_ntf *rsp);
@@ -3623,7 +3623,7 @@ ethtool_tsinfo_get_req_dump_set_header_flags(struct ethtool_tsinfo_get_req_dump
struct ethtool_tsinfo_get_list {
struct ethtool_tsinfo_get_list *next;
- struct ethtool_tsinfo_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_tsinfo_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_tsinfo_get_list_free(struct ethtool_tsinfo_get_list *rsp);
@@ -3842,7 +3842,7 @@ ethtool_tunnel_info_get_req_dump_set_header_flags(struct ethtool_tunnel_info_get
struct ethtool_tunnel_info_get_list {
struct ethtool_tunnel_info_get_list *next;
- struct ethtool_tunnel_info_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_tunnel_info_get_rsp obj __attribute__((aligned(8)));
};
void
@@ -3964,7 +3964,7 @@ ethtool_fec_get_req_dump_set_header_flags(struct ethtool_fec_get_req_dump *req,
struct ethtool_fec_get_list {
struct ethtool_fec_get_list *next;
- struct ethtool_fec_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_fec_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_fec_get_list_free(struct ethtool_fec_get_list *rsp);
@@ -3978,7 +3978,7 @@ struct ethtool_fec_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_fec_get_ntf *ntf);
- struct ethtool_fec_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_fec_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_fec_get_ntf_free(struct ethtool_fec_get_ntf *rsp);
@@ -4221,7 +4221,7 @@ ethtool_module_eeprom_get_req_dump_set_header_flags(struct ethtool_module_eeprom
struct ethtool_module_eeprom_get_list {
struct ethtool_module_eeprom_get_list *next;
- struct ethtool_module_eeprom_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_module_eeprom_get_rsp obj __attribute__((aligned(8)));
};
void
@@ -4340,7 +4340,7 @@ ethtool_phc_vclocks_get_req_dump_set_header_flags(struct ethtool_phc_vclocks_get
struct ethtool_phc_vclocks_get_list {
struct ethtool_phc_vclocks_get_list *next;
- struct ethtool_phc_vclocks_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_phc_vclocks_get_rsp obj __attribute__((aligned(8)));
};
void
@@ -4458,7 +4458,7 @@ ethtool_module_get_req_dump_set_header_flags(struct ethtool_module_get_req_dump
struct ethtool_module_get_list {
struct ethtool_module_get_list *next;
- struct ethtool_module_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_module_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_module_get_list_free(struct ethtool_module_get_list *rsp);
@@ -4473,7 +4473,7 @@ struct ethtool_module_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_module_get_ntf *ntf);
- struct ethtool_module_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_module_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_module_get_ntf_free(struct ethtool_module_get_ntf *rsp);
@@ -4654,7 +4654,7 @@ ethtool_pse_get_req_dump_set_header_flags(struct ethtool_pse_get_req_dump *req,
struct ethtool_pse_get_list {
struct ethtool_pse_get_list *next;
- struct ethtool_pse_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_pse_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_pse_get_list_free(struct ethtool_pse_get_list *rsp);
@@ -4849,7 +4849,7 @@ ethtool_rss_get_req_dump_set_header_flags(struct ethtool_rss_get_req_dump *req,
struct ethtool_rss_get_list {
struct ethtool_rss_get_list *next;
- struct ethtool_rss_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_rss_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_rss_get_list_free(struct ethtool_rss_get_list *rsp);
@@ -4979,7 +4979,7 @@ ethtool_plca_get_cfg_req_dump_set_header_flags(struct ethtool_plca_get_cfg_req_d
struct ethtool_plca_get_cfg_list {
struct ethtool_plca_get_cfg_list *next;
- struct ethtool_plca_get_cfg_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_plca_get_cfg_rsp obj __attribute__((aligned(8)));
};
void ethtool_plca_get_cfg_list_free(struct ethtool_plca_get_cfg_list *rsp);
@@ -4994,7 +4994,7 @@ struct ethtool_plca_get_cfg_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_plca_get_cfg_ntf *ntf);
- struct ethtool_plca_get_cfg_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_plca_get_cfg_rsp obj __attribute__((aligned(8)));
};
void ethtool_plca_get_cfg_ntf_free(struct ethtool_plca_get_cfg_ntf *rsp);
@@ -5244,7 +5244,7 @@ ethtool_plca_get_status_req_dump_set_header_flags(struct ethtool_plca_get_status
struct ethtool_plca_get_status_list {
struct ethtool_plca_get_status_list *next;
- struct ethtool_plca_get_status_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_plca_get_status_rsp obj __attribute__((aligned(8)));
};
void
@@ -5376,7 +5376,7 @@ ethtool_mm_get_req_dump_set_header_flags(struct ethtool_mm_get_req_dump *req,
struct ethtool_mm_get_list {
struct ethtool_mm_get_list *next;
- struct ethtool_mm_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_mm_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_mm_get_list_free(struct ethtool_mm_get_list *rsp);
@@ -5390,7 +5390,7 @@ struct ethtool_mm_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_mm_get_ntf *ntf);
- struct ethtool_mm_get_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_mm_get_rsp obj __attribute__((aligned(8)));
};
void ethtool_mm_get_ntf_free(struct ethtool_mm_get_ntf *rsp);
@@ -5504,7 +5504,7 @@ struct ethtool_cable_test_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_cable_test_ntf *ntf);
- struct ethtool_cable_test_ntf_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_cable_test_ntf_rsp obj __attribute__((aligned(8)));
};
void ethtool_cable_test_ntf_free(struct ethtool_cable_test_ntf *rsp);
@@ -5527,7 +5527,7 @@ struct ethtool_cable_test_tdr_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ethtool_cable_test_tdr_ntf *ntf);
- struct ethtool_cable_test_tdr_ntf_rsp obj __attribute__ ((aligned (8)));
+ struct ethtool_cable_test_tdr_ntf_rsp obj __attribute__((aligned(8)));
};
void ethtool_cable_test_tdr_ntf_free(struct ethtool_cable_test_tdr_ntf *rsp);
diff --git a/tools/net/ynl/generated/fou-user.h b/tools/net/ynl/generated/fou-user.h
index a8f860892540..fd566716ddd6 100644
--- a/tools/net/ynl/generated/fou-user.h
+++ b/tools/net/ynl/generated/fou-user.h
@@ -333,7 +333,7 @@ struct fou_get_rsp *fou_get(struct ynl_sock *ys, struct fou_get_req *req);
/* FOU_CMD_GET - dump */
struct fou_get_list {
struct fou_get_list *next;
- struct fou_get_rsp obj __attribute__ ((aligned (8)));
+ struct fou_get_rsp obj __attribute__((aligned(8)));
};
void fou_get_list_free(struct fou_get_list *rsp);
diff --git a/tools/net/ynl/generated/handshake-user.h b/tools/net/ynl/generated/handshake-user.h
index 2b34acc608de..bce537d8b8cc 100644
--- a/tools/net/ynl/generated/handshake-user.h
+++ b/tools/net/ynl/generated/handshake-user.h
@@ -90,7 +90,7 @@ struct handshake_accept_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct handshake_accept_ntf *ntf);
- struct handshake_accept_rsp obj __attribute__ ((aligned (8)));
+ struct handshake_accept_rsp obj __attribute__((aligned(8)));
};
void handshake_accept_ntf_free(struct handshake_accept_ntf *rsp);
diff --git a/tools/net/ynl/generated/netdev-user.h b/tools/net/ynl/generated/netdev-user.h
index b4351ff34595..4fafac879df3 100644
--- a/tools/net/ynl/generated/netdev-user.h
+++ b/tools/net/ynl/generated/netdev-user.h
@@ -69,7 +69,7 @@ netdev_dev_get(struct ynl_sock *ys, struct netdev_dev_get_req *req);
/* NETDEV_CMD_DEV_GET - dump */
struct netdev_dev_get_list {
struct netdev_dev_get_list *next;
- struct netdev_dev_get_rsp obj __attribute__ ((aligned (8)));
+ struct netdev_dev_get_rsp obj __attribute__((aligned(8)));
};
void netdev_dev_get_list_free(struct netdev_dev_get_list *rsp);
@@ -82,7 +82,7 @@ struct netdev_dev_get_ntf {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct netdev_dev_get_ntf *ntf);
- struct netdev_dev_get_rsp obj __attribute__ ((aligned (8)));
+ struct netdev_dev_get_rsp obj __attribute__((aligned(8)));
};
void netdev_dev_get_ntf_free(struct netdev_dev_get_ntf *rsp);
diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c
index 350ddc247450..830d25097009 100644
--- a/tools/net/ynl/lib/ynl.c
+++ b/tools/net/ynl/lib/ynl.c
@@ -379,6 +379,12 @@ int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr)
yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
"Invalid attribute (string %s)", policy->name);
return -1;
+ case YNL_PT_BITFIELD32:
+ if (len == sizeof(struct nla_bitfield32))
+ break;
+ yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
+ "Invalid attribute (bitfield32 %s)", policy->name);
+ return -1;
default:
yerr(yarg->ys, YNL_ERROR_ATTR_INVALID,
"Invalid attribute (unknown %s)", policy->name);
diff --git a/tools/net/ynl/lib/ynl.h b/tools/net/ynl/lib/ynl.h
index 87b4dad832f0..e974378e3b8c 100644
--- a/tools/net/ynl/lib/ynl.h
+++ b/tools/net/ynl/lib/ynl.h
@@ -135,6 +135,7 @@ enum ynl_policy_type {
YNL_PT_U64,
YNL_PT_UINT,
YNL_PT_NUL_STR,
+ YNL_PT_BITFIELD32,
};
struct ynl_policy_attr {
@@ -157,7 +158,7 @@ struct ynl_parse_arg {
struct ynl_dump_list_type {
struct ynl_dump_list_type *next;
- unsigned char data[] __attribute__ ((aligned (8)));
+ unsigned char data[] __attribute__((aligned(8)));
};
extern struct ynl_dump_list_type *YNL_LIST_END;
@@ -187,7 +188,7 @@ struct ynl_ntf_base_type {
__u8 cmd;
struct ynl_ntf_base_type *next;
void (*free)(struct ynl_ntf_base_type *ntf);
- unsigned char data[] __attribute__ ((aligned (8)));
+ unsigned char data[] __attribute__((aligned(8)));
};
extern mnl_cb_t ynl_cb_array[NLMSG_MIN_TYPE];
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
index 3b36553a66cc..b1da4aea9336 100644
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -478,6 +478,8 @@ class YnlFamily(SpecFamily):
elif attr['type'] in NlAttr.type_formats:
format = NlAttr.get_format(attr['type'], attr.byte_order)
attr_payload = format.pack(int(value))
+ elif attr['type'] == "bitfield32":
+ attr_payload = struct.pack("II", int(value["value"]), int(value["selector"]))
else:
raise Exception(f'Unknown type at {space} {name} {value} {attr["type"]}')
@@ -545,14 +547,19 @@ class YnlFamily(SpecFamily):
decoded = attr.as_auto_scalar(attr_spec['type'], attr_spec.byte_order)
elif attr_spec["type"] in NlAttr.type_formats:
decoded = attr.as_scalar(attr_spec['type'], attr_spec.byte_order)
+ if 'enum' in attr_spec:
+ decoded = self._decode_enum(decoded, attr_spec)
elif attr_spec["type"] == 'array-nest':
decoded = self._decode_array_nest(attr, attr_spec)
+ elif attr_spec["type"] == 'bitfield32':
+ value, selector = struct.unpack("II", attr.raw)
+ if 'enum' in attr_spec:
+ value = self._decode_enum(value, attr_spec)
+ selector = self._decode_enum(selector, attr_spec)
+ decoded = {"value": value, "selector": selector}
else:
raise Exception(f'Unknown {attr_spec["type"]} with name {attr_spec["name"]}')
- if 'enum' in attr_spec:
- decoded = self._decode_enum(decoded, attr_spec)
-
if not attr_spec.is_multi:
rsp[attr_spec['name']] = decoded
elif attr_spec.name in rsp:
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index a9e8898c9386..0fee68863db4 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -410,10 +410,13 @@ class TypeString(Type):
return f'.type = YNL_PT_NUL_STR, '
def _attr_policy(self, policy):
- mem = '{ .type = ' + policy
- if 'max-len' in self.checks:
- mem += ', .len = ' + str(self.get_limit('max-len'))
- mem += ', }'
+ if 'exact-len' in self.checks:
+ mem = 'NLA_POLICY_EXACT_LEN(' + str(self.checks['exact-len']) + ')'
+ else:
+ mem = '{ .type = ' + policy
+ if 'max-len' in self.checks:
+ mem += ', .len = ' + str(self.get_limit('max-len'))
+ mem += ', }'
return mem
def attr_policy(self, cw):
@@ -459,14 +462,17 @@ class TypeBinary(Type):
return f'.type = YNL_PT_BINARY,'
def _attr_policy(self, policy):
- mem = '{ '
- if len(self.checks) == 1 and 'min-len' in self.checks:
- mem += '.len = ' + str(self.get_limit('min-len'))
- elif len(self.checks) == 0:
- mem += '.type = NLA_BINARY'
+ if 'exact-len' in self.checks:
+ mem = 'NLA_POLICY_EXACT_LEN(' + str(self.checks['exact-len']) + ')'
else:
- raise Exception('One or more of binary type checks not implemented, yet')
- mem += ', }'
+ mem = '{ '
+ if len(self.checks) == 1 and 'min-len' in self.checks:
+ mem += '.len = ' + str(self.get_limit('min-len'))
+ elif len(self.checks) == 0:
+ mem += '.type = NLA_BINARY'
+ else:
+ raise Exception('One or more of binary type checks not implemented, yet')
+ mem += ', }'
return mem
def attr_put(self, ri, var):
@@ -488,6 +494,31 @@ class TypeBinary(Type):
f'memcpy({member}, {self.c_name}, {presence}_len);']
+class TypeBitfield32(Type):
+ def _complex_member_type(self, ri):
+ return "struct nla_bitfield32"
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_BITFIELD32, '
+
+ def _attr_policy(self, policy):
+ if not 'enum' in self.attr:
+ raise Exception('Enum required for bitfield32 attr')
+ enum = self.family.consts[self.attr['enum']]
+ mask = enum.get_mask(as_flags=True)
+ return f"NLA_POLICY_BITFIELD32({mask})"
+
+ def attr_put(self, ri, var):
+ line = f"mnl_attr_put(nlh, {self.enum_name}, sizeof(struct nla_bitfield32), &{var}->{self.c_name})"
+ self._attr_put_line(ri, var, line)
+
+ def _attr_get(self, ri, var):
+ return f"memcpy(&{var}->{self.c_name}, mnl_attr_get_payload(attr), sizeof(struct nla_bitfield32));", None, None
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"memcpy(&{member}, {self.c_name}, sizeof(struct nla_bitfield32));"]
+
+
class TypeNest(Type):
def _complex_member_type(self, ri):
return self.nested_struct_type
@@ -786,6 +817,8 @@ class AttrSet(SpecAttrSet):
t = TypeString(self.family, self, elem, value)
elif elem['type'] == 'binary':
t = TypeBinary(self.family, self, elem, value)
+ elif elem['type'] == 'bitfield32':
+ t = TypeBitfield32(self.family, self, elem, value)
elif elem['type'] == 'nest':
t = TypeNest(self.family, self, elem, value)
elif elem['type'] == 'array-nest':
@@ -1085,10 +1118,13 @@ class RenderInfo:
# 'do' and 'dump' response parsing is identical
self.type_consistent = True
- if op_mode != 'do' and 'dump' in op and 'do' in op:
- if ('reply' in op['do']) != ('reply' in op["dump"]):
- self.type_consistent = False
- elif 'reply' in op['do'] and op["do"]["reply"] != op["dump"]["reply"]:
+ if op_mode != 'do' and 'dump' in op:
+ if 'do' in op:
+ if ('reply' in op['do']) != ('reply' in op["dump"]):
+ self.type_consistent = False
+ elif 'reply' in op['do'] and op["do"]["reply"] != op["dump"]["reply"]:
+ self.type_consistent = False
+ else:
self.type_consistent = False
self.attr_set = attr_set
@@ -1872,7 +1908,7 @@ def print_wrapped_type(ri):
ri.cw.p('__u8 cmd;')
ri.cw.p('struct ynl_ntf_base_type *next;')
ri.cw.p(f"void (*free)({type_name(ri, 'reply')} *ntf);")
- ri.cw.p(f"{type_name(ri, 'reply', deref=True)} obj __attribute__ ((aligned (8)));")
+ ri.cw.p(f"{type_name(ri, 'reply', deref=True)} obj __attribute__((aligned(8)));")
ri.cw.block_end(line=';')
ri.cw.nl()
print_free_prototype(ri, 'reply')
@@ -2414,6 +2450,16 @@ def render_user_family(family, cw, prototype):
cw.block_end(line=';')
+def family_contains_bitfield32(family):
+ for _, attr_set in family.attr_sets.items():
+ if attr_set.subset_of:
+ continue
+ for _, attr in attr_set.items():
+ if attr.type == "bitfield32":
+ return True
+ return False
+
+
def find_kernel_root(full_path):
sub_path = ''
while True:
@@ -2499,6 +2545,8 @@ def main():
cw.p('#include <string.h>')
if args.header:
cw.p('#include <linux/types.h>')
+ if family_contains_bitfield32(parsed):
+ cw.p('#include <linux/netlink.h>')
else:
cw.p(f'#include "{parsed.name}-user.h"')
cw.p('#include "ynl.h"')
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index 07d786329105..e959336c7a73 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -177,7 +177,7 @@ static __always_inline __u32 tcp_ns_to_ts(__u64 ns)
return ns / (NSEC_PER_SEC / TCP_TS_HZ);
}
-static __always_inline __u32 tcp_time_stamp_raw(void)
+static __always_inline __u32 tcp_clock_ms(void)
{
return tcp_ns_to_ts(tcp_clock_ns());
}
@@ -274,7 +274,7 @@ static __always_inline bool tscookie_init(struct tcphdr *tcp_header,
if (!loop_ctx.option_timestamp)
return false;
- cookie = tcp_time_stamp_raw() & ~TSMASK;
+ cookie = tcp_clock_ms() & ~TSMASK;
cookie |= loop_ctx.wscale & TS_OPT_WSCALE_MASK;
if (loop_ctx.option_sack)
cookie |= TS_OPT_SACK;
diff --git a/tools/testing/selftests/net/route_localnet.sh b/tools/testing/selftests/net/route_localnet.sh
index 116bfeab72fa..e08701c750e3 100755
--- a/tools/testing/selftests/net/route_localnet.sh
+++ b/tools/testing/selftests/net/route_localnet.sh
@@ -18,8 +18,10 @@ setup() {
ip route del 127.0.0.0/8 dev lo table local
ip netns exec "${PEER_NS}" ip route del 127.0.0.0/8 dev lo table local
- ifconfig veth0 127.25.3.4/24 up
- ip netns exec "${PEER_NS}" ifconfig veth1 127.25.3.14/24 up
+ ip address add 127.25.3.4/24 dev veth0
+ ip link set dev veth0 up
+ ip netns exec "${PEER_NS}" ip address add 127.25.3.14/24 dev veth1
+ ip netns exec "${PEER_NS}" ip link set dev veth1 up
ip route flush cache
ip netns exec "${PEER_NS}" ip route flush cache