393 files changed, 16385 insertions, 4390 deletions
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml index 0e7c31794ae6..ac471b60ed6a 100644 --- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml @@ -121,6 +121,11 @@ properties: and is useful for determining certain configuration settings such as flow control thresholds. + sfp: + $ref: /schemas/types.yaml#definitions/phandle + description: + Specifies a reference to a node representing a SFP cage. + tx-fifo-depth: $ref: /schemas/types.yaml#definitions/uint32 description: diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml index f70f18ff821f..8927941c74bb 100644 --- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml @@ -153,6 +153,11 @@ properties: Delay after the reset was deasserted in microseconds. If this property is missing the delay will be skipped. + sfp: + $ref: /schemas/types.yaml#definitions/phandle + description: + Specifies a reference to a node representing a SFP cage. + required: - reg diff --git a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml new file mode 100644 index 000000000000..81ae8cafabc1 --- /dev/null +++ b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml @@ -0,0 +1,240 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/ti,cpsw-switch.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TI SoC Ethernet Switch Controller (CPSW) Device Tree Bindings + +maintainers: + - Grygorii Strashko <[email protected]> + - Sekhar Nori <[email protected]> + +description: + The 3-port switch gigabit ethernet subsystem provides ethernet packet + communication and can be configured as an ethernet switch. It provides the + gigabit media independent interface (GMII),reduced gigabit media + independent interface (RGMII), reduced media independent interface (RMII), + the management data input output (MDIO) for physical layer device (PHY) + management. 
+ +properties: + compatible: + oneOf: + - const: ti,cpsw-switch + - items: + - const: ti,am335x-cpsw-switch + - const: ti,cpsw-switch + - items: + - const: ti,am4372-cpsw-switch + - const: ti,cpsw-switch + - items: + - const: ti,dra7-cpsw-switch + - const: ti,cpsw-switch + + reg: + maxItems: 1 + description: + The physical base address and size of full the CPSW module IO range + + ranges: true + + clocks: + maxItems: 1 + description: CPSW functional clock + + clock-names: + maxItems: 1 + items: + - const: fck + + interrupts: + items: + - description: RX_THRESH interrupt + - description: RX interrupt + - description: TX interrupt + - description: MISC interrupt + + interrupt-names: + items: + - const: "rx_thresh" + - const: "rx" + - const: "tx" + - const: "misc" + + pinctrl-names: true + + syscon: + $ref: /schemas/types.yaml#definitions/phandle + description: + Phandle to the system control device node which provides access to + efuse IO range with MAC addresses + + + ethernet-ports: + type: object + properties: + '#address-cells': + const: 1 + '#size-cells': + const: 0 + + patternProperties: + "^port@[0-9]+$": + type: object + minItems: 1 + maxItems: 2 + description: CPSW external ports + + allOf: + - $ref: ethernet-controller.yaml# + + properties: + reg: + maxItems: 1 + enum: [1, 2] + description: CPSW port number + + phys: + $ref: /schemas/types.yaml#definitions/phandle-array + maxItems: 1 + description: phandle on phy-gmii-sel PHY + + label: + $ref: /schemas/types.yaml#/definitions/string-array + maxItems: 1 + description: label associated with this port + + ti,dual-emac-pvid: + $ref: /schemas/types.yaml#/definitions/uint32 + maxItems: 1 + minimum: 1 + maximum: 1024 + description: + Specifies default PORT VID to be used to segregate + ports. Default value - CPSW port number. + + required: + - reg + - phys + + mdio: + type: object + allOf: + - $ref: "ti,davinci-mdio.yaml#" + description: + CPSW MDIO bus. + + cpts: + type: object + description: + The Common Platform Time Sync (CPTS) module + + properties: + clocks: + maxItems: 1 + description: CPTS reference clock + + clock-names: + maxItems: 1 + items: + - const: cpts + + cpts_clock_mult: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Numerator to convert input clock ticks into ns + + cpts_clock_shift: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Denominator to convert input clock ticks into ns. + Mult and shift will be calculated basing on CPTS rftclk frequency if + both cpts_clock_shift and cpts_clock_mult properties are not provided. 
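The cpts_clock_mult/cpts_clock_shift pair follows the usual kernel cyclecounter convention for turning counter ticks into nanoseconds. A minimal illustrative sketch of that conversion (the concrete mult/shift values in the comment are only an example for a hypothetical 250 MHz rftclk, not values required by this binding)::

    #include <stdint.h>

    /* ns = (ticks * mult) >> shift; overflow handling is omitted in this sketch.
     * Example: for a 250 MHz reference clock (4 ns per tick), mult = 0x80000000
     * and shift = 29 satisfy (ticks * mult) >> shift == ticks * 4.
     */
    static uint64_t cpts_ticks_to_ns(uint64_t ticks, uint32_t mult, uint32_t shift)
    {
            return (ticks * mult) >> shift;
    }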
+ + required: + - clocks + - clock-names + +required: + - compatible + - reg + - ranges + - clocks + - clock-names + - interrupts + - interrupt-names + - '#address-cells' + - '#size-cells' + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/clock/dra7.h> + + mac_sw: switch@0 { + compatible = "ti,dra7-cpsw-switch","ti,cpsw-switch"; + reg = <0x0 0x4000>; + ranges = <0 0 0x4000>; + clocks = <&gmac_main_clk>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <1>; + syscon = <&scm_conf>; + inctrl-names = "default", "sleep"; + + interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "rx_thresh", "rx", "tx", "misc"; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + cpsw_port1: port@1 { + reg = <1>; + label = "port1"; + mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 1>; + phy-handle = <ðphy0_sw>; + phy-mode = "rgmii"; + ti,dual_emac_pvid = <1>; + }; + + cpsw_port2: port@2 { + reg = <2>; + label = "wan"; + mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 2>; + phy-handle = <ðphy1_sw>; + phy-mode = "rgmii"; + ti,dual_emac_pvid = <2>; + }; + }; + + davinci_mdio_sw: mdio@1000 { + compatible = "ti,cpsw-mdio","ti,davinci_mdio"; + reg = <0x1000 0x100>; + clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 0>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <0>; + bus_freq = <1000000>; + + ethphy0_sw: ethernet-phy@0 { + reg = <0>; + }; + + ethphy1_sw: ethernet-phy@1 { + reg = <1>; + }; + }; + + cpts { + clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 25>; + clock-names = "cpts"; + }; + }; diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst index 7a4caaaf3a17..5bc55a4e3bce 100644 --- a/Documentation/networking/af_xdp.rst +++ b/Documentation/networking/af_xdp.rst @@ -295,7 +295,7 @@ round-robin example of distributing packets is shown below: { rr = (rr + 1) & (MAX_SOCKS - 1); - return bpf_redirect_map(&xsks_map, rr, 0); + return bpf_redirect_map(&xsks_map, rr, XDP_DROP); } Note, that since there is only a single set of FILL and COMPLETION @@ -304,6 +304,12 @@ to make sure that multiple processes or threads do not use these rings concurrently. There are no synchronization primitives in the libbpf code that protects multiple users at this point in time. +Libbpf uses this mode if you create more than one socket tied to the +same umem. However, note that you need to supply the +XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD libbpf_flag with the +xsk_socket__create calls and load your own XDP program as there is no +built in one in libbpf that will route the traffic for you. + XDP_USE_NEED_WAKEUP bind flag ----------------------------- @@ -355,10 +361,22 @@ to set the size of at least one of the RX and TX rings. If you set both, you will be able to both receive and send traffic from your application, but if you only want to do one of them, you can save resources by only setting up one of them. Both the FILL ring and the -COMPLETION ring are mandatory if you have a UMEM tied to your socket, -which is the normal case. But if the XDP_SHARED_UMEM flag is used, any -socket after the first one does not have a UMEM and should in that -case not have any FILL or COMPLETION rings created. +COMPLETION ring are mandatory as you need to have a UMEM tied to your +socket. 
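As a rough illustration of the ring setup and libbpf_flags usage discussed above (a sketch only; it assumes a umem already created with xsk_umem__create(), and "eth0"/queue 0 are placeholders)::

    #include <bpf/xsk.h>

    /* Create an AF_XDP socket with both an RX and a TX ring via libbpf.
     * XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD is only needed when you load
     * your own XDP program, e.g. for the XDP_SHARED_UMEM case above.
     */
    static int create_xsk(struct xsk_umem *umem, struct xsk_socket **xsk,
                          struct xsk_ring_cons *rx, struct xsk_ring_prod *tx)
    {
            struct xsk_socket_config cfg = {
                    .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
                    .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
                    .libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
            };

            return xsk_socket__create(xsk, "eth0", 0, umem, rx, tx, &cfg);
    }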
But if the XDP_SHARED_UMEM flag is used, any socket after the +first one does not have a UMEM and should in that case not have any +FILL or COMPLETION rings created as the ones from the shared umem will +be used. Note that the rings are single-producer single-consumer, so +do not try to access them from multiple processes at the same +time. See the XDP_SHARED_UMEM section. + +In libbpf, you can create Rx-only and Tx-only sockets by supplying +NULL to the rx and tx arguments, respectively, to the +xsk_socket__create function. + +If you create a Tx-only socket, we recommend that you do not put any +packets on the fill ring. If you do this, drivers might think you are +going to receive something when you in fact will not, and this can +negatively impact performance. XDP_UMEM_REG setsockopt ----------------------- diff --git a/Documentation/networking/device_drivers/ti/cpsw_switchdev.txt b/Documentation/networking/device_drivers/ti/cpsw_switchdev.txt new file mode 100644 index 000000000000..5c8cee17fca9 --- /dev/null +++ b/Documentation/networking/device_drivers/ti/cpsw_switchdev.txt @@ -0,0 +1,209 @@ +* Texas Instruments CPSW switchdev based ethernet driver 2.0 + +- Port renaming +On older udev versions renaming of ethX to swXpY will not be automatically +supported. +In order to rename via udev: +ip -d link show dev sw0p1 | grep switchid + +SUBSYSTEM=="net", ACTION=="add", ATTR{phys_switch_id}==<switchid>, \ + ATTR{phys_port_name}!="", NAME="sw0$attr{phys_port_name}" + + +==================== +# Dual mac mode +==================== +- The new (cpsw_new.c) driver operates in dual-emac mode by default, thus +working as 2 individual network interfaces. Main differences from the legacy CPSW +driver are: + - optimized promiscuous mode: The P0_UNI_FLOOD (both ports) is enabled in +addition to ALLMULTI (current port) instead of ALE_BYPASS. +So, ports in promiscuous mode keep the ability to do mcast and vlan filtering, +which provides significant benefits when ports are joined to the same bridge, +but without enabling "switch" mode, or to different bridges. + - learning is disabled on ports as it makes little sense for + segregated ports - no forwarding in HW. + - enabled basic support for devlink. + + devlink dev show + platform/48484000.switch + + devlink dev param show + platform/48484000.switch: + name switch_mode type driver-specific + values: + cmode runtime value false + name ale_bypass type driver-specific + values: + cmode runtime value false + +Devlink configuration parameters +==================== +See Documentation/networking/devlink-params-ti-cpsw-switch.txt + +==================== +# Bridging in dual mac mode +==================== +The dual_mac mode requires two vids to be reserved for internal purposes, +which, by default, equal the CPSW port numbers. As a result, the bridge has to be +configured in vlan unaware mode or default_pvid has to be adjusted. 
+ + ip link add name br0 type bridge + ip link set dev br0 type bridge vlan_filtering 0 + echo 0 > /sys/class/net/br0/bridge/default_pvid + ip link set dev sw0p1 master br0 + ip link set dev sw0p2 master br0 + - or - + ip link add name br0 type bridge + ip link set dev br0 type bridge vlan_filtering 0 + echo 100 > /sys/class/net/br0/bridge/default_pvid + ip link set dev br0 type bridge vlan_filtering 1 + ip link set dev sw0p1 master br0 + ip link set dev sw0p2 master br0 + +==================== +# Enabling "switch" +==================== +Switch mode can be enabled by setting the devlink driver parameter +"switch_mode" to 1/true: + devlink dev param set platform/48484000.switch \ + name switch_mode value 1 cmode runtime + +This can be done regardless of the state of the port netdev devices (UP/DOWN), but +the port netdev devices have to be UP before joining the bridge to avoid +overwriting the bridge configuration, as the CPSW switch driver completely reloads its +configuration when the first port changes its state to UP. + +When both interfaces have joined the bridge, the CPSW switch driver will enable +marking packets with the offload_fwd_mark flag unless "ale_bypass=0". + +All configuration is implemented via the switchdev API. + +==================== +# Bridge setup +==================== + devlink dev param set platform/48484000.switch \ + name switch_mode value 1 cmode runtime + + ip link add name br0 type bridge + ip link set dev br0 type bridge ageing_time 1000 + ip link set dev sw0p1 up + ip link set dev sw0p2 up + ip link set dev sw0p1 master br0 + ip link set dev sw0p2 master br0 + [*] bridge vlan add dev br0 vid 1 pvid untagged self + +[*] if vlan_filtering=1, where default_pvid=1 + +================= +# On/off STP +================= +ip link set dev BRDEV type bridge stp_state 1/0 + +Note. Steps [*] are mandatory. + +==================== +# VLAN configuration +==================== +bridge vlan add dev br0 vid 1 pvid untagged self <---- add cpu port to VLAN 1 + +Note. This step is mandatory for bridge/default_pvid. + +================= +# Add extra VLANs +================= + 1. untagged: + bridge vlan add dev sw0p1 vid 100 pvid untagged master + bridge vlan add dev sw0p2 vid 100 pvid untagged master + bridge vlan add dev br0 vid 100 pvid untagged self <---- Add cpu port to VLAN100 + + 2. tagged: + bridge vlan add dev sw0p1 vid 100 master + bridge vlan add dev sw0p2 vid 100 master + bridge vlan add dev br0 vid 100 pvid tagged self <---- Add cpu port to VLAN100 + +==== +FDBs +==== +FDBs are automatically added on the appropriate switch port upon detection + +Manually adding FDBs: +bridge fdb add aa:bb:cc:dd:ee:ff dev sw0p1 master vlan 100 +bridge fdb add aa:bb:cc:dd:ee:fe dev sw0p2 master <---- Add on all VLANs + +==== +MDBs +==== +MDBs are automatically added on the appropriate switch port upon detection + +Manually adding MDBs: +bridge mdb add dev br0 port sw0p1 grp 239.1.1.1 permanent vid 100 +bridge mdb add dev br0 port sw0p1 grp 239.1.1.1 permanent <---- Add on all VLANs + +================== +Multicast flooding +================== +CPU port mcast_flooding is always on + +Turning flooding on/off on switch ports: +bridge link set dev sw0p1 mcast_flood on/off + +================== +Access and Trunk port +================== + bridge vlan add dev sw0p1 vid 100 pvid untagged master + bridge vlan add dev sw0p2 vid 100 master + + + bridge vlan add dev br0 vid 100 self + ip link add link br0 name br0.100 type vlan id 100 + + Note. 
Setting PVID on Bridge device itself works only for + default VLAN (default_pvid). + +===================== + NFS +===================== +The only way for NFS to work is by chrooting to a minimal environment when +switch configuration that will affect connectivity is needed. +Assuming you are booting NFS with the eth1 interface (the script is hacky and +it's just there to prove NFS is doable). + +setup.sh: +#!/bin/sh +mkdir proc +mount -t proc none /proc +ifconfig br0 > /dev/null +if [ $? -ne 0 ]; then + echo "Setting up bridge" + ip link add name br0 type bridge + ip link set dev br0 type bridge ageing_time 1000 + ip link set dev br0 type bridge vlan_filtering 1 + + ip link set eth1 down + ip link set eth1 name sw0p1 + ip link set dev sw0p1 up + ip link set dev sw0p2 up + ip link set dev sw0p2 master br0 + ip link set dev sw0p1 master br0 + bridge vlan add dev br0 vid 1 pvid untagged self + ifconfig sw0p1 0.0.0.0 + udhcpc -i br0 +fi +umount /proc + +run_nfs.sh: +#!/bin/sh +mkdir /tmp/root/bin -p +mkdir /tmp/root/lib -p + +cp -r /lib/ /tmp/root/ +cp -r /bin/ /tmp/root/ +cp /sbin/ip /tmp/root/bin +cp /sbin/bridge /tmp/root/bin +cp /sbin/ifconfig /tmp/root/bin +cp /sbin/udhcpc /tmp/root/bin +cp /path/to/setup.sh /tmp/root/bin +chroot /tmp/root/ busybox sh /bin/setup.sh + +run ./run_nfs.sh diff --git a/Documentation/networking/devlink-params-ti-cpsw-switch.txt b/Documentation/networking/devlink-params-ti-cpsw-switch.txt new file mode 100644 index 000000000000..4037458499f7 --- /dev/null +++ b/Documentation/networking/devlink-params-ti-cpsw-switch.txt @@ -0,0 +1,10 @@ +ale_bypass [DEVICE, DRIVER-SPECIFIC] + Allows enabling ALE_CONTROL(4).BYPASS mode for debug purposes. + All packets will be sent to the Host port only if enabled. + Type: bool + Configuration mode: runtime + +switch_mode [DEVICE, DRIVER-SPECIFIC] + Enable switch mode + Type: bool + Configuration mode: runtime diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 319e5e041f38..c4a328f2d57a 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -770,10 +770,10 @@ Some core changes of the new internal format: callq foo mov %rax,%r13 mov %rbx,%rdi - mov $0x2,%esi - mov $0x3,%edx - mov $0x4,%ecx - mov $0x5,%r8d + mov $0x6,%esi + mov $0x7,%edx + mov $0x8,%ecx + mov $0x9,%r8d callq bar add %r13,%rax mov -0x228(%rbp),%rbx diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index d4dca42910d0..5acab1290e03 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -33,6 +33,7 @@ Contents: scaling tls tls-offload + nfc .. only:: subproject and html diff --git a/Documentation/networking/nfc.txt b/Documentation/networking/nfc.rst index b24c29bdae27..9aab3a88c9b2 100644 --- a/Documentation/networking/nfc.txt +++ b/Documentation/networking/nfc.rst @@ -1,3 +1,4 @@ +=================== Linux NFC subsystem =================== @@ -8,7 +9,7 @@ This document covers the architecture overview, the device driver interface description and the userspace interface description. Architecture overview ---------------------- +===================== The NFC subsystem is responsible for: - NFC adapters management; @@ -25,33 +26,34 @@ The control operations are available to userspace via generic netlink. The low-level data exchange interface is provided by the new socket family PF_NFC. The NFC_SOCKPROTO_RAW performs raw communication with NFC targets. 
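A rough userspace sketch of the PF_NFC raw socket usage introduced here (illustrative only; dev_idx/target_idx would come from the netlink control interface, and NFC_PROTO_MIFARE is just an example protocol)::

    #include <stdint.h>
    #include <sys/socket.h>
    #include <linux/nfc.h>

    /* Connect a raw NFC socket to an already-discovered target. */
    static int nfc_connect_target(uint32_t dev_idx, uint32_t target_idx)
    {
            struct sockaddr_nfc addr = {
                    .sa_family = AF_NFC,
                    .dev_idx = dev_idx,
                    .target_idx = target_idx,
                    .nfc_protocol = NFC_PROTO_MIFARE,
            };
            int fd = socket(AF_NFC, SOCK_SEQPACKET, NFC_SOCKPROTO_RAW);

            if (fd < 0)
                    return -1;
            if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                    return -1;
            return fd;
    }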
- - +--------------------------------------+ - | USER SPACE | - +--------------------------------------+ - ^ ^ - | low-level | control - | data exchange | operations - | | - | v - | +-----------+ - | AF_NFC | netlink | - | socket +-----------+ - | raw ^ - | | - v v - +---------+ +-----------+ - | rawsock | <--------> | core | - +---------+ +-----------+ - ^ - | - v - +-----------+ - | driver | - +-----------+ +.. code-block:: none + + +--------------------------------------+ + | USER SPACE | + +--------------------------------------+ + ^ ^ + | low-level | control + | data exchange | operations + | | + | v + | +-----------+ + | AF_NFC | netlink | + | socket +-----------+ + | raw ^ + | | + v v + +---------+ +-----------+ + | rawsock | <--------> | core | + +---------+ +-----------+ + ^ + | + v + +-----------+ + | driver | + +-----------+ Device Driver Interface ------------------------ +======================= When registering on the NFC subsystem, the device driver must inform the core of the set of supported NFC protocols and the set of ops callbacks. The ops @@ -64,7 +66,7 @@ callbacks that must be implemented are the following: * data_exchange - send data and receive the response (transceive operation) Userspace interface --------------------- +=================== The userspace interface is divided in control operations and low-level data exchange operation. @@ -82,7 +84,7 @@ The operations are composed by commands and events, all listed below: * NFC_EVENT_DEVICE_ADDED - reports an NFC device addition * NFC_EVENT_DEVICE_REMOVED - reports an NFC device removal * NFC_EVENT_TARGETS_FOUND - reports START_POLL results when 1 or more targets -are found + are found The user must call START_POLL to poll for NFC targets, passing the desired NFC protocols through NFC_ATTR_PROTOCOLS attribute. The device remains in polling @@ -101,14 +103,14 @@ it's closed. LOW-LEVEL DATA EXCHANGE: The userspace must use PF_NFC sockets to perform any data communication with -targets. All NFC sockets use AF_NFC: - -struct sockaddr_nfc { - sa_family_t sa_family; - __u32 dev_idx; - __u32 target_idx; - __u32 nfc_protocol; -}; +targets. All NFC sockets use AF_NFC:: + + struct sockaddr_nfc { + sa_family_t sa_family; + __u32 dev_idx; + __u32 target_idx; + __u32 nfc_protocol; + }; To establish a connection with one target, the user must create an NFC_SOCKPROTO_RAW socket and call the 'connect' syscall with the sockaddr_nfc diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst index a689966bc4be..cda1c0a0492a 100644 --- a/Documentation/networking/phy.rst +++ b/Documentation/networking/phy.rst @@ -352,7 +352,8 @@ Fills the phydev structure with up-to-date information about the current settings in the PHY. :: - int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); + int phy_ethtool_ksettings_set(struct phy_device *phydev, + const struct ethtool_link_ksettings *cmd); Ethtool convenience functions. 
:: diff --git a/MAINTAINERS b/MAINTAINERS index 760049454a23..5e7f5980e75a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -643,7 +643,7 @@ F: drivers/net/ethernet/alacritech/* FORCEDETH GIGABIT ETHERNET DRIVER M: Rain River <[email protected]> -M: Zhu Yanjun <[email protected]> +M: Zhu Yanjun <[email protected]> S: Maintained F: drivers/net/ethernet/nvidia/* @@ -8311,11 +8311,14 @@ F: drivers/hid/intel-ish-hid/ INTEL IOMMU (VT-d) M: David Woodhouse <[email protected]> +M: Lu Baolu <[email protected]> -T: git git://git.infradead.org/iommu-2.6.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git S: Supported -F: drivers/iommu/intel-iommu.c +F: drivers/iommu/dmar.c +F: drivers/iommu/intel*.[ch] F: include/linux/intel-iommu.h +F: include/linux/intel-svm.h INTEL IOP-ADMA DMA DRIVER R: Dan Williams <[email protected]> @@ -17227,6 +17230,7 @@ F: virt/lib/ VIRTIO AND VHOST VSOCK DRIVER M: Stefan Hajnoczi <[email protected]> +M: Stefano Garzarella <[email protected]> @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 4 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc8 NAME = Kleptomaniac Octopus # *DOCUMENTATION* diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts index 0aaacea1d887..820ce3b60bb6 100644 --- a/arch/arm/boot/dts/am571x-idk.dts +++ b/arch/arm/boot/dts/am571x-idk.dts @@ -186,3 +186,30 @@ pinctrl-1 = <&mmc2_pins_hs>; pinctrl-2 = <&mmc2_pins_ddr_rev20 &mmc2_iodelay_ddr_conf>; }; + +&mac_sw { + pinctrl-names = "default", "sleep"; + status = "okay"; +}; + +&cpsw_port1 { + phy-handle = <ðphy0_sw>; + phy-mode = "rgmii"; + ti,dual-emac-pvid = <1>; +}; + +&cpsw_port2 { + phy-handle = <ðphy1_sw>; + phy-mode = "rgmii"; + ti,dual-emac-pvid = <2>; +}; + +&davinci_mdio_sw { + ethphy0_sw: ethernet-phy@0 { + reg = <0>; + }; + + ethphy1_sw: ethernet-phy@1 { + reg = <1>; + }; +}; diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts index ea1c119feaa5..c3d966904d64 100644 --- a/arch/arm/boot/dts/am572x-idk.dts +++ b/arch/arm/boot/dts/am572x-idk.dts @@ -27,3 +27,8 @@ pinctrl-1 = <&mmc2_pins_hs>; pinctrl-2 = <&mmc2_pins_ddr_rev20>; }; + +&mac { + status = "okay"; + dual_emac; +}; diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts index 7935d70874ce..fa0088025b2c 100644 --- a/arch/arm/boot/dts/am574x-idk.dts +++ b/arch/arm/boot/dts/am574x-idk.dts @@ -35,3 +35,8 @@ pinctrl-1 = <&mmc2_pins_default>; pinctrl-2 = <&mmc2_pins_default>; }; + +&mac { + status = "okay"; + dual_emac; +}; diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi index 423855a2a2d6..398721c7201c 100644 --- a/arch/arm/boot/dts/am57xx-idk-common.dtsi +++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi @@ -363,11 +363,6 @@ ext-clk-src; }; -&mac { - status = "okay"; - dual_emac; -}; - &cpsw_emac0 { phy-handle = <ðphy0>; phy-mode = "rgmii"; diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index 5cac2dd58241..37e048771b0f 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -3079,6 +3079,58 @@ phys = <&phy_gmii_sel 2>; }; }; + + mac_sw: switch@0 { + compatible = "ti,dra7-cpsw-switch","ti,cpsw-switch"; + reg = <0x0 0x4000>; + ranges = <0 0 0x4000>; + clocks = <&gmac_main_clk>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <1>; + syscon = <&scm_conf>; + status = "disabled"; + + interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>; 
+ interrupt-names = "rx_thresh", "rx", "tx", "misc"; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + cpsw_port1: port@1 { + reg = <1>; + label = "port1"; + mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 1>; + }; + + cpsw_port2: port@2 { + reg = <2>; + label = "port2"; + mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 2>; + }; + }; + + davinci_mdio_sw: mdio@1000 { + compatible = "ti,cpsw-mdio","ti,davinci_mdio"; + clocks = <&gmac_main_clk>; + clock-names = "fck"; + #address-cells = <1>; + #size-cells = <0>; + bus_freq = <1000000>; + reg = <0x1000 0x100>; + }; + + cpts { + clocks = <&gmac_clkctrl DRA7_GMAC_GMAC_CLKCTRL 25>; + clock-names = "cpts"; + }; + }; }; }; }; diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 40d7f1a4fc45..89cce8d4bc6b 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -554,3 +554,4 @@ CONFIG_DEBUG_INFO_DWARF4=y CONFIG_MAGIC_SYSRQ=y CONFIG_SCHEDSTATS=y # CONFIG_DEBUG_BUGVERBOSE is not set +CONFIG_TI_CPSW_SWITCHDEV=y diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 5bf963830b17..c764cc8fb3b6 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -59,23 +59,6 @@ alternative_else_nop_endif #endif /* - * These macros are no-ops when UAO is present. - */ - .macro uaccess_disable_not_uao, tmp1, tmp2 - uaccess_ttbr0_disable \tmp1, \tmp2 -alternative_if ARM64_ALT_PAN_NOT_UAO - SET_PSTATE_PAN(1) -alternative_else_nop_endif - .endm - - .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3 - uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3 -alternative_if ARM64_ALT_PAN_NOT_UAO - SET_PSTATE_PAN(0) -alternative_else_nop_endif - .endm - -/* * Remove the address tag from a virtual address, if present. 
*/ .macro untagged_addr, dst, addr diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 097d6bfac0b7..127712b0b970 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -378,20 +378,34 @@ do { \ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); #define raw_copy_from_user(to, from, n) \ ({ \ - __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ + unsigned long __acfu_ret; \ + uaccess_enable_not_uao(); \ + __acfu_ret = __arch_copy_from_user((to), \ + __uaccess_mask_ptr(from), (n)); \ + uaccess_disable_not_uao(); \ + __acfu_ret; \ }) extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); #define raw_copy_to_user(to, from, n) \ ({ \ - __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \ + unsigned long __actu_ret; \ + uaccess_enable_not_uao(); \ + __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \ + (from), (n)); \ + uaccess_disable_not_uao(); \ + __actu_ret; \ }) extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); #define raw_copy_in_user(to, from, n) \ ({ \ - __arch_copy_in_user(__uaccess_mask_ptr(to), \ - __uaccess_mask_ptr(from), (n)); \ + unsigned long __aciu_ret; \ + uaccess_enable_not_uao(); \ + __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \ + __uaccess_mask_ptr(from), (n)); \ + uaccess_disable_not_uao(); \ + __aciu_ret; \ }) #define INLINE_COPY_TO_USER @@ -400,8 +414,11 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) { - if (access_ok(to, n)) + if (access_ok(to, n)) { + uaccess_enable_not_uao(); n = __arch_clear_user(__uaccess_mask_ptr(to), n); + uaccess_disable_not_uao(); + } return n; } #define clear_user __clear_user diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 10415572e82f..aeafc03e961a 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -20,7 +20,6 @@ * Alignment fixed up by hardware. 
*/ ENTRY(__arch_clear_user) - uaccess_enable_not_uao x2, x3, x4 mov x2, x1 // save the size for fixup return subs x1, x1, #8 b.mi 2f @@ -40,7 +39,6 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 b.mi 5f uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 5: mov x0, #0 - uaccess_disable_not_uao x2, x3 ret ENDPROC(__arch_clear_user) EXPORT_SYMBOL(__arch_clear_user) diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 680e74409ff9..ebb3c06cbb5d 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -54,10 +54,8 @@ end .req x5 ENTRY(__arch_copy_from_user) - uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3, x4 mov x0, #0 // Nothing to copy ret ENDPROC(__arch_copy_from_user) diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 0bedae3f3792..3d8153a1ebce 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -56,10 +56,8 @@ end .req x5 ENTRY(__arch_copy_in_user) - uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(__arch_copy_in_user) diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 2d88c736e8f2..357eae2c18eb 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -53,10 +53,8 @@ end .req x5 ENTRY(__arch_copy_to_user) - uaccess_enable_not_uao x3, x4, x5 add end, x0, x2 #include "copy_template.S" - uaccess_disable_not_uao x3, x4 mov x0, #0 ret ENDPROC(__arch_copy_to_user) diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c index cbfcbe6470a5..bfa30b75b2b8 100644 --- a/arch/arm64/lib/uaccess_flushcache.c +++ b/arch/arm64/lib/uaccess_flushcache.c @@ -28,7 +28,11 @@ void memcpy_page_flushcache(char *to, struct page *page, size_t offset, unsigned long __copy_user_flushcache(void *to, const void __user *from, unsigned long n) { - unsigned long rc = __arch_copy_from_user(to, from, n); + unsigned long rc; + + uaccess_enable_not_uao(); + rc = __arch_copy_from_user(to, from, n); + uaccess_disable_not_uao(); /* See above */ __clean_dcache_area_pop(to, n - rc); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index ce88211b9c6c..8d2134136290 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -23,6 +23,8 @@ #include <linux/filter.h> #include <linux/init.h> #include <linux/bpf.h> +#include <linux/mm.h> +#include <linux/kernel.h> #include <asm/cacheflush.h> #include <asm/dis.h> #include <asm/facility.h> @@ -38,10 +40,11 @@ struct bpf_jit { int size; /* Size of program and literal pool */ int size_prg; /* Size of program */ int prg; /* Current position in program */ - int lit_start; /* Start of literal pool */ - int lit; /* Current position in literal pool */ + int lit32_start; /* Start of 32-bit literal pool */ + int lit32; /* Current position in 32-bit literal pool */ + int lit64_start; /* Start of 64-bit literal pool */ + int lit64; /* Current position in 64-bit literal pool */ int base_ip; /* Base address for literal pool */ - int ret0_ip; /* Address of return 0 */ int exit_ip; /* Address of exit */ int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */ int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ @@ -49,14 +52,10 @@ struct bpf_jit { int labels[1]; /* Labels for local jumps */ }; -#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */ - -#define SEEN_MEM (1 << 0) /* 
use mem[] for temporary storage */ -#define SEEN_RET0 (1 << 1) /* ret0_ip points to a valid return 0 */ -#define SEEN_LITERAL (1 << 2) /* code uses literals */ -#define SEEN_FUNC (1 << 3) /* calls C functions */ -#define SEEN_TAIL_CALL (1 << 4) /* code uses tail calls */ -#define SEEN_REG_AX (1 << 5) /* code uses constant blinding */ +#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */ +#define SEEN_LITERAL BIT(1) /* code uses literals */ +#define SEEN_FUNC BIT(2) /* calls C functions */ +#define SEEN_TAIL_CALL BIT(3) /* code uses tail calls */ #define SEEN_STACK (SEEN_FUNC | SEEN_MEM) /* @@ -131,13 +130,13 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define _EMIT2(op) \ ({ \ if (jit->prg_buf) \ - *(u16 *) (jit->prg_buf + jit->prg) = op; \ + *(u16 *) (jit->prg_buf + jit->prg) = (op); \ jit->prg += 2; \ }) #define EMIT2(op, b1, b2) \ ({ \ - _EMIT2(op | reg(b1, b2)); \ + _EMIT2((op) | reg(b1, b2)); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) @@ -145,20 +144,20 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define _EMIT4(op) \ ({ \ if (jit->prg_buf) \ - *(u32 *) (jit->prg_buf + jit->prg) = op; \ + *(u32 *) (jit->prg_buf + jit->prg) = (op); \ jit->prg += 4; \ }) #define EMIT4(op, b1, b2) \ ({ \ - _EMIT4(op | reg(b1, b2)); \ + _EMIT4((op) | reg(b1, b2)); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) #define EMIT4_RRF(op, b1, b2, b3) \ ({ \ - _EMIT4(op | reg_high(b3) << 8 | reg(b1, b2)); \ + _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2)); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ REG_SET_SEEN(b3); \ @@ -167,13 +166,13 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define _EMIT4_DISP(op, disp) \ ({ \ unsigned int __disp = (disp) & 0xfff; \ - _EMIT4(op | __disp); \ + _EMIT4((op) | __disp); \ }) #define EMIT4_DISP(op, b1, b2, disp) \ ({ \ - _EMIT4_DISP(op | reg_high(b1) << 16 | \ - reg_high(b2) << 8, disp); \ + _EMIT4_DISP((op) | reg_high(b1) << 16 | \ + reg_high(b2) << 8, (disp)); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) @@ -181,21 +180,27 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT4_IMM(op, b1, imm) \ ({ \ unsigned int __imm = (imm) & 0xffff; \ - _EMIT4(op | reg_high(b1) << 16 | __imm); \ + _EMIT4((op) | reg_high(b1) << 16 | __imm); \ REG_SET_SEEN(b1); \ }) #define EMIT4_PCREL(op, pcrel) \ ({ \ long __pcrel = ((pcrel) >> 1) & 0xffff; \ - _EMIT4(op | __pcrel); \ + _EMIT4((op) | __pcrel); \ +}) + +#define EMIT4_PCREL_RIC(op, mask, target) \ +({ \ + int __rel = ((target) - jit->prg) / 2; \ + _EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \ }) #define _EMIT6(op1, op2) \ ({ \ if (jit->prg_buf) { \ - *(u32 *) (jit->prg_buf + jit->prg) = op1; \ - *(u16 *) (jit->prg_buf + jit->prg + 4) = op2; \ + *(u32 *) (jit->prg_buf + jit->prg) = (op1); \ + *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \ } \ jit->prg += 6; \ }) @@ -203,20 +208,20 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define _EMIT6_DISP(op1, op2, disp) \ ({ \ unsigned int __disp = (disp) & 0xfff; \ - _EMIT6(op1 | __disp, op2); \ + _EMIT6((op1) | __disp, op2); \ }) #define _EMIT6_DISP_LH(op1, op2, disp) \ ({ \ - u32 _disp = (u32) disp; \ + u32 _disp = (u32) (disp); \ unsigned int __disp_h = _disp & 0xff000; \ unsigned int __disp_l = _disp & 0x00fff; \ - _EMIT6(op1 | __disp_l, op2 | __disp_h >> 4); \ + _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4); \ }) #define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \ ({ \ - _EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 | \ + _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 | \ reg_high(b3) << 8, 
op2, disp); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ @@ -226,8 +231,8 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \ ({ \ int rel = (jit->labels[label] - jit->prg) >> 1; \ - _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), \ - op2 | mask << 12); \ + _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \ + (op2) | (mask) << 12); \ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) @@ -235,68 +240,83 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \ ({ \ int rel = (jit->labels[label] - jit->prg) >> 1; \ - _EMIT6(op1 | (reg_high(b1) | mask) << 16 | \ - (rel & 0xffff), op2 | (imm & 0xff) << 8); \ + _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \ + (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \ REG_SET_SEEN(b1); \ - BUILD_BUG_ON(((unsigned long) imm) > 0xff); \ + BUILD_BUG_ON(((unsigned long) (imm)) > 0xff); \ }) #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \ ({ \ /* Branch instruction needs 6 bytes */ \ - int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\ - _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask); \ + int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\ + _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) #define EMIT6_PCREL_RILB(op, b, target) \ ({ \ - int rel = (target - jit->prg) / 2; \ - _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \ + unsigned int rel = (int)((target) - jit->prg) / 2; \ + _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\ REG_SET_SEEN(b); \ }) #define EMIT6_PCREL_RIL(op, target) \ ({ \ - int rel = (target - jit->prg) / 2; \ - _EMIT6(op | rel >> 16, rel & 0xffff); \ + unsigned int rel = (int)((target) - jit->prg) / 2; \ + _EMIT6((op) | rel >> 16, rel & 0xffff); \ +}) + +#define EMIT6_PCREL_RILC(op, mask, target) \ +({ \ + EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \ }) #define _EMIT6_IMM(op, imm) \ ({ \ unsigned int __imm = (imm); \ - _EMIT6(op | (__imm >> 16), __imm & 0xffff); \ + _EMIT6((op) | (__imm >> 16), __imm & 0xffff); \ }) #define EMIT6_IMM(op, b1, imm) \ ({ \ - _EMIT6_IMM(op | reg_high(b1) << 16, imm); \ + _EMIT6_IMM((op) | reg_high(b1) << 16, imm); \ REG_SET_SEEN(b1); \ }) -#define EMIT_CONST_U32(val) \ +#define _EMIT_CONST_U32(val) \ ({ \ unsigned int ret; \ - ret = jit->lit - jit->base_ip; \ - jit->seen |= SEEN_LITERAL; \ + ret = jit->lit32; \ if (jit->prg_buf) \ - *(u32 *) (jit->prg_buf + jit->lit) = (u32) val; \ - jit->lit += 4; \ + *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\ + jit->lit32 += 4; \ ret; \ }) -#define EMIT_CONST_U64(val) \ +#define EMIT_CONST_U32(val) \ ({ \ - unsigned int ret; \ - ret = jit->lit - jit->base_ip; \ jit->seen |= SEEN_LITERAL; \ + _EMIT_CONST_U32(val) - jit->base_ip; \ +}) + +#define _EMIT_CONST_U64(val) \ +({ \ + unsigned int ret; \ + ret = jit->lit64; \ if (jit->prg_buf) \ - *(u64 *) (jit->prg_buf + jit->lit) = (u64) val; \ - jit->lit += 8; \ + *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\ + jit->lit64 += 8; \ ret; \ }) +#define EMIT_CONST_U64(val) \ +({ \ + jit->seen |= SEEN_LITERAL; \ + _EMIT_CONST_U64(val) - jit->base_ip; \ +}) + #define EMIT_ZERO(b1) \ ({ \ if (!fp->aux->verifier_zext) { \ @@ -307,6 +327,67 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) }) /* + * Return whether this is the first pass. The first pass is special, since we + * don't know any sizes yet, and thus must be conservative. 
+ */ +static bool is_first_pass(struct bpf_jit *jit) +{ + return jit->size == 0; +} + +/* + * Return whether this is the code generation pass. The code generation pass is + * special, since we should change as little as possible. + */ +static bool is_codegen_pass(struct bpf_jit *jit) +{ + return jit->prg_buf; +} + +/* + * Return whether "rel" can be encoded as a short PC-relative offset + */ +static bool is_valid_rel(int rel) +{ + return rel >= -65536 && rel <= 65534; +} + +/* + * Return whether "off" can be reached using a short PC-relative offset + */ +static bool can_use_rel(struct bpf_jit *jit, int off) +{ + return is_valid_rel(off - jit->prg); +} + +/* + * Return whether given displacement can be encoded using + * Long-Displacement Facility + */ +static bool is_valid_ldisp(int disp) +{ + return disp >= -524288 && disp <= 524287; +} + +/* + * Return whether the next 32-bit literal pool entry can be referenced using + * Long-Displacement Facility + */ +static bool can_use_ldisp_for_lit32(struct bpf_jit *jit) +{ + return is_valid_ldisp(jit->lit32 - jit->base_ip); +} + +/* + * Return whether the next 64-bit literal pool entry can be referenced using + * Long-Displacement Facility + */ +static bool can_use_ldisp_for_lit64(struct bpf_jit *jit) +{ + return is_valid_ldisp(jit->lit64 - jit->base_ip); +} + +/* * Fill whole space with illegal instructions */ static void jit_fill_hole(void *area, unsigned int size) @@ -383,9 +464,18 @@ static int get_end(struct bpf_jit *jit, int start) */ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth) { - + const int last = 15, save_restore_size = 6; int re = 6, rs; + if (is_first_pass(jit)) { + /* + * We don't know yet which registers are used. Reserve space + * conservatively. + */ + jit->prg += (last - re + 1) * save_restore_size; + return; + } + do { rs = get_start(jit, re); if (!rs) @@ -396,7 +486,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth) else restore_regs(jit, rs, re, stack_depth); re++; - } while (re <= 15); + } while (re <= last); } /* @@ -420,21 +510,28 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth) /* Save registers */ save_restore_regs(jit, REGS_SAVE, stack_depth); /* Setup literal pool */ - if (jit->seen & SEEN_LITERAL) { - /* basr %r13,0 */ - EMIT2(0x0d00, REG_L, REG_0); - jit->base_ip = jit->prg; + if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) { + if (!is_first_pass(jit) && + is_valid_ldisp(jit->size - (jit->prg + 2))) { + /* basr %l,0 */ + EMIT2(0x0d00, REG_L, REG_0); + jit->base_ip = jit->prg; + } else { + /* larl %l,lit32_start */ + EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start); + jit->base_ip = jit->lit32_start; + } } /* Setup stack and backchain */ - if (jit->seen & SEEN_STACK) { - if (jit->seen & SEEN_FUNC) + if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) { + if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) /* lgr %w1,%r15 (backchain) */ EMIT4(0xb9040000, REG_W1, REG_15); /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */ EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED); /* aghi %r15,-STK_OFF */ EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth)); - if (jit->seen & SEEN_FUNC) + if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) /* stg %w1,152(%r15) (backchain) */ EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, 152); @@ -446,12 +543,6 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth) */ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) { - /* Return 0 */ - if (jit->seen 
& SEEN_RET0) { - jit->ret0_ip = jit->prg; - /* lghi %b0,0 */ - EMIT4_IMM(0xa7090000, BPF_REG_0, 0); - } jit->exit_ip = jit->prg; /* Load exit code: lgr %r2,%b0 */ EMIT4(0xb9040000, REG_2, BPF_REG_0); @@ -476,7 +567,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) _EMIT2(0x07fe); if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable && - (jit->seen & SEEN_FUNC)) { + (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) { jit->r1_thunk_ip = jit->prg; /* Generate __s390_indirect_jump_r1 thunk */ if (test_facility(35)) { @@ -506,16 +597,14 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i, bool extra_pass) { struct bpf_insn *insn = &fp->insnsi[i]; - int jmp_off, last, insn_count = 1; u32 dst_reg = insn->dst_reg; u32 src_reg = insn->src_reg; + int last, insn_count = 1; u32 *addrs = jit->addrs; s32 imm = insn->imm; s16 off = insn->off; unsigned int mask; - if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX) - jit->seen |= SEEN_REG_AX; switch (insn->code) { /* * BPF_MOV @@ -549,9 +638,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, u64 imm64; imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32; - /* lg %dst,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L, - EMIT_CONST_U64(imm64)); + /* lgrl %dst,imm */ + EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64)); insn_count = 2; break; } @@ -680,9 +768,18 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4_IMM(0xa7080000, REG_W0, 0); /* lr %w1,%dst */ EMIT2(0x1800, REG_W1, dst_reg); - /* dl %w0,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L, - EMIT_CONST_U32(imm)); + if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) { + /* dl %w0,<d(imm)>(%l) */ + EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L, + EMIT_CONST_U32(imm)); + } else { + /* lgfrl %dst,imm */ + EMIT6_PCREL_RILB(0xc40c0000, dst_reg, + _EMIT_CONST_U32(imm)); + jit->seen |= SEEN_LITERAL; + /* dlr %w0,%dst */ + EMIT4(0xb9970000, REG_W0, dst_reg); + } /* llgfr %dst,%rc */ EMIT4(0xb9160000, dst_reg, rc_reg); if (insn_is_zext(&insn[1])) @@ -704,9 +801,18 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4_IMM(0xa7090000, REG_W0, 0); /* lgr %w1,%dst */ EMIT4(0xb9040000, REG_W1, dst_reg); - /* dlg %w0,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L, - EMIT_CONST_U64(imm)); + if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { + /* dlg %w0,<d(imm)>(%l) */ + EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L, + EMIT_CONST_U64(imm)); + } else { + /* lgrl %dst,imm */ + EMIT6_PCREL_RILB(0xc4080000, dst_reg, + _EMIT_CONST_U64(imm)); + jit->seen |= SEEN_LITERAL; + /* dlgr %w0,%dst */ + EMIT4(0xb9870000, REG_W0, dst_reg); + } /* lgr %dst,%rc */ EMIT4(0xb9040000, dst_reg, rc_reg); break; @@ -729,9 +835,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */ - /* ng %dst,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L, - EMIT_CONST_U64(imm)); + if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { + /* ng %dst,<d(imm)>(%l) */ + EMIT6_DISP_LH(0xe3000000, 0x0080, + dst_reg, REG_0, REG_L, + EMIT_CONST_U64(imm)); + } else { + /* lgrl %w0,imm */ + EMIT6_PCREL_RILB(0xc4080000, REG_W0, + _EMIT_CONST_U64(imm)); + jit->seen |= SEEN_LITERAL; + /* ngr %dst,%w0 */ + EMIT4(0xb9800000, dst_reg, REG_W0); + } break; /* * 
BPF_OR @@ -751,9 +867,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */ - /* og %dst,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L, - EMIT_CONST_U64(imm)); + if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { + /* og %dst,<d(imm)>(%l) */ + EMIT6_DISP_LH(0xe3000000, 0x0081, + dst_reg, REG_0, REG_L, + EMIT_CONST_U64(imm)); + } else { + /* lgrl %w0,imm */ + EMIT6_PCREL_RILB(0xc4080000, REG_W0, + _EMIT_CONST_U64(imm)); + jit->seen |= SEEN_LITERAL; + /* ogr %dst,%w0 */ + EMIT4(0xb9810000, dst_reg, REG_W0); + } break; /* * BPF_XOR @@ -775,9 +901,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */ - /* xg %dst,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L, - EMIT_CONST_U64(imm)); + if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) { + /* xg %dst,<d(imm)>(%l) */ + EMIT6_DISP_LH(0xe3000000, 0x0082, + dst_reg, REG_0, REG_L, + EMIT_CONST_U64(imm)); + } else { + /* lgrl %w0,imm */ + EMIT6_PCREL_RILB(0xc4080000, REG_W0, + _EMIT_CONST_U64(imm)); + jit->seen |= SEEN_LITERAL; + /* xgr %dst,%w0 */ + EMIT4(0xb9820000, dst_reg, REG_W0); + } break; /* * BPF_LSH @@ -1023,9 +1159,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, REG_SET_SEEN(BPF_REG_5); jit->seen |= SEEN_FUNC; - /* lg %w1,<d(imm)>(%l) */ - EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, - EMIT_CONST_U64(func)); + /* lgrl %w1,func */ + EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func)); if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) { /* brasl %r14,__s390_indirect_jump_r1 */ EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip); @@ -1054,9 +1189,17 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, /* llgf %w1,map.max_entries(%b2) */ EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, offsetof(struct bpf_array, map.max_entries)); - /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */ - EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3, - REG_W1, 0, 0xa); + /* if ((u32)%b3 >= (u32)%w1) goto out; */ + if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { + /* clrj %b3,%w1,0xa,label0 */ + EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3, + REG_W1, 0, 0xa); + } else { + /* clr %b3,%w1 */ + EMIT2(0x1500, BPF_REG_3, REG_W1); + /* brcl 0xa,label0 */ + EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]); + } /* * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT) @@ -1071,9 +1214,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4_IMM(0xa7080000, REG_W0, 1); /* laal %w1,%w0,off(%r15) */ EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off); - /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */ - EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1, - MAX_TAIL_CALL_CNT, 0, 0x2); + if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { + /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */ + EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1, + MAX_TAIL_CALL_CNT, 0, 0x2); + } else { + /* clfi %w1,MAX_TAIL_CALL_CNT */ + EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT); + /* brcl 0x2,label0 */ + EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]); + } /* * prog = array->ptrs[index]; @@ -1085,11 +1235,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4(0xb9160000, REG_1, BPF_REG_3); /* sllg %r1,%r1,3: %r1 *= 8 
*/ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3); - /* lg %r1,prog(%b2,%r1) */ - EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, + /* ltg %r1,prog(%b2,%r1) */ + EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2, REG_1, offsetof(struct bpf_array, ptrs)); - /* clgij %r1,0,0x8,label0 */ - EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8); + if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) { + /* brc 0x8,label0 */ + EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]); + } else { + /* brcl 0x8,label0 */ + EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]); + } /* * Restore registers before calling function @@ -1110,7 +1265,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, break; case BPF_JMP | BPF_EXIT: /* return b0 */ last = (i == fp->len - 1) ? 1 : 0; - if (last && !(jit->seen & SEEN_RET0)) + if (last) break; /* j <exit> */ EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); @@ -1246,36 +1401,83 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, goto branch_oc; branch_ks: is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; - /* lgfi %w1,imm (load sign extend imm) */ - EMIT6_IMM(0xc0010000, REG_W1, imm); - /* crj or cgrj %dst,%w1,mask,off */ - EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), - dst_reg, REG_W1, i, off, mask); + /* cfi or cgfi %dst,imm */ + EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000, + dst_reg, imm); + if (!is_first_pass(jit) && + can_use_rel(jit, addrs[i + off + 1])) { + /* brc mask,off */ + EMIT4_PCREL_RIC(0xa7040000, + mask >> 12, addrs[i + off + 1]); + } else { + /* brcl mask,off */ + EMIT6_PCREL_RILC(0xc0040000, + mask >> 12, addrs[i + off + 1]); + } break; branch_ku: is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; - /* lgfi %w1,imm (load sign extend imm) */ - EMIT6_IMM(0xc0010000, REG_W1, imm); - /* clrj or clgrj %dst,%w1,mask,off */ - EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), - dst_reg, REG_W1, i, off, mask); + /* clfi or clgfi %dst,imm */ + EMIT6_IMM(is_jmp32 ? 0xc20f0000 : 0xc20e0000, + dst_reg, imm); + if (!is_first_pass(jit) && + can_use_rel(jit, addrs[i + off + 1])) { + /* brc mask,off */ + EMIT4_PCREL_RIC(0xa7040000, + mask >> 12, addrs[i + off + 1]); + } else { + /* brcl mask,off */ + EMIT6_PCREL_RILC(0xc0040000, + mask >> 12, addrs[i + off + 1]); + } break; branch_xs: is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; - /* crj or cgrj %dst,%src,mask,off */ - EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), - dst_reg, src_reg, i, off, mask); + if (!is_first_pass(jit) && + can_use_rel(jit, addrs[i + off + 1])) { + /* crj or cgrj %dst,%src,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), + dst_reg, src_reg, i, off, mask); + } else { + /* cr or cgr %dst,%src */ + if (is_jmp32) + EMIT2(0x1900, dst_reg, src_reg); + else + EMIT4(0xb9200000, dst_reg, src_reg); + /* brcl mask,off */ + EMIT6_PCREL_RILC(0xc0040000, + mask >> 12, addrs[i + off + 1]); + } break; branch_xu: is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; - /* clrj or clgrj %dst,%src,mask,off */ - EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), - dst_reg, src_reg, i, off, mask); + if (!is_first_pass(jit) && + can_use_rel(jit, addrs[i + off + 1])) { + /* clrj or clgrj %dst,%src,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 
0x0077 : 0x0065), + dst_reg, src_reg, i, off, mask); + } else { + /* clr or clgr %dst,%src */ + if (is_jmp32) + EMIT2(0x1500, dst_reg, src_reg); + else + EMIT4(0xb9210000, dst_reg, src_reg); + /* brcl mask,off */ + EMIT6_PCREL_RILC(0xc0040000, + mask >> 12, addrs[i + off + 1]); + } break; branch_oc: - /* brc mask,jmp_off (branch instruction needs 4 bytes) */ - jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4); - EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off); + if (!is_first_pass(jit) && + can_use_rel(jit, addrs[i + off + 1])) { + /* brc mask,off */ + EMIT4_PCREL_RIC(0xa7040000, + mask >> 12, addrs[i + off + 1]); + } else { + /* brcl mask,off */ + EMIT6_PCREL_RILC(0xc0040000, + mask >> 12, addrs[i + off + 1]); + } break; } default: /* too complex, give up */ @@ -1286,28 +1488,67 @@ branch_oc: } /* + * Return whether new i-th instruction address does not violate any invariant + */ +static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i) +{ + /* On the first pass anything goes */ + if (is_first_pass(jit)) + return true; + + /* The codegen pass must not change anything */ + if (is_codegen_pass(jit)) + return jit->addrs[i] == jit->prg; + + /* Passes in between must not increase code size */ + return jit->addrs[i] >= jit->prg; +} + +/* + * Update the address of i-th instruction + */ +static int bpf_set_addr(struct bpf_jit *jit, int i) +{ + if (!bpf_is_new_addr_sane(jit, i)) + return -1; + jit->addrs[i] = jit->prg; + return 0; +} + +/* * Compile eBPF program into s390x code */ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp, bool extra_pass) { - int i, insn_count; + int i, insn_count, lit32_size, lit64_size; - jit->lit = jit->lit_start; + jit->lit32 = jit->lit32_start; + jit->lit64 = jit->lit64_start; jit->prg = 0; bpf_jit_prologue(jit, fp->aux->stack_depth); + if (bpf_set_addr(jit, 0) < 0) + return -1; for (i = 0; i < fp->len; i += insn_count) { insn_count = bpf_jit_insn(jit, fp, i, extra_pass); if (insn_count < 0) return -1; /* Next instruction address */ - jit->addrs[i + insn_count] = jit->prg; + if (bpf_set_addr(jit, i + insn_count) < 0) + return -1; } bpf_jit_epilogue(jit, fp->aux->stack_depth); - jit->lit_start = jit->prg; - jit->size = jit->lit; + lit32_size = jit->lit32 - jit->lit32_start; + lit64_size = jit->lit64 - jit->lit64_start; + jit->lit32_start = jit->prg; + if (lit32_size) + jit->lit32_start = ALIGN(jit->lit32_start, 4); + jit->lit64_start = jit->lit32_start + lit32_size; + if (lit64_size) + jit->lit64_start = ALIGN(jit->lit64_start, 8); + jit->size = jit->lit64_start + lit64_size; jit->size_prg = jit->prg; return 0; } @@ -1369,7 +1610,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) } memset(&jit, 0, sizeof(jit)); - jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); + jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); if (jit.addrs == NULL) { fp = orig_fp; goto out; @@ -1388,12 +1629,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) /* * Final pass: Allocate and generate program */ - if (jit.size >= BPF_SIZE_MAX) { - fp = orig_fp; - goto free_addrs; - } - - header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole); + header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 8, jit_fill_hole); if (!header) { fp = orig_fp; goto free_addrs; @@ -1422,7 +1658,7 @@ skip_init_ctx: if (!fp->is_func || extra_pass) { bpf_prog_fill_jited_linfo(fp, jit.addrs + 1); free_addrs: - kfree(jit.addrs); + kvfree(jit.addrs); kfree(jit_data); fp->aux->jit_data = NULL; } diff --git 
a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h index 5e8319bb207a..23c626a742e8 100644 --- a/arch/x86/include/asm/text-patching.h +++ b/arch/x86/include/asm/text-patching.h @@ -26,10 +26,11 @@ static inline void apply_paravirt(struct paravirt_patch_site *start, #define POKE_MAX_OPCODE_SIZE 5 struct text_poke_loc { - void *detour; void *addr; - size_t len; - const char opcode[POKE_MAX_OPCODE_SIZE]; + int len; + s32 rel32; + u8 opcode; + const u8 text[POKE_MAX_OPCODE_SIZE]; }; extern void text_poke_early(void *addr, const void *opcode, size_t len); @@ -51,8 +52,10 @@ extern void text_poke_early(void *addr, const void *opcode, size_t len); extern void *text_poke(void *addr, const void *opcode, size_t len); extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len); extern int poke_int3_handler(struct pt_regs *regs); -extern void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); +extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate); extern void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries); +extern void text_poke_loc_init(struct text_poke_loc *tp, void *addr, + const void *opcode, size_t len, const void *emulate); extern int after_bootmem; extern __ro_after_init struct mm_struct *poking_mm; extern __ro_after_init unsigned long poking_addr; @@ -63,8 +66,17 @@ static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip) regs->ip = ip; } -#define INT3_INSN_SIZE 1 -#define CALL_INSN_SIZE 5 +#define INT3_INSN_SIZE 1 +#define INT3_INSN_OPCODE 0xCC + +#define CALL_INSN_SIZE 5 +#define CALL_INSN_OPCODE 0xE8 + +#define JMP32_INSN_SIZE 5 +#define JMP32_INSN_OPCODE 0xE9 + +#define JMP8_INSN_SIZE 2 +#define JMP8_INSN_OPCODE 0xEB static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val) { diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 9d3a971ea364..9ec463fe96f2 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -956,16 +956,15 @@ NOKPROBE_SYMBOL(patch_cmp); int poke_int3_handler(struct pt_regs *regs) { struct text_poke_loc *tp; - unsigned char int3 = 0xcc; void *ip; /* * Having observed our INT3 instruction, we now must observe * bp_patching.nr_entries. * - * nr_entries != 0 INT3 - * WMB RMB - * write INT3 if (nr_entries) + * nr_entries != 0 INT3 + * WMB RMB + * write INT3 if (nr_entries) * * Idem for other elements in bp_patching. */ @@ -978,9 +977,9 @@ int poke_int3_handler(struct pt_regs *regs) return 0; /* - * Discount the sizeof(int3). See text_poke_bp_batch(). + * Discount the INT3. See text_poke_bp_batch(). */ - ip = (void *) regs->ip - sizeof(int3); + ip = (void *) regs->ip - INT3_INSN_SIZE; /* * Skip the binary search if there is a single member in the vector. @@ -997,8 +996,28 @@ int poke_int3_handler(struct pt_regs *regs) return 0; } - /* set up the specified breakpoint detour */ - regs->ip = (unsigned long) tp->detour; + ip += tp->len; + + switch (tp->opcode) { + case INT3_INSN_OPCODE: + /* + * Someone poked an explicit INT3, they'll want to handle it, + * do not consume. + */ + return 0; + + case CALL_INSN_OPCODE: + int3_emulate_call(regs, (long)ip + tp->rel32); + break; + + case JMP32_INSN_OPCODE: + case JMP8_INSN_OPCODE: + int3_emulate_jmp(regs, (long)ip + tp->rel32); + break; + + default: + BUG(); + } return 1; } @@ -1014,7 +1033,7 @@ NOKPROBE_SYMBOL(poke_int3_handler); * synchronization using int3 breakpoint. 
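As context for the poke_int3_handler() rework just above: once the handler knows the patched instruction's length, first opcode byte and rel32, resuming execution is plain pointer arithmetic on regs->ip. The stand-alone sketch below only reproduces that arithmetic under the same convention as int3_emulate_call()/int3_emulate_jmp(); struct fake_regs and the helper names are hypothetical illustrations, not kernel API.

#include <stdint.h>
#include <stdio.h>

struct fake_regs {
	unsigned long ip;	/* points just past the int3 byte on entry */
	unsigned long sp;
	unsigned long stack[16];
};

/* next_ip is the address of the instruction following the patched one */
static void emulate_jmp(struct fake_regs *regs, unsigned long next_ip, int32_t rel32)
{
	regs->ip = next_ip + rel32;
}

static void emulate_call(struct fake_regs *regs, unsigned long next_ip, int32_t rel32)
{
	regs->stack[--regs->sp] = next_ip;	/* push the return address */
	regs->ip = next_ip + rel32;
}

int main(void)
{
	struct fake_regs regs = { .ip = 0x1001, .sp = 16 };	/* int3 hit at 0x1000 */
	unsigned long addr = regs.ip - 1;			/* INT3_INSN_SIZE == 1 */
	unsigned long next_ip = addr + 5;			/* CALL/JMP32 are 5 bytes */

	emulate_call(&regs, next_ip, 0x100);
	printf("target %#lx, return addr %#lx\n", regs.ip, regs.stack[regs.sp]);
	return 0;
}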
* * The way it is done: - * - For each entry in the vector: + * - For each entry in the vector: * - add a int3 trap to the address that will be patched * - sync cores * - For each entry in the vector: @@ -1027,9 +1046,9 @@ NOKPROBE_SYMBOL(poke_int3_handler); */ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries) { - int patched_all_but_first = 0; - unsigned char int3 = 0xcc; + unsigned char int3 = INT3_INSN_OPCODE; unsigned int i; + int do_sync; lockdep_assert_held(&text_mutex); @@ -1053,16 +1072,16 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries) /* * Second step: update all but the first byte of the patched range. */ - for (i = 0; i < nr_entries; i++) { + for (do_sync = 0, i = 0; i < nr_entries; i++) { if (tp[i].len - sizeof(int3) > 0) { text_poke((char *)tp[i].addr + sizeof(int3), - (const char *)tp[i].opcode + sizeof(int3), + (const char *)tp[i].text + sizeof(int3), tp[i].len - sizeof(int3)); - patched_all_but_first++; + do_sync++; } } - if (patched_all_but_first) { + if (do_sync) { /* * According to Intel, this core syncing is very likely * not necessary and we'd be safe even without it. But @@ -1075,10 +1094,17 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries) * Third step: replace the first byte (int3) by the first byte of * replacing opcode. */ - for (i = 0; i < nr_entries; i++) - text_poke(tp[i].addr, tp[i].opcode, sizeof(int3)); + for (do_sync = 0, i = 0; i < nr_entries; i++) { + if (tp[i].text[0] == INT3_INSN_OPCODE) + continue; + + text_poke(tp[i].addr, tp[i].text, sizeof(int3)); + do_sync++; + } + + if (do_sync) + on_each_cpu(do_sync_core, NULL, 1); - on_each_cpu(do_sync_core, NULL, 1); /* * sync_core() implies an smp_mb() and orders this store against * the writing of the new instruction. @@ -1087,6 +1113,60 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries) bp_patching.nr_entries = 0; } +void text_poke_loc_init(struct text_poke_loc *tp, void *addr, + const void *opcode, size_t len, const void *emulate) +{ + struct insn insn; + + if (!opcode) + opcode = (void *)tp->text; + else + memcpy((void *)tp->text, opcode, len); + + if (!emulate) + emulate = opcode; + + kernel_insn_init(&insn, emulate, MAX_INSN_SIZE); + insn_get_length(&insn); + + BUG_ON(!insn_complete(&insn)); + BUG_ON(len != insn.length); + + tp->addr = addr; + tp->len = len; + tp->opcode = insn.opcode.bytes[0]; + + switch (tp->opcode) { + case INT3_INSN_OPCODE: + break; + + case CALL_INSN_OPCODE: + case JMP32_INSN_OPCODE: + case JMP8_INSN_OPCODE: + tp->rel32 = insn.immediate.value; + break; + + default: /* assume NOP */ + switch (len) { + case 2: /* NOP2 -- emulate as JMP8+0 */ + BUG_ON(memcmp(emulate, ideal_nops[len], len)); + tp->opcode = JMP8_INSN_OPCODE; + tp->rel32 = 0; + break; + + case 5: /* NOP5 -- emulate as JMP32+0 */ + BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len)); + tp->opcode = JMP32_INSN_OPCODE; + tp->rel32 = 0; + break; + + default: /* unknown instruction */ + BUG(); + } + break; + } +} + /** * text_poke_bp() -- update instructions on live kernel on SMP * @addr: address to patch @@ -1098,20 +1178,10 @@ void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries) * dynamically allocated memory. This function should be used when it is * not possible to allocate memory. 
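To illustrate the reworked text_poke_bp() interface documented above: callers now pass the new instruction bytes plus an optional template to emulate while the site still reads as int3 (NULL meaning "emulate the new bytes themselves", as the jump_label and kprobes call sites further down do). The stand-alone sketch only shows how such a 5-byte near-jump template could be assembled (opcode 0xE9 followed by a little-endian rel32); it assumes a little-endian host and is an illustration, not kernel code.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

/* Build the 5 bytes for "jmp dest", to be written at addr. */
static void build_jmp32(uint8_t buf[JMP32_INSN_SIZE], unsigned long addr, unsigned long dest)
{
	int32_t rel32 = (int32_t)(dest - (addr + JMP32_INSN_SIZE));

	buf[0] = JMP32_INSN_OPCODE;
	memcpy(&buf[1], &rel32, sizeof(rel32));	/* immediate stored little endian */
}

int main(void)
{
	uint8_t insn[JMP32_INSN_SIZE];

	build_jmp32(insn, 0x1000, 0x2000);	/* rel32 = 0x2000 - 0x1005 = 0xffb */
	for (int i = 0; i < JMP32_INSN_SIZE; i++)
		printf("%02x ", insn[i]);
	printf("\n");				/* prints: e9 fb 0f 00 00 */
	return 0;
}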
*/ -void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) +void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate) { - struct text_poke_loc tp = { - .detour = handler, - .addr = addr, - .len = len, - }; - - if (len > POKE_MAX_OPCODE_SIZE) { - WARN_ONCE(1, "len is larger than %d\n", POKE_MAX_OPCODE_SIZE); - return; - } - - memcpy((void *)tp.opcode, opcode, len); + struct text_poke_loc tp; + text_poke_loc_init(&tp, addr, opcode, len, emulate); text_poke_bp_batch(&tp, 1); } diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index 044053235302..c1a8b9e71408 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -89,8 +89,7 @@ static void __ref __jump_label_transform(struct jump_entry *entry, return; } - text_poke_bp((void *)jump_entry_code(entry), &code, JUMP_LABEL_NOP_SIZE, - (void *)jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE); + text_poke_bp((void *)jump_entry_code(entry), &code, JUMP_LABEL_NOP_SIZE, NULL); } void arch_jump_label_transform(struct jump_entry *entry, @@ -147,11 +146,9 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry, } __jump_label_set_jump_code(entry, type, - (union jump_code_union *) &tp->opcode, 0); + (union jump_code_union *)&tp->text, 0); - tp->addr = entry_code; - tp->detour = entry_code + JUMP_LABEL_NOP_SIZE; - tp->len = JUMP_LABEL_NOP_SIZE; + text_poke_loc_init(tp, entry_code, NULL, JUMP_LABEL_NOP_SIZE, NULL); tp_vec_nr++; diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index b348dd506d58..8900329c28a7 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -437,8 +437,7 @@ void arch_optimize_kprobes(struct list_head *oplist) insn_buff[0] = RELATIVEJUMP_OPCODE; *(s32 *)(&insn_buff[1]) = rel; - text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE, - op->optinsn.insn); + text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE, NULL); list_del_init(&op->list); } @@ -448,12 +447,18 @@ void arch_optimize_kprobes(struct list_head *oplist) void arch_unoptimize_kprobe(struct optimized_kprobe *op) { u8 insn_buff[RELATIVEJUMP_SIZE]; + u8 emulate_buff[RELATIVEJUMP_SIZE]; /* Set int3 to first byte for kprobes */ insn_buff[0] = BREAKPOINT_INSTRUCTION; memcpy(insn_buff + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); + + emulate_buff[0] = RELATIVEJUMP_OPCODE; + *(s32 *)(&emulate_buff[1]) = (s32)((long)op->optinsn.insn - + ((long)op->kp.addr + RELATIVEJUMP_SIZE)); + text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE, - op->optinsn.insn); + emulate_buff); } /* diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 8cd23d8309bf..2e586f579945 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -9,9 +9,11 @@ #include <linux/filter.h> #include <linux/if_vlan.h> #include <linux/bpf.h> +#include <linux/memory.h> #include <asm/extable.h> #include <asm/set_memory.h> #include <asm/nospec-branch.h> +#include <asm/text-patching.h> static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { @@ -96,6 +98,7 @@ static int bpf_size_to_x86_bytes(int bpf_size) /* Pick a register outside of BPF range for JIT internal work */ #define AUX_REG (MAX_BPF_JIT_REG + 1) +#define X86_REG_R9 (MAX_BPF_JIT_REG + 2) /* * The following table maps BPF registers to x86-64 registers. @@ -104,8 +107,8 @@ static int bpf_size_to_x86_bytes(int bpf_size) * register in load/store instructions, it always needs an * extra byte of encoding and is callee saved. * - * Also x86-64 register R9 is unused. 
x86-64 register R10 is - * used for blinding (if enabled). + * x86-64 register R9 is not used by BPF programs, but can be used by BPF + * trampoline. x86-64 register R10 is used for blinding (if enabled). */ static const int reg2hex[] = { [BPF_REG_0] = 0, /* RAX */ @@ -121,6 +124,7 @@ static const int reg2hex[] = { [BPF_REG_FP] = 5, /* RBP readonly */ [BPF_REG_AX] = 2, /* R10 temp register */ [AUX_REG] = 3, /* R11 temp register */ + [X86_REG_R9] = 1, /* R9 register, 6th function argument */ }; static const int reg2pt_regs[] = { @@ -148,6 +152,7 @@ static bool is_ereg(u32 reg) BIT(BPF_REG_7) | BIT(BPF_REG_8) | BIT(BPF_REG_9) | + BIT(X86_REG_R9) | BIT(BPF_REG_AX)); } @@ -198,8 +203,10 @@ struct jit_context { /* Maximum number of bytes emitted while JITing one eBPF insn */ #define BPF_MAX_INSN_SIZE 128 #define BPF_INSN_SAFETY 64 +/* number of bytes emit_call() needs to generate call instruction */ +#define X86_CALL_SIZE 5 -#define PROLOGUE_SIZE 20 +#define PROLOGUE_SIZE 25 /* * Emit x86-64 prologue code for BPF program and check its size. @@ -208,8 +215,13 @@ struct jit_context { static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf) { u8 *prog = *pprog; - int cnt = 0; + int cnt = X86_CALL_SIZE; + /* BPF trampoline can be made to work without these nops, + * but let's waste 5 bytes for now and optimize later + */ + memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt); + prog += cnt; EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ /* sub rsp, rounded_stack_depth */ @@ -390,6 +402,149 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) *pprog = prog; } +/* LDX: dst_reg = *(u8*)(src_reg + off) */ +static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) +{ + u8 *prog = *pprog; + int cnt = 0; + + switch (size) { + case BPF_B: + /* Emit 'movzx rax, byte ptr [rax + off]' */ + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); + break; + case BPF_H: + /* Emit 'movzx rax, word ptr [rax + off]' */ + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); + break; + case BPF_W: + /* Emit 'mov eax, dword ptr [rax+0x14]' */ + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); + else + EMIT1(0x8B); + break; + case BPF_DW: + /* Emit 'mov rax, qword ptr [rax+0x14]' */ + EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); + break; + } + /* + * If insn->off == 0 we can save one extra byte, but + * special case of x86 R13 which always needs an offset + * is not worth the hassle + */ + if (is_imm8(off)) + EMIT2(add_2reg(0x40, src_reg, dst_reg), off); + else + EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off); + *pprog = prog; +} + +/* STX: *(u8*)(dst_reg + off) = src_reg */ +static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) +{ + u8 *prog = *pprog; + int cnt = 0; + + switch (size) { + case BPF_B: + /* Emit 'mov byte ptr [rax + off], al' */ + if (is_ereg(dst_reg) || is_ereg(src_reg) || + /* We have to add extra byte for x86 SIL, DIL regs */ + src_reg == BPF_REG_1 || src_reg == BPF_REG_2) + EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); + else + EMIT1(0x88); + break; + case BPF_H: + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); + else + EMIT2(0x66, 0x89); + break; + case BPF_W: + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); + else + EMIT1(0x89); + break; + case BPF_DW: + EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); + break; + } + if (is_imm8(off)) + EMIT2(add_2reg(0x40, 
dst_reg, src_reg), off); + else + EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off); + *pprog = prog; +} + +static int emit_call(u8 **pprog, void *func, void *ip) +{ + u8 *prog = *pprog; + int cnt = 0; + s64 offset; + + offset = func - (ip + X86_CALL_SIZE); + if (!is_simm32(offset)) { + pr_err("Target call %p is out of range\n", func); + return -EINVAL; + } + EMIT1_off32(0xE8, offset); + *pprog = prog; + return 0; +} + +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, + void *old_addr, void *new_addr) +{ + u8 old_insn[X86_CALL_SIZE] = {}; + u8 new_insn[X86_CALL_SIZE] = {}; + u8 *prog; + int ret; + + if (!is_kernel_text((long)ip) && + !is_bpf_text_address((long)ip)) + /* BPF trampoline in modules is not supported */ + return -EINVAL; + + if (old_addr) { + prog = old_insn; + ret = emit_call(&prog, old_addr, (void *)ip); + if (ret) + return ret; + } + if (new_addr) { + prog = new_insn; + ret = emit_call(&prog, new_addr, (void *)ip); + if (ret) + return ret; + } + ret = -EBUSY; + mutex_lock(&text_mutex); + switch (t) { + case BPF_MOD_NOP_TO_CALL: + if (memcmp(ip, ideal_nops[NOP_ATOMIC5], X86_CALL_SIZE)) + goto out; + text_poke_bp(ip, new_insn, X86_CALL_SIZE, NULL); + break; + case BPF_MOD_CALL_TO_CALL: + if (memcmp(ip, old_insn, X86_CALL_SIZE)) + goto out; + text_poke_bp(ip, new_insn, X86_CALL_SIZE, NULL); + break; + case BPF_MOD_CALL_TO_NOP: + if (memcmp(ip, old_insn, X86_CALL_SIZE)) + goto out; + text_poke_bp(ip, ideal_nops[NOP_ATOMIC5], X86_CALL_SIZE, NULL); + break; + } + ret = 0; +out: + mutex_unlock(&text_mutex); + return ret; +} static bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs, int trapnr, @@ -773,68 +928,22 @@ st: if (is_imm8(insn->off)) /* STX: *(u8*)(dst_reg + off) = src_reg */ case BPF_STX | BPF_MEM | BPF_B: - /* Emit 'mov byte ptr [rax + off], al' */ - if (is_ereg(dst_reg) || is_ereg(src_reg) || - /* We have to add extra byte for x86 SIL, DIL regs */ - src_reg == BPF_REG_1 || src_reg == BPF_REG_2) - EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); - else - EMIT1(0x88); - goto stx; case BPF_STX | BPF_MEM | BPF_H: - if (is_ereg(dst_reg) || is_ereg(src_reg)) - EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); - else - EMIT2(0x66, 0x89); - goto stx; case BPF_STX | BPF_MEM | BPF_W: - if (is_ereg(dst_reg) || is_ereg(src_reg)) - EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); - else - EMIT1(0x89); - goto stx; case BPF_STX | BPF_MEM | BPF_DW: - EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); -stx: if (is_imm8(insn->off)) - EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off); - else - EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), - insn->off); + emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); break; /* LDX: dst_reg = *(u8*)(src_reg + off) */ case BPF_LDX | BPF_MEM | BPF_B: case BPF_LDX | BPF_PROBE_MEM | BPF_B: - /* Emit 'movzx rax, byte ptr [rax + off]' */ - EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); - goto ldx; case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_PROBE_MEM | BPF_H: - /* Emit 'movzx rax, word ptr [rax + off]' */ - EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); - goto ldx; case BPF_LDX | BPF_MEM | BPF_W: case BPF_LDX | BPF_PROBE_MEM | BPF_W: - /* Emit 'mov eax, dword ptr [rax+0x14]' */ - if (is_ereg(dst_reg) || is_ereg(src_reg)) - EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); - else - EMIT1(0x8B); - goto ldx; case BPF_LDX | BPF_MEM | BPF_DW: case BPF_LDX | BPF_PROBE_MEM | BPF_DW: - /* Emit 'mov rax, qword ptr [rax+0x14]' */ - EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); -ldx: /* - * If 
insn->off == 0 we can save one extra byte, but - * special case of x86 R13 which always needs an offset - * is not worth the hassle - */ - if (is_imm8(insn->off)) - EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off); - else - EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), - insn->off); + emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); if (BPF_MODE(insn->code) == BPF_PROBE_MEM) { struct exception_table_entry *ex; u8 *_insn = image + proglen; @@ -899,13 +1008,8 @@ xadd: if (is_imm8(insn->off)) /* call */ case BPF_JMP | BPF_CALL: func = (u8 *) __bpf_call_base + imm32; - jmp_offset = func - (image + addrs[i]); - if (!imm32 || !is_simm32(jmp_offset)) { - pr_err("unsupported BPF func %d addr %p image %p\n", - imm32, func, image); + if (!imm32 || emit_call(&prog, func, image + addrs[i - 1])) return -EINVAL; - } - EMIT1_off32(0xE8, jmp_offset); break; case BPF_JMP | BPF_TAIL_CALL: @@ -1138,6 +1242,210 @@ emit_jmp: return proglen; } +static void save_regs(struct btf_func_model *m, u8 **prog, int nr_args, + int stack_size) +{ + int i; + /* Store function arguments to stack. + * For a function that accepts two pointers the sequence will be: + * mov QWORD PTR [rbp-0x10],rdi + * mov QWORD PTR [rbp-0x8],rsi + */ + for (i = 0; i < min(nr_args, 6); i++) + emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]), + BPF_REG_FP, + i == 5 ? X86_REG_R9 : BPF_REG_1 + i, + -(stack_size - i * 8)); +} + +static void restore_regs(struct btf_func_model *m, u8 **prog, int nr_args, + int stack_size) +{ + int i; + + /* Restore function arguments from stack. + * For a function that accepts two pointers the sequence will be: + * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] + * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] + */ + for (i = 0; i < min(nr_args, 6); i++) + emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]), + i == 5 ? 
X86_REG_R9 : BPF_REG_1 + i, + BPF_REG_FP, + -(stack_size - i * 8)); +} + +static int invoke_bpf(struct btf_func_model *m, u8 **pprog, + struct bpf_prog **progs, int prog_cnt, int stack_size) +{ + u8 *prog = *pprog; + int cnt = 0, i; + + for (i = 0; i < prog_cnt; i++) { + if (emit_call(&prog, __bpf_prog_enter, prog)) + return -EINVAL; + /* remember prog start time returned by __bpf_prog_enter */ + emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); + + /* arg1: lea rdi, [rbp - stack_size] */ + EMIT4(0x48, 0x8D, 0x7D, -stack_size); + /* arg2: progs[i]->insnsi for interpreter */ + if (!progs[i]->jited) + emit_mov_imm64(&prog, BPF_REG_2, + (long) progs[i]->insnsi >> 32, + (u32) (long) progs[i]->insnsi); + /* call JITed bpf program or interpreter */ + if (emit_call(&prog, progs[i]->bpf_func, prog)) + return -EINVAL; + + /* arg1: mov rdi, progs[i] */ + emit_mov_imm64(&prog, BPF_REG_1, (long) progs[i] >> 32, + (u32) (long) progs[i]); + /* arg2: mov rsi, rbx <- start time in nsec */ + emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); + if (emit_call(&prog, __bpf_prog_exit, prog)) + return -EINVAL; + } + *pprog = prog; + return 0; +} + +/* Example: + * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); + * its 'struct btf_func_model' will be nr_args=2 + * The assembly code when eth_type_trans is executing after trampoline: + * + * push rbp + * mov rbp, rsp + * sub rsp, 16 // space for skb and dev + * push rbx // temp regs to pass start time + * mov qword ptr [rbp - 16], rdi // save skb pointer to stack + * mov qword ptr [rbp - 8], rsi // save dev pointer to stack + * call __bpf_prog_enter // rcu_read_lock and preempt_disable + * mov rbx, rax // remember start time in bpf stats are enabled + * lea rdi, [rbp - 16] // R1==ctx of bpf prog + * call addr_of_jited_FENTRY_prog + * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off + * mov rsi, rbx // prog start time + * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math + * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack + * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack + * pop rbx + * leave + * ret + * + * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be + * replaced with 'call generated_bpf_trampoline'. When it returns + * eth_type_trans will continue executing with original skb and dev pointers. 
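A worked version of the stack layout described in the comment above, as a hedged illustration: save_regs()/restore_regs() spill up to six arguments at rbp - stack_size ... rbp - 8, and with BPF_TRAMP_F_CALL_ORIG one extra slot at rbp - 8 holds the original function's return value. The snippet below just reproduces that offset arithmetic for the eth_type_trans example (nr_args = 2); it is not kernel code.

#include <stdio.h>

/* Mirror of the offsets handed to emit_stx()/emit_ldx() by save_regs(),
 * restore_regs() and the return-value spill in arch_prepare_bpf_trampoline(). */
int main(void)
{
	int nr_args = 2;		/* eth_type_trans(skb, dev) */
	int call_orig = 1;		/* BPF_TRAMP_F_CALL_ORIG */
	int stack_size = nr_args * 8 + (call_orig ? 8 : 0);

	for (int i = 0; i < nr_args; i++)
		printf("arg%d saved at [rbp%d]\n", i + 1, -(stack_size - i * 8));
	if (call_orig)
		printf("orig return value saved at [rbp-8]\n");
	printf("sub rsp, %d in the trampoline prologue\n", stack_size);
	return 0;
}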
+ * + * The assembly code when eth_type_trans is called from trampoline: + * + * push rbp + * mov rbp, rsp + * sub rsp, 24 // space for skb, dev, return value + * push rbx // temp regs to pass start time + * mov qword ptr [rbp - 24], rdi // save skb pointer to stack + * mov qword ptr [rbp - 16], rsi // save dev pointer to stack + * call __bpf_prog_enter // rcu_read_lock and preempt_disable + * mov rbx, rax // remember start time if bpf stats are enabled + * lea rdi, [rbp - 24] // R1==ctx of bpf prog + * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev + * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off + * mov rsi, rbx // prog start time + * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math + * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack + * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack + * call eth_type_trans+5 // execute body of eth_type_trans + * mov qword ptr [rbp - 8], rax // save return value + * call __bpf_prog_enter // rcu_read_lock and preempt_disable + * mov rbx, rax // remember start time in bpf stats are enabled + * lea rdi, [rbp - 24] // R1==ctx of bpf prog + * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value + * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off + * mov rsi, rbx // prog start time + * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math + * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value + * pop rbx + * leave + * add rsp, 8 // skip eth_type_trans's frame + * ret // return to its caller + */ +int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags, + struct bpf_prog **fentry_progs, int fentry_cnt, + struct bpf_prog **fexit_progs, int fexit_cnt, + void *orig_call) +{ + int cnt = 0, nr_args = m->nr_args; + int stack_size = nr_args * 8; + u8 *prog; + + /* x86-64 supports up to 6 arguments. 7+ can be added in the future */ + if (nr_args > 6) + return -ENOTSUPP; + + if ((flags & BPF_TRAMP_F_RESTORE_REGS) && + (flags & BPF_TRAMP_F_SKIP_FRAME)) + return -EINVAL; + + if (flags & BPF_TRAMP_F_CALL_ORIG) + stack_size += 8; /* room for return value of orig_call */ + + if (flags & BPF_TRAMP_F_SKIP_FRAME) + /* skip patched call instruction and point orig_call to actual + * body of the kernel function. 
+ */ + orig_call += X86_CALL_SIZE; + + prog = image; + + EMIT1(0x55); /* push rbp */ + EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ + EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ + EMIT1(0x53); /* push rbx */ + + save_regs(m, &prog, nr_args, stack_size); + + if (fentry_cnt) + if (invoke_bpf(m, &prog, fentry_progs, fentry_cnt, stack_size)) + return -EINVAL; + + if (flags & BPF_TRAMP_F_CALL_ORIG) { + if (fentry_cnt) + restore_regs(m, &prog, nr_args, stack_size); + + /* call original function */ + if (emit_call(&prog, orig_call, prog)) + return -EINVAL; + /* remember return value in a stack for bpf prog to access */ + emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); + } + + if (fexit_cnt) + if (invoke_bpf(m, &prog, fexit_progs, fexit_cnt, stack_size)) + return -EINVAL; + + if (flags & BPF_TRAMP_F_RESTORE_REGS) + restore_regs(m, &prog, nr_args, stack_size); + + if (flags & BPF_TRAMP_F_CALL_ORIG) + /* restore original return value back into RAX */ + emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); + + EMIT1(0x5B); /* pop rbx */ + EMIT1(0xC9); /* leave */ + if (flags & BPF_TRAMP_F_SKIP_FRAME) + /* skip our return address and return to parent */ + EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ + EMIT1(0xC3); /* ret */ + /* One half of the page has active running trampoline. + * Another half is an area for next trampoline. + * Make sure the trampoline generation logic doesn't overflow. + */ + if (WARN_ON_ONCE(prog - (u8 *)image > PAGE_SIZE / 2 - BPF_INSN_SAFETY)) + return -EFAULT; + return 0; +} + struct x64_jit_data { struct bpf_binary_header *header; int *addrs; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index a94ee45440b3..19e75999bb15 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -993,6 +993,7 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, if (sock->ops->shutdown == sock_no_shutdown) { dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); *err = -EINVAL; + sockfd_put(sock); return NULL; } diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index 24355680f30a..9da0f93a330b 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -673,16 +673,16 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n) int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev) { struct xfrm_state *x = xfrm_input_state(skb); + unsigned int last_desc, ndesc, flits = 0; struct ipsec_sa_entry *sa_entry; u64 *pos, *end, *before, *sgl; + struct tx_sw_desc *sgl_sdesc; int qidx, left, credits; - unsigned int flits = 0, ndesc; - struct adapter *adap; + bool immediate = false; struct sge_eth_txq *q; + struct adapter *adap; struct port_info *pi; - dma_addr_t addr[MAX_SKB_FRAGS + 1]; struct sec_path *sp; - bool immediate = false; if (!x->xso.offload_handle) return NETDEV_TX_BUSY; @@ -715,8 +715,14 @@ out_free: dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + sgl_sdesc = &q->q.sdesc[last_desc]; + if (!immediate && - unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { + unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { + memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); q->mapping_err++; goto out_free; } @@ -742,17 +748,10 @@ out_free: dev_kfree_skb_any(skb); cxgb4_inline_tx_skb(skb, &q->q, sgl); dev_consume_skb_any(skb); } else { - int last_desc; - cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, - 
0, addr); + 0, sgl_sdesc->addr); skb_orphan(skb); - - last_desc = q->q.pidx + ndesc - 1; - if (last_desc >= q->q.size) - last_desc -= q->q.size; - q->q.sdesc[last_desc].skb = skb; - q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl; + sgl_sdesc->skb = skb; } txq_advance(&q->q, ndesc); diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c index 0c1ead12d883..4ba4d4a67881 100644 --- a/drivers/gpio/gpio-bd70528.c +++ b/drivers/gpio/gpio-bd70528.c @@ -25,13 +25,13 @@ static int bd70528_set_debounce(struct bd70528_gpio *bdgpio, case 0: val = BD70528_DEBOUNCE_DISABLE; break; - case 1 ... 15: + case 1 ... 15000: val = BD70528_DEBOUNCE_15MS; break; - case 16 ... 30: + case 15001 ... 30000: val = BD70528_DEBOUNCE_30MS; break; - case 31 ... 50: + case 30001 ... 50000: val = BD70528_DEBOUNCE_50MS; break; default: diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c index faf86ea9c51a..642c6321c22a 100644 --- a/drivers/gpio/gpio-max77620.c +++ b/drivers/gpio/gpio-max77620.c @@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio, case 0: val = MAX77620_CNFG_GPIO_DBNC_None; break; - case 1000 ... 8000: + case 1 ... 8000: val = MAX77620_CNFG_GPIO_DBNC_8ms; break; - case 9000 ... 16000: + case 8001 ... 16000: val = MAX77620_CNFG_GPIO_DBNC_16ms; break; - case 17000 ... 32000: + case 16001 ... 32000: val = MAX77620_CNFG_GPIO_DBNC_32ms; break; default: diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 609ed16ae933..59ccfd24627d 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -1304,11 +1304,28 @@ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs); static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = { { + /* + * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for + * a non existing micro-USB-B connector which puts the HDMI + * DDC pins in GPIO mode, breaking HDMI support. + */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), } }, + { + /* + * The Terra Pad 1061 has a micro-USB-B id-pin handler, which + * instead of controlling the actual micro-USB-B turns the 5V + * boost for its USB-A connector off. The actual micro-USB-B + * connector is wired for charging only. + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"), + DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"), + } + }, {} /* Terminating entry */ }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 1d4aaa9580f4..82efc1e22e61 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -511,7 +511,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, * Also, don't allow GTT domain if the BO doens't have USWC falg set. 
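Picking up the bd70528/max77620 debounce fixes a few hunks above: the debounce argument gpiolib hands the driver is expressed in microseconds, so the case ranges must span 1..15000 and so on rather than millisecond-sized values. Below is a minimal stand-alone sketch of the corrected mapping, with placeholder values standing in for the BD70528_DEBOUNCE_* register constants.

#include <errno.h>
#include <stdio.h>

/* Placeholder register values standing in for BD70528_DEBOUNCE_*. */
enum { DEBOUNCE_DISABLE = 0, DEBOUNCE_15MS = 1, DEBOUNCE_30MS = 2, DEBOUNCE_50MS = 3 };

/* debounce is in microseconds, as passed by gpiolib */
static int debounce_to_regval(unsigned int debounce)
{
	if (debounce == 0)
		return DEBOUNCE_DISABLE;
	if (debounce <= 15000)
		return DEBOUNCE_15MS;
	if (debounce <= 30000)
		return DEBOUNCE_30MS;
	if (debounce <= 50000)
		return DEBOUNCE_50MS;
	return -EINVAL;		/* longer debounce periods are not supported */
}

int main(void)
{
	printf("%d %d %d\n", debounce_to_regval(10000),
	       debounce_to_regval(20000), debounce_to_regval(60000));
	return 0;
}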
*/ if (adev->asic_type >= CHIP_CARRIZO && - adev->asic_type <= CHIP_RAVEN && + adev->asic_type < CHIP_RAVEN && (adev->flags & AMD_IS_APU) && (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) && amdgpu_bo_support_uswc(bo_flags) && diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e1c15721611a..b19157b19fa0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1013,10 +1013,10 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, /* Navi14 */ - {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, + {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, + {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, + {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, /* Renoir */ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a042ef471fbd..a73206784cba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -649,15 +649,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return -ENOMEM; alloc_size = info->read_mmr_reg.count * sizeof(*regs); - for (i = 0; i < info->read_mmr_reg.count; i++) + amdgpu_gfx_off_ctrl(adev, false); + for (i = 0; i < info->read_mmr_reg.count; i++) { if (amdgpu_asic_read_register(adev, se_num, sh_num, info->read_mmr_reg.dword_offset + i, ®s[i])) { DRM_DEBUG_KMS("unallowed offset %#x\n", info->read_mmr_reg.dword_offset + i); kfree(regs); + amdgpu_gfx_off_ctrl(adev, true); return -EFAULT; } + } + amdgpu_gfx_off_ctrl(adev, true); n = copy_to_user(out, regs, min(size, alloc_size)); kfree(regs); return n ? -EFAULT : 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index dfca83a2de47..97cf0b536873 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1038,8 +1038,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) case CHIP_VEGA20: break; case CHIP_RAVEN: - if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) - &&((adev->gfx.rlc_fw_version != 106 && + /* Disable GFXOFF on original raven. There are combinations + * of sbios and platforms that are not stable. 
+ */ + if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) + &&((adev->gfx.rlc_fw_version != 106 && adev->gfx.rlc_fw_version < 531) || (adev->gfx.rlc_fw_version == 53815) || (adev->gfx.rlc_feature_version < 1) || diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index a52f0b13a2c8..4139f129eafb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -688,7 +688,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) */ if (adev->flags & AMD_IS_APU && adev->asic_type >= CHIP_CARRIZO && - adev->asic_type <= CHIP_RAVEN) + adev->asic_type < CHIP_RAVEN) init_data.flags.gpu_vm_support = true; if (amdgpu_dc_feature_mask & DC_FBC_MASK) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 34f95e0e3ea4..203ce4b1028f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3478,18 +3478,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) { + struct amdgpu_device *adev = hwmgr->adev; int i; u32 tmp = 0; if (!query) return -EINVAL; - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); - tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - *query = tmp; + /* + * PPSMC_MSG_GetCurrPkgPwr is not supported on: + * - Hawaii + * - Bonaire + * - Fiji + * - Tonga + */ + if ((adev->asic_type != CHIP_HAWAII) && + (adev->asic_type != CHIP_BONAIRE) && + (adev->asic_type != CHIP_FIJI) && + (adev->asic_type != CHIP_TONGA)) { + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); + tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + *query = tmp; - if (tmp != 0) - return 0; + if (tmp != 0) + return 0; + } smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 3ec5a10a7c4d..328e258a6895 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -759,6 +759,12 @@ static int navi10_force_clk_levels(struct smu_context *smu, case SMU_UCLK: case SMU_DCEFCLK: case SMU_FCLK: + /* There is only 2 levels for fine grained DPM */ + if (navi10_is_support_fine_grained_dpm(smu, clk_type)) { + soft_max_level = (soft_max_level >= 1 ? 1 : 0); + soft_min_level = (soft_min_level >= 1 ? 
1 : 0); + } + ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); if (ret) return size; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index d3fb75bb9eb1..7cb2257bbb93 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -201,6 +201,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->update_wm_post = false; crtc_state->fb_changed = false; crtc_state->fifo_changed = false; + crtc_state->preload_luts = false; crtc_state->wm.need_postvbl_update = false; crtc_state->fb_bits = 0; crtc_state->update_planes = 0; diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 71a0201437a9..aa1e2c670bc4 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -990,6 +990,55 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state) dev_priv->display.color_commit(crtc_state); } +static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_atomic_state *state = + to_intel_atomic_state(new_crtc_state->base.state); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + + return !old_crtc_state->base.gamma_lut && + !old_crtc_state->base.degamma_lut; +} + +static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_atomic_state *state = + to_intel_atomic_state(new_crtc_state->base.state); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + + /* + * CGM_PIPE_MODE is itself single buffered. We'd have to + * somehow split it out from chv_load_luts() if we wanted + * the ability to preload the CGM LUTs/CSC without tearing. + */ + if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode) + return false; + + return !old_crtc_state->base.gamma_lut; +} + +static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_atomic_state *state = + to_intel_atomic_state(new_crtc_state->base.state); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + + /* + * The hardware degamma is active whenever the pipe + * CSC is active. Thus even if the old state has no + * software degamma we need to avoid clobbering the + * linear hardware degamma mid scanout. 
+ */ + return !old_crtc_state->csc_enable && + !old_crtc_state->base.gamma_lut; +} + int intel_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); @@ -1133,6 +1182,8 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); + return 0; } @@ -1185,6 +1236,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + crtc_state->preload_luts = chv_can_preload_luts(crtc_state); + return 0; } @@ -1224,6 +1277,8 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); + return 0; } @@ -1281,6 +1336,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); + return 0; } @@ -1319,6 +1376,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state) if (ret) return ret; + crtc_state->preload_luts = glk_can_preload_luts(crtc_state); + return 0; } @@ -1368,6 +1427,8 @@ static int icl_color_check(struct intel_crtc_state *crtc_state) crtc_state->csc_mode = icl_csc_mode(crtc_state); + crtc_state->preload_luts = intel_can_preload_luts(crtc_state); + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index dfff6f4357b8..af50f05f4e9d 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -2504,6 +2504,9 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, * the highest stride limits of them all. */ crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); + if (!crtc) + return 0; + plane = to_intel_plane(crtc->base.primary); return plane->max_stride(plane, pixel_format, modifier, @@ -13740,6 +13743,11 @@ static void intel_update_crtc(struct intel_crtc *crtc, /* vblanks work again, re-enable pipe CRC. */ intel_crtc_enable_pipe_crc(crtc); } else { + if (new_crtc_state->preload_luts && + (new_crtc_state->base.color_mgmt_changed || + new_crtc_state->update_pipe)) + intel_color_load_luts(new_crtc_state); + intel_pre_plane_update(old_crtc_state, new_crtc_state); if (new_crtc_state->update_pipe) @@ -14034,6 +14042,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->base.active && !needs_modeset(new_crtc_state) && + !new_crtc_state->preload_luts && (new_crtc_state->base.color_mgmt_changed || new_crtc_state->update_pipe)) intel_color_load_luts(new_crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 449abaea619f..4075b0387c87 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -761,6 +761,7 @@ struct intel_crtc_state { bool update_wm_pre, update_wm_post; /* watermarks are updated */ bool fb_changed; /* fb on any of the planes is changed */ bool fifo_changed; /* FIFO split is changed */ + bool preload_luts; /* Pipe source size (ie. 
panel fitter input size) * All planes will be positioned inside this space, diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index d59eee5c5d9c..b5c588e511dd 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -235,6 +235,11 @@ static int intelfb_create(struct drm_fb_helper *helper, info->apertures->ranges[0].base = ggtt->gmadr.start; info->apertures->ranges[0].size = ggtt->mappable_end; + /* Our framebuffer is the entirety of fbdev's system memory */ + info->fix.smem_start = + (unsigned long)(ggtt->gmadr.start + vma->node.start); + info->fix.smem_len = vma->node.size; + vaddr = i915_vma_pin_iomap(vma); if (IS_ERR(vaddr)) { DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); @@ -244,10 +249,6 @@ static int intelfb_create(struct drm_fb_helper *helper, info->screen_base = vaddr; info->screen_size = vma->node.size; - /* Our framebuffer is the entirety of fbdev's system memory */ - info->fix.smem_start = (unsigned long)info->screen_base; - info->fix.smem_len = info->screen_size; - drm_fb_helper_fill_info(info, &ifbdev->helper, sizes); /* If the object is shmemfs backed, it will have given us zeroed pages. diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 6b3b50f0f6d9..abfbac49b8e8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -671,8 +671,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, obj->mm.dirty = false; for_each_sgt_page(page, sgt_iter, pages) { - if (obj->mm.dirty) + if (obj->mm.dirty && trylock_page(page)) { + /* + * As this may not be anonymous memory (e.g. shmem) + * but exist on a real mapping, we have to lock + * the page in order to dirty it -- holding + * the page reference is not sufficient to + * prevent the inode from being truncated. + * Play safe and take the lock. + * + * However...! + * + * The mmu-notifier can be invalidated for a + * migrate_page, that is alreadying holding the lock + * on the page. Such a try_to_unmap() will result + * in us calling put_pages() and so recursively try + * to lock the page. We avoid that deadlock with + * a trylock_page() and in exchange we risk missing + * some page dirtying. 
+ */ set_page_dirty(page); + unlock_page(page); + } mark_page_accessed(page); put_page(page); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c index 4cd54c569911..379a91780bd4 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c @@ -103,6 +103,8 @@ node_create(struct intel_engine_pool *pool, size_t sz) return ERR_CAST(obj); } + i915_gem_object_set_readonly(obj); + node->obj = obj; return node; } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 8e251e719390..212acaef581e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -843,8 +843,8 @@ create_event_attributes(struct i915_pmu *pmu) const char *name; const char *unit; } events[] = { - __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"), - __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"), + __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"), + __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"), __event(I915_PMU_INTERRUPTS, "interrupts", NULL), __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"), }; diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 7b84ebca2901..3eba8a2b39c2 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -177,9 +177,37 @@ static inline int rq_prio(const struct i915_request *rq) return rq->sched.attr.priority | __NO_PREEMPTION; } -static void kick_submission(struct intel_engine_cs *engine, int prio) +static inline bool need_preempt(int prio, int active) { - const struct i915_request *inflight = *engine->execlists.active; + /* + * Allow preemption of low -> normal -> high, but we do + * not allow low priority tasks to preempt other low priority + * tasks under the impression that latency for low priority + * tasks does not matter (as much as background throughput), + * so kiss. + */ + return prio >= max(I915_PRIORITY_NORMAL, active); +} + +static void kick_submission(struct intel_engine_cs *engine, + const struct i915_request *rq, + int prio) +{ + const struct i915_request *inflight; + + /* + * We only need to kick the tasklet once for the high priority + * new context we add into the queue. + */ + if (prio <= engine->execlists.queue_priority_hint) + return; + + rcu_read_lock(); + + /* Nothing currently active? We're overdue for a submission! */ + inflight = execlists_active(&engine->execlists); + if (!inflight) + goto unlock; /* * If we are already the currently executing context, don't @@ -188,10 +216,15 @@ static void kick_submission(struct intel_engine_cs *engine, int prio) * tasklet, i.e. we have not change the priority queue * sufficiently to oust the running context. */ - if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight))) - return; + if (inflight->hw_context == rq->hw_context) + goto unlock; - tasklet_hi_schedule(&engine->execlists.tasklet); + engine->execlists.queue_priority_hint = prio; + if (need_preempt(prio, rq_prio(inflight))) + tasklet_hi_schedule(&engine->execlists.tasklet); + +unlock: + rcu_read_unlock(); } static void __i915_schedule(struct i915_sched_node *node, @@ -317,13 +350,8 @@ static void __i915_schedule(struct i915_sched_node *node, list_move_tail(&node->link, cache.priolist); } - if (prio <= engine->execlists.queue_priority_hint) - continue; - - engine->execlists.queue_priority_hint = prio; - /* Defer (tasklet) submission until after all of our updates. 
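As a small illustration of the need_preempt() rule introduced above (a hedged stand-alone sketch, with a stand-in value for I915_PRIORITY_NORMAL): a newly queued request only triggers a preemption kick when it is at least normal priority and outranks the request currently in flight, so background work never preempts other background work.

#include <stdio.h>

#define I915_PRIORITY_NORMAL 0		/* stand-in value for this illustration */

static int max_int(int a, int b) { return a > b ? a : b; }

/* Same rule as the need_preempt() helper above. */
static int need_preempt(int prio, int active)
{
	return prio >= max_int(I915_PRIORITY_NORMAL, active);
}

int main(void)
{
	printf("%d\n", need_preempt(-10, -20));	/* 0: low never preempts low */
	printf("%d\n", need_preempt(0, -20));	/* 1: normal preempts low */
	printf("%d\n", need_preempt(5, 10));	/* 0: does not outrank running work */
	return 0;
}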
*/ - kick_submission(engine, prio); + kick_submission(engine, node_to_request(node), prio); } spin_unlock(&engine->active.lock); diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 9cb2aa1e20ef..62a1c92ab803 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -39,6 +39,7 @@ struct i2c_acpi_lookup { int index; u32 speed; u32 min_speed; + u32 force_speed; }; /** @@ -285,6 +286,19 @@ i2c_acpi_match_device(const struct acpi_device_id *matches, return acpi_match_device(matches, &client->dev); } +static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = { + /* + * These Silead touchscreen controllers only work at 400KHz, for + * some reason they do not work at 100KHz. On some devices the ACPI + * tables list another device at their bus as only being capable + * of 100KHz, testing has shown that these other devices work fine + * at 400KHz (as can be expected of any recent i2c hw) so we force + * the speed of the bus to 400 KHz if a Silead device is present. + */ + { "MSSL1680", 0 }, + {} +}; + static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, void *data, void **return_value) { @@ -303,6 +317,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, if (lookup->speed <= lookup->min_speed) lookup->min_speed = lookup->speed; + if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0) + lookup->force_speed = 400000; + return AE_OK; } @@ -340,7 +357,16 @@ u32 i2c_acpi_find_bus_speed(struct device *dev) return 0; } - return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0; + if (lookup.force_speed) { + if (lookup.force_speed != lookup.min_speed) + dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n", + lookup.min_speed, lookup.force_speed); + return lookup.force_speed; + } else if (lookup.min_speed != UINT_MAX) { + return lookup.min_speed; + } else { + return 0; + } } EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c index 6f632d543fcc..7eb41990bd6d 100644 --- a/drivers/i2c/i2c-core-of.c +++ b/drivers/i2c/i2c-core-of.c @@ -245,14 +245,14 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action, } client = of_i2c_register_device(adap, rd->dn); - put_device(&adap->dev); - if (IS_ERR(client)) { dev_err(&adap->dev, "failed to create client for '%pOF'\n", rd->dn); + put_device(&adap->dev); of_node_clear_flag(rd->dn, OF_POPULATED); return notifier_from_errno(PTR_ERR(client)); } + put_device(&adap->dev); break; case OF_RECONFIG_CHANGE_REMOVE: /* already depopulated? */ diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig index 304f50c08da2..078eeadf707a 100644 --- a/drivers/isdn/hardware/mISDN/Kconfig +++ b/drivers/isdn/hardware/mISDN/Kconfig @@ -10,7 +10,7 @@ config MISDN_HFCPCI depends on PCI help Enable support for cards with Cologne Chip AG's - HFC PCI chip. + HFC PCI chip. 
config MISDN_HFCMULTI tristate "Support for HFC multiport cards (HFC-4S/8S/E1)" diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c index 95fed4664a2d..cbb706dabede 100644 --- a/drivers/misc/vmw_vmci/vmci_driver.c +++ b/drivers/misc/vmw_vmci/vmci_driver.c @@ -30,7 +30,7 @@ static bool vmci_host_personality_initialized; static DEFINE_MUTEX(vmci_vsock_mutex); /* protects vmci_vsock_transport_cb */ static vmci_vsock_cb vmci_vsock_transport_cb; -bool vmci_vsock_cb_host_called; +static bool vmci_vsock_cb_host_called; /* * vmci_get_context_id() - Gets the current context ID. diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index df1c7989e13d..d02f12a5254e 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -153,22 +153,22 @@ config IPVLAN_L3S select NET_L3_MASTER_DEV config IPVLAN - tristate "IP-VLAN support" - depends on INET - depends on IPV6 || !IPV6 - ---help--- - This allows one to create virtual devices off of a main interface - and packets will be delivered based on the dest L3 (IPv6/IPv4 addr) - on packets. All interfaces (including the main interface) share L2 - making it transparent to the connected L2 switch. + tristate "IP-VLAN support" + depends on INET + depends on IPV6 || !IPV6 + ---help--- + This allows one to create virtual devices off of a main interface + and packets will be delivered based on the dest L3 (IPv6/IPv4 addr) + on packets. All interfaces (including the main interface) share L2 + making it transparent to the connected L2 switch. - Ipvlan devices can be added using the "ip" command from the - iproute2 package starting with the iproute2-3.19 release: + Ipvlan devices can be added using the "ip" command from the + iproute2 package starting with the iproute2-3.19 release: - "ip link add link <main-dev> [ NAME ] type ipvlan" + "ip link add link <main-dev> [ NAME ] type ipvlan" - To compile this driver as a module, choose M here: the module - will be called ipvlan. + To compile this driver as a module, choose M here: the module + will be called ipvlan. config IPVTAP tristate "IP-VLAN based tap driver" @@ -185,11 +185,11 @@ config IPVTAP will be called ipvtap. config VXLAN - tristate "Virtual eXtensible Local Area Network (VXLAN)" - depends on INET - select NET_UDP_TUNNEL - select GRO_CELLS - ---help--- + tristate "Virtual eXtensible Local Area Network (VXLAN)" + depends on INET + select NET_UDP_TUNNEL + select GRO_CELLS + ---help--- This allows one to create vxlan virtual interfaces that provide Layer 2 Networks over Layer 3 Networks. VXLAN is often used to tunnel virtual network infrastructure in virtualized environments. @@ -200,12 +200,12 @@ config VXLAN will be called vxlan. config GENEVE - tristate "Generic Network Virtualization Encapsulation" - depends on INET - depends on IPV6 || !IPV6 - select NET_UDP_TUNNEL - select GRO_CELLS - ---help--- + tristate "Generic Network Virtualization Encapsulation" + depends on INET + depends on IPV6 || !IPV6 + select NET_UDP_TUNNEL + select GRO_CELLS + ---help--- This allows one to create geneve virtual interfaces that provide Layer 2 Networks over Layer 3 Networks. GENEVE is often used to tunnel virtual network infrastructure in virtualized environments. @@ -244,8 +244,8 @@ config MACSEC config NETCONSOLE tristate "Network console logging support" ---help--- - If you want to log kernel messages over the network, enable this. - See <file:Documentation/networking/netconsole.txt> for details. + If you want to log kernel messages over the network, enable this. 
+ See <file:Documentation/networking/netconsole.txt> for details. config NETCONSOLE_DYNAMIC bool "Dynamic reconfiguration of logging targets" @@ -362,12 +362,12 @@ config NET_VRF support enables VRF devices. config VSOCKMON - tristate "Virtual vsock monitoring device" - depends on VHOST_VSOCK - ---help--- - This option enables a monitoring net device for vsock sockets. It is - mostly intended for developers or support to debug vsock issues. If - unsure, say N. + tristate "Virtual vsock monitoring device" + depends on VHOST_VSOCK + ---help--- + This option enables a monitoring net device for vsock sockets. It is + mostly intended for developers or support to debug vsock issues. If + unsure, say N. endif # NET_CORE diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index 96d7cef3289f..e74e2bb61236 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -16,37 +16,37 @@ config CAIF_TTY depends on CAIF && TTY default n ---help--- - The CAIF TTY transport driver is a Line Discipline (ldisc) - identified as N_CAIF. When this ldisc is opened from user space - it will redirect the TTY's traffic into the CAIF stack. + The CAIF TTY transport driver is a Line Discipline (ldisc) + identified as N_CAIF. When this ldisc is opened from user space + it will redirect the TTY's traffic into the CAIF stack. config CAIF_SPI_SLAVE tristate "CAIF SPI transport driver for slave interface" depends on CAIF && HAS_DMA default n ---help--- - The CAIF Link layer SPI Protocol driver for Slave SPI interface. - This driver implements a platform driver to accommodate for a - platform specific SPI device. A sample CAIF SPI Platform device is - provided in <file:Documentation/networking/caif/spi_porting.txt>. + The CAIF Link layer SPI Protocol driver for Slave SPI interface. + This driver implements a platform driver to accommodate for a + platform specific SPI device. A sample CAIF SPI Platform device is + provided in <file:Documentation/networking/caif/spi_porting.txt>. config CAIF_SPI_SYNC bool "Next command and length in start of frame" depends on CAIF_SPI_SLAVE default n ---help--- - Putting the next command and length in the start of the frame can - help to synchronize to the next transfer in case of over or under-runs. - This option also needs to be enabled on the modem. + Putting the next command and length in the start of the frame can + help to synchronize to the next transfer in case of over or under-runs. + This option also needs to be enabled on the modem. config CAIF_HSI - tristate "CAIF HSI transport driver" - depends on CAIF - default n - ---help--- - The CAIF low level driver for CAIF over HSI. - Be aware that if you enable this then you also need to - enable a low-level HSI driver. + tristate "CAIF HSI transport driver" + depends on CAIF + default n + ---help--- + The CAIF low level driver for CAIF over HSI. + Be aware that if you enable this then you also need to + enable a low-level HSI driver. config CAIF_VIRTIO tristate "CAIF virtio transport driver" @@ -56,7 +56,7 @@ config CAIF_VIRTIO select GENERIC_ALLOCATOR default n ---help--- - The CAIF driver for CAIF over Virtio. + The CAIF driver for CAIF over Virtio. 
if CAIF_VIRTIO source "drivers/vhost/Kconfig.vringh" diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index 6ac4c35f247a..38ea5e600fb8 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -107,7 +107,7 @@ static int m_can_plat_probe(struct platform_device *pdev) mcan_class->is_peripheral = false; - platform_set_drvdata(pdev, mcan_class->dev); + platform_set_drvdata(pdev, mcan_class->net); m_can_init_ram(mcan_class); @@ -166,8 +166,6 @@ static int __maybe_unused m_can_runtime_resume(struct device *dev) if (err) clk_disable_unprepare(mcan_class->hclk); - m_can_class_resume(dev); - return err; } diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 05e3f2898bf6..b7f92464815d 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -3,6 +3,7 @@ */ #include <uapi/linux/if_bridge.h> #include <soc/mscc/ocelot.h> +#include <linux/packing.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/of.h> @@ -286,7 +287,7 @@ static int felix_setup(struct dsa_switch *ds) for (port = 0; port < ds->num_ports; port++) { ocelot_init_port(ocelot, port); - if (port == dsa_upstream_port(ds, port)) + if (dsa_is_cpu_port(ds, port)) ocelot_set_cpu_port(ocelot, port, OCELOT_TAG_PREFIX_NONE, OCELOT_TAG_PREFIX_LONG); @@ -303,6 +304,62 @@ static void felix_teardown(struct dsa_switch *ds) ocelot_deinit(ocelot); } +static int felix_hwtstamp_get(struct dsa_switch *ds, int port, + struct ifreq *ifr) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_hwstamp_get(ocelot, port, ifr); +} + +static int felix_hwtstamp_set(struct dsa_switch *ds, int port, + struct ifreq *ifr) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_hwstamp_set(ocelot, port, ifr); +} + +static bool felix_rxtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb, unsigned int type) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct ocelot *ocelot = ds->priv; + u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN; + u32 tstamp_lo, tstamp_hi; + struct timespec64 ts; + u64 tstamp, val; + + ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); + tstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + + packing(extraction, &val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0); + tstamp_lo = (u32)val; + + tstamp_hi = tstamp >> 32; + if ((tstamp & 0xffffffff) < tstamp_lo) + tstamp_hi--; + + tstamp = ((u64)tstamp_hi << 32) | tstamp_lo; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = tstamp; + return false; +} + +static bool felix_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *clone, unsigned int type) +{ + struct ocelot *ocelot = ds->priv; + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port_add_txtstamp_skb(ocelot_port, clone)) + return true; + + return false; +} + static const struct dsa_switch_ops felix_switch_ops = { .get_tag_protocol = felix_get_tag_protocol, .setup = felix_setup, @@ -325,12 +382,33 @@ static const struct dsa_switch_ops felix_switch_ops = { .port_vlan_filtering = felix_vlan_filtering, .port_vlan_add = felix_vlan_add, .port_vlan_del = felix_vlan_del, + .port_hwtstamp_get = felix_hwtstamp_get, + .port_hwtstamp_set = felix_hwtstamp_set, + .port_rxtstamp = felix_rxtstamp, + .port_txtstamp = felix_txtstamp, }; static struct felix_info *felix_instance_tbl[] = { [FELIX_INSTANCE_VSC9959] = &felix_info_vsc9959, }; +static irqreturn_t felix_irq_handler(int irq, void *data) +{ + struct 
ocelot *ocelot = (struct ocelot *)data; + + /* The INTB interrupt is used for both PTP TX timestamp interrupt + * and preemption status change interrupt on each port. + * + * - Get txtstamp if have + * - TODO: handle preemption. Without handling it, driver may get + * interrupt storm. + */ + + ocelot_get_txtstamp(ocelot); + + return IRQ_HANDLED; +} + static int felix_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -372,6 +450,16 @@ static int felix_pci_probe(struct pci_dev *pdev, pci_set_master(pdev); + err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL, + &felix_irq_handler, IRQF_ONESHOT, + "felix-intb", ocelot); + if (err) { + dev_err(&pdev->dev, "Failed to request irq\n"); + goto err_alloc_irq; + } + + ocelot->ptp = 1; + ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL); if (!ds) { err = -ENOMEM; @@ -396,6 +484,7 @@ static int felix_pci_probe(struct pci_dev *pdev, err_register_ds: kfree(ds); err_alloc_ds: +err_alloc_irq: err_alloc_felix: kfree(felix); err_dma: diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index d67bd14a48e0..b9758b0d18c7 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -282,6 +282,16 @@ static const u32 vsc9959_sys_regmap[] = { REG_RESERVED(SYS_CM_DATA), }; +static const u32 vsc9959_ptp_regmap[] = { + REG(PTP_PIN_CFG, 0x000000), + REG(PTP_PIN_TOD_SEC_MSB, 0x000004), + REG(PTP_PIN_TOD_SEC_LSB, 0x000008), + REG(PTP_PIN_TOD_NSEC, 0x00000c), + REG(PTP_CFG_MISC, 0x0000a0), + REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), + REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), +}; + static const u32 vsc9959_gcb_regmap[] = { REG(GCB_SOFT_RST, 0x000004), }; @@ -293,6 +303,7 @@ static const u32 *vsc9959_regmap[] = { [REW] = vsc9959_rew_regmap, [SYS] = vsc9959_sys_regmap, [S2] = vsc9959_s2_regmap, + [PTP] = vsc9959_ptp_regmap, [GCB] = vsc9959_gcb_regmap, }; @@ -330,6 +341,11 @@ static struct resource vsc9959_target_io_res[] = { .end = 0x00603ff, .name = "s2", }, + [PTP] = { + .start = 0x0090000, + .end = 0x00900cb, + .name = "ptp", + }, [GCB] = { .start = 0x0070000, .end = 0x00701ff, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c07172429c70..35bc579d594a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1767,8 +1767,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = -EIO; if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { - netdev_warn(bp->dev, "RX buffer error %x\n", rx_err); - bnxt_sched_reset(bp, rxr); + bnapi->cp_ring.rx_buf_errors++; + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { + netdev_warn(bp->dev, "RX buffer error %x\n", + rx_err); + bnxt_sched_reset(bp, rxr); + } } goto next_rx_no_len; } @@ -3171,13 +3175,8 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) bnxt_init_rxbd_pages(ring, type); if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { - rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); - if (IS_ERR(rxr->xdp_prog)) { - int rc = PTR_ERR(rxr->xdp_prog); - - rxr->xdp_prog = NULL; - return rc; - } + bpf_prog_add(bp->xdp_prog, 1); + rxr->xdp_prog = bp->xdp_prog; } prod = rxr->rx_prod; for (i = 0; i < bp->rx_ring_size; i++) { @@ -4274,6 +4273,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, /* Wait until hwrm response cmpl interrupt is processed */ while (bp->hwrm_intr_seq_id != (u16)~seq_id && i++ < tmo_count) { + /* Abort the wait for completion if the FW health + * check has failed. 
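Both polling loops in bnxt_hwrm_do_send_msg() gain this early exit below: once BNXT_STATE_FW_FATAL_COND is observed there is no point waiting out the remaining HWRM timeout, so the command bails out with -EBUSY immediately. A minimal standalone model of that wait-with-abort pattern (the state structure, field names and iteration budget here are illustrative, not the driver's real definitions):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define POLL_ITERATIONS 200	/* stand-in for the HWRM timeout budget */

/* Illustrative device state; the real driver keeps these as atomic bits
 * in bp->state plus a response buffer updated by the firmware. */
struct dev_state {
	bool fw_fatal;		/* set by the firmware health-check path */
	bool resp_ready;	/* set when the response completion arrives */
};

/* Poll for a response, but give up as soon as the firmware has been
 * declared dead instead of sleeping out the full timeout. */
static int wait_for_resp(const struct dev_state *st)
{
	int i;

	for (i = 0; i < POLL_ITERATIONS; i++) {
		if (st->fw_fatal)
			return -EBUSY;		/* abort: firmware is gone */
		if (st->resp_ready)
			return 0;		/* normal completion */
		/* the real code sleeps here (usleep_range()/udelay()) */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	struct dev_state dead = { .fw_fatal = true };
	struct dev_state ok = { .resp_ready = true };

	printf("dead fw -> %d, healthy fw -> %d\n",
	       wait_for_resp(&dead), wait_for_resp(&ok));
	return 0;
}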
+ */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + return -EBUSY; /* on first few passes, just barely sleep */ if (i < HWRM_SHORT_TIMEOUT_COUNTER) usleep_range(HWRM_SHORT_MIN_TIMEOUT, @@ -4297,6 +4301,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, /* Check if response len is updated */ for (i = 0; i < tmo_count; i++) { + /* Abort the wait for completion if the FW health + * check has failed. + */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + return -EBUSY; len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT; if (len) @@ -4437,7 +4446,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE | FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) - flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT; + flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | + FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; req.flags = cpu_to_le32(flags); req.ver_maj_8b = DRV_VER_MAJ; req.ver_min_8b = DRV_VER_MIN; @@ -4601,21 +4611,21 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct hwrm_cfa_ntuple_filter_alloc_output *resp; struct flow_keys *keys = &fltr->fkeys; struct bnxt_vnic_info *vnic; - u32 dst_ena = 0; + u32 flags = 0; int rc = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; - if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) { - dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; - req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq); - vnic = &bp->vnic_info[0]; + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { + flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; + req.dst_id = cpu_to_le16(fltr->rxq); } else { vnic = &bp->vnic_info[fltr->rxq + 1]; + req.dst_id = cpu_to_le16(vnic->fw_vnic_id); } - req.dst_id = cpu_to_le16(vnic->fw_vnic_id); - req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena); + req.flags = cpu_to_le32(flags); + req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); req.ethertype = htons(ETH_P_IP); memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); @@ -6943,6 +6953,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->flags |= BNXT_FLAG_ROCEV2_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; + if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) @@ -7042,8 +7054,8 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) flags = le32_to_cpu(resp->flags); if (flags & - CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED) - bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX; + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; hwrm_cfa_adv_qcaps_exit: mutex_unlock(&bp->hwrm_cmd_lock); @@ -9693,7 +9705,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp) static bool bnxt_rfs_supported(struct bnxt *bp) { if (bp->flags & BNXT_FLAG_CHIP_P5) { - if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) return true; return false; } @@ -10115,6 +10127,7 @@ static void bnxt_force_fw_reset(struct bnxt *bp) void bnxt_fw_exception(struct bnxt *bp) { + netdev_warn(bp->dev, "Detected firmware fatal condition, 
initiating reset\n"); set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); bnxt_rtnl_lock_sp(bp); bnxt_force_fw_reset(bp); @@ -10743,6 +10756,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); bnxt_ulp_start(bp, rc); + bnxt_dl_health_status_update(bp, true); rtnl_unlock(); break; } @@ -10750,6 +10764,8 @@ static void bnxt_fw_reset_task(struct work_struct *work) fw_reset_abort: clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) + bnxt_dl_health_status_update(bp, false); bp->fw_reset_state = 0; rtnl_lock(); dev_close(bp->dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a3545c846bfb..37549cac3de6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -12,11 +12,11 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.10.0" +#define DRV_MODULE_VERSION "1.10.1" #define DRV_VER_MAJ 1 #define DRV_VER_MIN 10 -#define DRV_VER_UPD 0 +#define DRV_VER_UPD 1 #include <linux/interrupt.h> #include <linux/rhashtable.h> @@ -932,6 +932,7 @@ struct bnxt_cp_ring_info { dma_addr_t hw_stats_map; u32 hw_stats_ctx_id; u64 rx_l4_csum_errors; + u64 rx_buf_errors; u64 missed_irqs; struct bnxt_ring_struct cp_ring_struct; @@ -1383,6 +1384,7 @@ struct bnxt_fw_health { u32 last_fw_reset_cnt; u8 enabled:1; u8 master:1; + u8 fatal:1; u8 tmr_multiplier; u8 tmr_counter; u8 fw_reset_seq_cnt; @@ -1666,10 +1668,11 @@ struct bnxt { #define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000 #define BNXT_FW_CAP_PKG_VER 0x00004000 #define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000 - #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000 + #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000 #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000 #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000 #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000 + #define BNXT_FW_CAP_HOT_RESET 0x00200000 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index ae4ddf33fe5c..707827176231 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -91,6 +91,7 @@ static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter, if (!priv_ctx) return -EOPNOTSUPP; + bp->fw_health->fatal = true; event = fw_reporter_ctx->sp_event; if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT) bnxt_fw_reset(bp); @@ -199,6 +200,26 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) } } +void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy) +{ + struct bnxt_fw_health *health = bp->fw_health; + u8 state; + + if (healthy) + state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY; + else + state = DEVLINK_HEALTH_REPORTER_STATE_ERROR; + + if (health->fatal) + devlink_health_reporter_state_update(health->fw_fatal_reporter, + state); + else + devlink_health_reporter_state_update(health->fw_reset_reporter, + state); + + health->fatal = false; +} + static const struct devlink_ops bnxt_dl_ops = { #ifdef CONFIG_BNXT_SRIOV .eswitch_mode_set = bnxt_dl_eswitch_mode_set, @@ -314,10 +335,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, } else { rc = hwrm_send_message_silent(bp, msg, msg_len, HWRM_CMD_TIMEOUT); - if (!rc) + if (!rc) { bnxt_copy_from_nvm_data(val, data, 
nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); + } else { + struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; + + if (resp->cmd_err == + NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST) + rc = -EOPNOTSUPP; + } } dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); if (rc == -EACCES) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 2f4fd0a7d04b..665d4bdcd8c0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -57,6 +57,7 @@ struct bnxt_dl_nvm_param { }; void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); +void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy); int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index f2220b826d61..0641020b56d5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -173,6 +173,7 @@ static const char * const bnxt_ring_tpa2_stats_str[] = { static const char * const bnxt_ring_sw_stats_str[] = { "rx_l4_csum_errors", + "rx_buf_errors", "missed_irqs", }; @@ -552,6 +553,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (k = 0; k < stat_fields; j++, k++) buf[j] = le64_to_cpu(hw_stats[k]); buf[j++] = cpr->rx_l4_csum_errors; + buf[j++] = cpr->rx_buf_errors; buf[j++] = cpr->missed_irqs; bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += @@ -1785,6 +1787,8 @@ static int bnxt_firmware_reset(struct net_device *dev, case BNXT_FW_RESET_CHIP: req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; break; case BNXT_FW_RESET_AP: req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP; @@ -2981,7 +2985,8 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) return -EOPNOTSUPP; } - if (pci_vfs_assigned(bp->pdev)) { + if (pci_vfs_assigned(bp->pdev) && + !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) { netdev_err(dev, "Reset not allowed when VFs are assigned to VMs\n"); return -EBUSY; @@ -2994,7 +2999,9 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP); if (!rc) { - netdev_info(dev, "Reset request successful. 
Reload driver to complete reset\n"); + netdev_info(dev, "Reset request successful.\n"); + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) + netdev_info(dev, "Reload driver to complete reset\n"); *flags = 0; } } else if (*flags == ETH_RESET_AP) { @@ -3038,7 +3045,8 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, mutex_lock(&bp->hwrm_cmd_lock); while (1) { *seq_ptr = cpu_to_le16(seq); - rc = _hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + rc = _hwrm_send_message(bp, msg, msg_len, + HWRM_COREDUMP_TIMEOUT); if (rc) break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 03b197eb793b..7cf27dffadb5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -176,6 +176,9 @@ struct cmd_nums { #define HWRM_RESERVED6 0x65UL #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL + #define HWRM_QUEUE_MPLS_QCAPS 0x80UL + #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL + #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL #define HWRM_CFA_L2_FILTER_FREE 0x91UL #define HWRM_CFA_L2_FILTER_CFG 0x92UL @@ -208,7 +211,7 @@ struct cmd_nums { #define HWRM_FW_QSTATUS 0xc1UL #define HWRM_FW_HEALTH_CHECK 0xc2UL #define HWRM_FW_SYNC 0xc3UL - #define HWRM_FW_STATE_BUFFER_QCAPS 0xc4UL + #define HWRM_FW_STATE_QCAPS 0xc4UL #define HWRM_FW_STATE_QUIESCE 0xc5UL #define HWRM_FW_STATE_BACKUP 0xc6UL #define HWRM_FW_STATE_RESTORE 0xc7UL @@ -225,8 +228,11 @@ struct cmd_nums { #define HWRM_PORT_PRBS_TEST 0xd5UL #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL + #define HWRM_FW_STATE_UNQUIESCE 0xd8UL + #define HWRM_PORT_DSC_DUMP 0xd9UL #define HWRM_TEMP_MONITOR_QUERY 0xe0UL #define HWRM_REG_POWER_QUERY 0xe1UL + #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL #define HWRM_WOL_FILTER_ALLOC 0xf0UL #define HWRM_WOL_FILTER_FREE 0xf1UL #define HWRM_WOL_FILTER_QCFG 0xf2UL @@ -308,6 +314,7 @@ struct cmd_nums { #define HWRM_ENGINE_STATS_CONFIG 0x155UL #define HWRM_ENGINE_STATS_CLEAR 0x156UL #define HWRM_ENGINE_STATS_QUERY 0x157UL + #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL #define HWRM_ENGINE_RQ_ALLOC 0x15eUL #define HWRM_ENGINE_RQ_FREE 0x15fUL #define HWRM_ENGINE_CQ_ALLOC 0x160UL @@ -390,6 +397,7 @@ struct ret_codes { #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL + #define HWRM_ERR_CODE_BUSY 0x10UL #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL @@ -420,9 +428,9 @@ struct hwrm_err_output { #define HWRM_TARGET_ID_TOOLS 0xFFFD #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 -#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 100 -#define HWRM_VERSION_STR "1.10.0.100" +#define HWRM_VERSION_UPDATE 1 +#define HWRM_VERSION_RSVD 12 +#define HWRM_VERSION_STR "1.10.1.12" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -637,6 +645,8 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL + #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 
0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -1115,6 +1125,7 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -1255,7 +1266,8 @@ struct hwrm_func_qcfg_output { u8 unused_1; u8 always_1; __le32 reset_addr_poll; - u8 unused_2[3]; + __le16 legacy_l2_db_size_kb; + u8 unused_2[1]; u8 valid; }; @@ -1500,6 +1512,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL + #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -1762,7 +1775,7 @@ struct hwrm_func_backing_store_qcaps_input { __le64 resp_addr; }; -/* hwrm_func_backing_store_qcaps_output (size:576b/72B) */ +/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */ struct hwrm_func_backing_store_qcaps_output { __le16 error_code; __le16 req_type; @@ -1792,6 +1805,10 @@ struct hwrm_func_backing_store_qcaps_output { __le32 tim_max_entries; __le16 mrav_num_entries_units; u8 tqm_entries_multiple; + u8 ctx_kind_initializer; + __le32 rsvd; + __le16 rsvd1; + u8 rsvd2; u8 valid; }; @@ -2524,6 +2541,7 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE __le32 preemphasis; @@ -2761,8 +2779,8 @@ struct hwrm_port_mac_ptp_qcfg_output { __le16 resp_len; u8 flags; #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL - #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL u8 unused_0[3]; __le32 rx_ts_reg_off_lower; __le32 rx_ts_reg_off_upper; @@ -3177,10 +3195,12 @@ struct hwrm_port_phy_qcaps_output { __le16 seq_id; __le16 resp_len; u8 flags; - #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfcUL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 2 + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL + #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4 u8 port_cnt; #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL @@ -4980,6 +5000,15 @@ struct hwrm_vnic_rss_cfg_output { u8 valid; }; +/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */ +struct hwrm_vnic_rss_cfg_cmd_err { + u8 code; + #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL + #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST 
VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY + u8 unused_0[7]; +}; + /* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */ struct hwrm_vnic_plcmodes_cfg_input { __le16 req_type; @@ -5807,7 +5836,7 @@ struct hwrm_cfa_encap_record_free_output { u8 valid; }; -/* hwrm_cfa_ntuple_filter_alloc_input (size:1088b/136B) */ +/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */ struct hwrm_cfa_ntuple_filter_alloc_input { __le16 req_type; __le16 cmpl_ring; @@ -5815,10 +5844,12 @@ struct hwrm_cfa_ntuple_filter_alloc_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL __le32 enables; #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL @@ -5887,8 +5918,6 @@ struct hwrm_cfa_ntuple_filter_alloc_input { __be16 dst_port; __be16 dst_port_mask; __le64 ntuple_filter_id_hint; - __le16 rfs_ring_tbl_idx; - u8 unused_0[6]; }; /* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ @@ -5954,7 +5983,8 @@ struct hwrm_cfa_ntuple_filter_cfg_input { #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL __le32 flags; - #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL __le64 ntuple_filter_id; __le32 new_dst_id; __le32 new_mirror_vnic_id; @@ -6534,18 +6564,21 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output { __le16 seq_id; __le16 resp_len; __le32 flags; - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL - #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL + #define 
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL u8 unused_0[3]; u8 valid; }; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 40a44dcb3d9b..f28409279ea4 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1876,13 +1876,8 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) if (nic->xdp_prog) { /* Attach BPF program */ - nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); - if (!IS_ERR(nic->xdp_prog)) { - bpf_attached = true; - } else { - ret = PTR_ERR(nic->xdp_prog); - nic->xdp_prog = NULL; - } + bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); + bpf_attached = true; } /* Calculate Tx queues needed for XDP and network stack */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 49a19308073b..a4b4d475abf8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -8,7 +8,8 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4_mps.o \ - cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o + cudbg_common.o cudbg_lib.o cudbg_zlib.o cxgb4_tc_mqprio.o \ + cxgb4_tc_matchall.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 1fb273b294a1..a70ac2097892 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -603,6 +603,8 @@ struct port_info { u8 vivld; u8 smt_idx; u8 rx_cchan; + + bool tc_block_shared; }; struct dentry; @@ -733,7 +735,12 @@ struct tx_desc { __be64 flit[8]; }; -struct tx_sw_desc; +struct ulptx_sgl; + +struct tx_sw_desc { + struct sk_buff *skb; /* SKB to free after getting completion */ + dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */ +}; struct sge_txq { unsigned int in_use; /* # of in-use Tx descriptors */ @@ -765,6 +772,7 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ u8 dbqt; /* SGE Doorbell Queue Timer in use */ unsigned int dbqtimerix; /* SGE Doorbell Queue Timer Index */ unsigned long tso; /* # of TSO requests */ + unsigned long uso; /* # of USO requests */ unsigned long tx_cso; /* # of Tx checksum offloads */ unsigned long vlan_ins; /* # of Tx VLAN insertions */ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ @@ -812,15 +820,10 @@ enum sge_eosw_state { CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */ }; -struct sge_eosw_desc { - struct sk_buff *skb; /* 
SKB to free after getting completion */ - dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* DMA mapped addresses */ -}; - struct sge_eosw_txq { spinlock_t lock; /* Per queue lock to synchronize completions */ enum sge_eosw_state state; /* Current ETHOFLD State */ - struct sge_eosw_desc *desc; /* Descriptor ring to hold packets */ + struct tx_sw_desc *desc; /* Descriptor ring to hold packets */ u32 ndesc; /* Number of descriptors */ u32 pidx; /* Current Producer Index */ u32 last_pidx; /* Last successfully transmitted Producer Index */ @@ -847,6 +850,7 @@ struct sge_eohw_txq { struct sge_txq q; /* HW Txq */ struct adapter *adap; /* Backpointer to adapter */ unsigned long tso; /* # of TSO requests */ + unsigned long uso; /* # of USO requests */ unsigned long tx_cso; /* # of Tx checksum offloads */ unsigned long vlan_ins; /* # of Tx VLAN insertions */ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ @@ -1101,6 +1105,9 @@ struct adapter { /* TC MQPRIO offload */ struct cxgb4_tc_mqprio *tc_mqprio; + + /* TC MATCHALL classifier offload */ + struct cxgb4_tc_matchall *tc_matchall; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be @@ -1130,6 +1137,7 @@ enum { enum { SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */ + SCHED_CLASS_LEVEL_CH_RL = 2, /* channel rate limiter */ }; enum { @@ -1145,11 +1153,6 @@ enum { SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */ }; -struct tx_sw_desc { /* SW state per Tx descriptor */ - struct sk_buff *skb; - struct ulptx_sgl *sgl; -}; - /* Support for "sched_queue" command to allow one or more NIC TX Queues * to be bound to a TX Scheduling Class. */ @@ -1280,8 +1283,11 @@ struct ch_filter_specification { u16 nat_lport; /* local port to use after NAT'ing */ u16 nat_fport; /* foreign port to use after NAT'ing */ + u32 tc_prio; /* TC's filter priority index */ + u64 tc_cookie; /* Unique cookie identifying TC rules */ + /* reservation for future additions */ - u8 rsvd[24]; + u8 rsvd[12]; /* Filter rule value/mask pairs. 
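The new tc_prio and tc_cookie members are paid for out of the reservation right below them: a u32 plus a u64 account for exactly the 12 bytes by which rsvd shrinks, so ch_filter_specification keeps its size for existing users. A compile-time sketch of that bookkeeping (the two layouts are cut-down stand-ins rather than the full filter specification, and the 64-bit member is ordered here purely to keep the example padding-free):

#include <stdint.h>

/* Before: spare room reserved for future additions. */
struct filter_spec_old {
	uint32_t val;
	uint32_t mask;
	uint8_t  rsvd[24];
};

/* After: the TC bookkeeping is carved out of the reserved area. */
struct filter_spec_new {
	uint32_t val;
	uint32_t mask;
	uint64_t tc_cookie;	/* cookie identifying the TC rule */
	uint32_t tc_prio;	/* TC filter priority index */
	uint8_t  rsvd[12];	/* 24 - (8 + 4) bytes still reserved */
};

/* The new members must consume exactly what was removed from rsvd ... */
_Static_assert(sizeof(uint64_t) + sizeof(uint32_t) == 24 - 12,
	       "reserved-space accounting is off");
/* ... so the structure keeps its original footprint. */
_Static_assert(sizeof(struct filter_spec_old) == sizeof(struct filter_spec_new),
	       "filter specification changed size");

int main(void) { return 0; }

Keeping the reservation and shrinking it as fields are added is what lets the structure grow new capabilities without changing its size for callers that never touch the TC fields.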
*/ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index a13b03f771cc..93868dca186a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2748,6 +2748,7 @@ do { \ RL("RxDrops:", stats.rx_drops); RL("RxBadPkts:", stats.bad_rx_pkts); TL("TSO:", tso); + TL("USO:", uso); TL("TxCSO:", tx_cso); TL("VLANins:", vlan_ins); TL("TxQFull:", q.stops); @@ -2796,6 +2797,7 @@ do { \ RL("RxAN", stats.an); RL("RxNoMem", stats.nomem); TL("TSO:", tso); + TL("USO:", uso); TL("TxCSO:", tx_cso); TL("VLANins:", vlan_ins); TL("TxQFull:", q.stops); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 76538f4cd595..20ab3b6285a2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -91,6 +91,7 @@ static const char stats_strings[][ETH_GSTRING_LEN] = { "rx_bg3_frames_trunc ", "tso ", + "uso ", "tx_csum_offload ", "rx_csum_good ", "vlan_extractions ", @@ -220,6 +221,7 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data) */ struct queue_port_stats { u64 tso; + u64 uso; u64 tx_csum; u64 rx_csum; u64 vlan_ex; @@ -240,13 +242,15 @@ static void collect_sge_port_stats(const struct adapter *adap, const struct port_info *p, struct queue_port_stats *s) { - int i; const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; + struct sge_eohw_txq *eohw_tx; + unsigned int i; memset(s, 0, sizeof(*s)); for (i = 0; i < p->nqsets; i++, rx++, tx++) { s->tso += tx->tso; + s->uso += tx->uso; s->tx_csum += tx->tx_cso; s->rx_csum += rx->stats.rx_cso; s->vlan_ex += rx->stats.vlan_ex; @@ -254,6 +258,16 @@ static void collect_sge_port_stats(const struct adapter *adap, s->gro_pkts += rx->stats.lro_pkts; s->gro_merged += rx->stats.lro_merged; } + + if (adap->sge.eohw_txq) { + eohw_tx = &adap->sge.eohw_txq[p->first_qset]; + for (i = 0; i < p->nqsets; i++, eohw_tx++) { + s->tso += eohw_tx->tso; + s->uso += eohw_tx->uso; + s->tx_csum += eohw_tx->tx_cso; + s->vlan_ins += eohw_tx->vlan_ins; + } + } } static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 43b0f8c57da7..1d39fca11810 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -440,36 +440,48 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family) { struct adapter *adap = netdev2adap(dev); struct tid_info *t = &adap->tids; + bool found = false; + u8 i, n, cnt; int ftid; - spin_lock_bh(&t->ftid_lock); - if (family == PF_INET) { - ftid = find_first_zero_bit(t->ftid_bmap, t->nftids); - if (ftid >= t->nftids) - ftid = -1; - } else { - if (is_t6(adap->params.chip)) { - ftid = bitmap_find_free_region(t->ftid_bmap, - t->nftids, 1); - if (ftid < 0) - goto out_unlock; - - /* this is only a lookup, keep the found region - * unallocated - */ - bitmap_release_region(t->ftid_bmap, ftid, 1); - } else { - ftid = bitmap_find_free_region(t->ftid_bmap, - t->nftids, 2); - if (ftid < 0) - goto out_unlock; + /* IPv4 occupy 1 slot. IPv6 occupy 2 slots on T6 and 4 slots + * on T5. 
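The reworked cxgb4_get_free_ftid() that follows first sizes the request from this rule (n = 1, 2 or 4 slots) and then walks the ftid bitmap from the top of the LE-TCAM looking for an n-aligned run of free slots. A standalone model of that search, with a plain byte-array bitmap standing in for the kernel bitmap helpers:

#include <stdio.h>

/* Toy bitmap helper standing in for test_bit(). */
static int slot_in_use(const unsigned char *bmap, unsigned int bit)
{
	return (bmap[bit / 8] >> (bit % 8)) & 1;
}

/* Search from the highest index down for @n consecutive free slots and
 * return the n-aligned base of the run, or -1 if nothing is free. */
static int find_free_slots_from_top(const unsigned char *bmap,
				    unsigned int nslots, unsigned int n)
{
	int idx;

	if (!n || n > nslots)
		return -1;

	for (idx = nslots - 1; idx >= (int)n - 1; idx -= n) {
		unsigned int i;

		for (i = 0; i < n; i++)
			if (slot_in_use(bmap, idx - i))
				break;

		if (i == n)
			return idx & ~(n - 1);	/* align the base like the driver does */
	}
	return -1;
}

int main(void)
{
	/* 16 slots; the two highest (14, 15) are already taken. */
	unsigned char bmap[2] = { 0x00, 0xc0 };

	printf("IPv4 (1 slot): %d\n", find_free_slots_from_top(bmap, 16, 1));
	printf("IPv6 on T6 (2 slots): %d\n", find_free_slots_from_top(bmap, 16, 2));
	printf("IPv6 on T5 (4 slots): %d\n", find_free_slots_from_top(bmap, 16, 4));
	return 0;
}

Searching from the end of the TCAM presumably leaves the low indices free for rules whose TC priority is used directly as the slot number, as the flower, matchall and u32 paths later in this series do.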
+ */ + n = 1; + if (family == PF_INET6) { + n++; + if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) + n += 2; + } + + if (n > t->nftids) + return -ENOMEM; - bitmap_release_region(t->ftid_bmap, ftid, 2); + /* Find free filter slots from the end of TCAM. Appropriate + * checks must be done by caller later to ensure the prio + * passed by TC doesn't conflict with prio saved by existing + * rules in the TCAM. + */ + spin_lock_bh(&t->ftid_lock); + ftid = t->nftids - 1; + while (ftid >= n - 1) { + cnt = 0; + for (i = 0; i < n; i++) { + if (test_bit(ftid - i, t->ftid_bmap)) + break; + cnt++; } + if (cnt == n) { + ftid &= ~(n - 1); + found = true; + break; + } + + ftid -= n; } -out_unlock: spin_unlock_bh(&t->ftid_lock); - return ftid; + + return found ? ftid : -ENOMEM; } static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family, @@ -510,6 +522,60 @@ static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family, spin_unlock_bh(&t->ftid_lock); } +bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio) +{ + struct adapter *adap = netdev2adap(dev); + struct filter_entry *prev_fe, *next_fe; + struct tid_info *t = &adap->tids; + u32 prev_ftid, next_ftid; + bool valid = true; + + /* Only insert the rule if both of the following conditions + * are met: + * 1. The immediate previous rule has priority <= @prio. + * 2. The immediate next rule has priority >= @prio. + */ + spin_lock_bh(&t->ftid_lock); + /* Don't insert if there's a rule already present at @idx. */ + if (test_bit(idx, t->ftid_bmap)) { + valid = false; + goto out_unlock; + } + + next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx); + if (next_ftid >= t->nftids) + next_ftid = idx; + + next_fe = &adap->tids.ftid_tab[next_ftid]; + + prev_ftid = find_last_bit(t->ftid_bmap, idx); + if (prev_ftid >= idx) + prev_ftid = idx; + + /* See if the filter entry belongs to an IPv6 rule, which + * occupy 4 slots on T5 and 2 slots on T6. Adjust the + * reference to the previously inserted filter entry + * accordingly. + */ + if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6) { + prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x3]; + if (!prev_fe->fs.type) + prev_fe = &adap->tids.ftid_tab[prev_ftid]; + } else { + prev_fe = &adap->tids.ftid_tab[prev_ftid & ~0x1]; + if (!prev_fe->fs.type) + prev_fe = &adap->tids.ftid_tab[prev_ftid]; + } + + if ((prev_fe->valid && prio < prev_fe->fs.tc_prio) || + (next_fe->valid && prio > next_fe->fs.tc_prio)) + valid = false; + +out_unlock: + spin_unlock_bh(&t->ftid_lock); + return valid; +} + /* Delete the filter at a specified index. 
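Because LE-TCAM entries are matched in index order, cxgb4_filter_prio_in_range() above only admits a rule at a given slot when the table stays sorted by TC priority: the nearest valid entry below must not carry a higher priority and the nearest one above must not carry a lower one. Reduced to its essentials (the function and parameter names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* A new rule may take a slot only if priorities stay monotonic across the
 * table: prev_prio <= prio <= next_prio, where "prev" is the nearest valid
 * entry at a lower index and "next" the nearest at a higher index (either
 * neighbour may be absent). */
static bool prio_keeps_order(unsigned int prio,
			     bool prev_valid, unsigned int prev_prio,
			     bool next_valid, unsigned int next_prio)
{
	if (prev_valid && prio < prev_prio)
		return false;
	if (next_valid && prio > next_prio)
		return false;
	return true;
}

int main(void)
{
	/* neighbours at prio 10 and 30: prio 20 fits, prio 5 does not */
	printf("%d %d\n",
	       prio_keeps_order(20, true, 10, true, 30),
	       prio_keeps_order(5,  true, 10, true, 30));
	return 0;
}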
*/ static int del_filter_wr(struct adapter *adapter, int fidx) { @@ -806,6 +872,12 @@ static void fill_default_mask(struct ch_filter_specification *fs) fs->mask.tos |= ~0; if (fs->val.proto && !fs->mask.proto) fs->mask.proto |= ~0; + if (fs->val.pfvf_vld && !fs->mask.pfvf_vld) + fs->mask.pfvf_vld |= ~0; + if (fs->val.pf && !fs->mask.pf) + fs->mask.pf |= ~0; + if (fs->val.vf && !fs->mask.vf) + fs->mask.vf |= ~0; for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) { lip |= fs->val.lip[i]; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h index b0751c0611ec..b3e4a645043d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h @@ -53,4 +53,5 @@ void clear_all_filters(struct adapter *adapter); void init_hash_filter(struct adapter *adap); bool is_filter_exact_match(struct adapter *adap, struct ch_filter_specification *fs); +bool cxgb4_filter_prio_in_range(struct net_device *dev, u32 idx, u32 prio); #endif /* __CXGB4_FILTER_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index b5148c57e8bf..12ff69b3ba91 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -84,6 +84,7 @@ #include "cxgb4_tc_u32.h" #include "cxgb4_tc_flower.h" #include "cxgb4_tc_mqprio.h" +#include "cxgb4_tc_matchall.h" #include "cxgb4_ptp.h" #include "cxgb4_cudbg.h" @@ -1135,11 +1136,17 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, if (dev->num_tc) { struct port_info *pi = netdev2pinfo(dev); + u8 ver, proto; + + ver = ip_hdr(skb)->version; + proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : + ip_hdr(skb)->protocol; /* Send unsupported traffic pattern to normal NIC queues. 
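The checks that follow widen the dedicated mqprio queue path to both TCP and UDP while still bouncing encapsulated, PTP, IPsec-offloaded or otherwise unsupported packets back onto the normal NIC queues. A simplified model of that decision (the packet descriptor and helper below are illustrative, not driver structures):

#include <stdbool.h>
#include <stdio.h>

enum l4_proto { L4_TCP, L4_UDP, L4_OTHER };

/* Simplified stand-in for the per-skb checks made by the driver. */
struct pkt {
	enum l4_proto proto;
	bool encapsulated;
	bool ipsec_offload;
};

/* When TC MQPRIO offload is active, only plain TCP/UDP may keep the queue
 * the stack picked for its traffic class; anything the offload path cannot
 * handle is folded back onto the @nqsets normal NIC queue sets. */
static unsigned int select_txq(const struct pkt *p, unsigned int stack_txq,
			       unsigned int nqsets)
{
	if (p->ipsec_offload || p->encapsulated ||
	    (p->proto != L4_TCP && p->proto != L4_UDP))
		return stack_txq % nqsets;

	return stack_txq;
}

int main(void)
{
	struct pkt tcp = { .proto = L4_TCP };
	struct pkt tunnel = { .proto = L4_UDP, .encapsulated = true };

	/* stack picked queue 9; the port has 4 normal queue sets */
	printf("tcp -> %u, tunnelled udp -> %u\n",
	       select_txq(&tcp, 9, 4), select_txq(&tunnel, 9, 4));
	return 0;
}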
*/ txq = netdev_pick_tx(dev, skb, sb_dev); if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) || - ip_hdr(skb)->protocol != IPPROTO_TCP) + skb->encapsulation || + (proto != IPPROTO_TCP && proto != IPPROTO_UDP)) txq = txq % pi->nqsets; return txq; @@ -3234,8 +3241,33 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev, } } -static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) +static int cxgb_setup_tc_matchall(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall, + bool ingress) +{ + struct adapter *adap = netdev2adap(dev); + + if (!adap->tc_matchall) + return -ENOMEM; + + switch (cls_matchall->command) { + case TC_CLSMATCHALL_REPLACE: + return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress); + case TC_CLSMATCHALL_DESTROY: + return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress); + case TC_CLSMATCHALL_STATS: + if (ingress) + return cxgb4_tc_matchall_stats(dev, cls_matchall); + break; + default: + break; + } + + return -EOPNOTSUPP; +} + +static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) { struct net_device *dev = cb_priv; struct port_info *pi = netdev2pinfo(dev); @@ -3256,11 +3288,40 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, return cxgb_setup_tc_cls_u32(dev, type_data); case TC_SETUP_CLSFLOWER: return cxgb_setup_tc_flower(dev, type_data); + case TC_SETUP_CLSMATCHALL: + return cxgb_setup_tc_matchall(dev, type_data, true); default: return -EOPNOTSUPP; } } +static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct net_device *dev = cb_priv; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { + dev_err(adap->pdev_dev, + "Failed to setup tc on port %d. 
Link Down?\n", + pi->port_id); + return -EINVAL; + } + + if (!tc_cls_can_offload_and_chain0(dev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return cxgb_setup_tc_matchall(dev, type_data, false); + default: + break; + } + + return -EOPNOTSUPP; +} + static int cxgb_setup_tc_mqprio(struct net_device *dev, struct tc_mqprio_qopt_offload *mqprio) { @@ -3274,19 +3335,34 @@ static int cxgb_setup_tc_mqprio(struct net_device *dev, static LIST_HEAD(cxgb_block_cb_list); +static int cxgb_setup_tc_block(struct net_device *dev, + struct flow_block_offload *f) +{ + struct port_info *pi = netdev_priv(dev); + flow_setup_cb_t *cb; + bool ingress_only; + + pi->tc_block_shared = f->block_shared; + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + cb = cxgb_setup_tc_block_egress_cb; + ingress_only = false; + } else { + cb = cxgb_setup_tc_block_ingress_cb; + ingress_only = true; + } + + return flow_block_cb_setup_simple(f, &cxgb_block_cb_list, + cb, pi, dev, ingress_only); +} + static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - struct port_info *pi = netdev2pinfo(dev); - switch (type) { case TC_SETUP_QDISC_MQPRIO: return cxgb_setup_tc_mqprio(dev, type_data); case TC_SETUP_BLOCK: - return flow_block_cb_setup_simple(type_data, - &cxgb_block_cb_list, - cxgb_setup_tc_block_cb, - pi, dev, true); + return cxgb_setup_tc_block(dev, type_data); default: return -EOPNOTSUPP; } @@ -5741,6 +5817,7 @@ static void free_some_resources(struct adapter *adapter) kvfree(adapter->srq); t4_cleanup_sched(adapter); kvfree(adapter->tids.tid_tab); + cxgb4_cleanup_tc_matchall(adapter); cxgb4_cleanup_tc_mqprio(adapter); cxgb4_cleanup_tc_flower(adapter); cxgb4_cleanup_tc_u32(adapter); @@ -5767,7 +5844,8 @@ static void free_some_resources(struct adapter *adapter) t4_fw_bye(adapter, adapter->pf); } -#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) +#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \ + NETIF_F_GSO_UDP_L4) #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) #define SEGMENT_SIZE 128 @@ -6315,6 +6393,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (cxgb4_init_tc_mqprio(adapter)) dev_warn(&pdev->dev, "could not offload tc mqprio, continuing\n"); + + if (cxgb4_init_tc_matchall(adapter)) + dev_warn(&pdev->dev, + "could not offload tc matchall, continuing\n"); } if (is_offload(adapter) || is_hashfilter(adapter)) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index e447976bdd3e..0fa80bef575d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -378,15 +378,14 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, } } -static void cxgb4_process_flow_actions(struct net_device *in, - struct flow_cls_offload *cls, - struct ch_filter_specification *fs) +void cxgb4_process_flow_actions(struct net_device *in, + struct flow_action *actions, + struct ch_filter_specification *fs) { - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_action_entry *act; int i; - flow_action_for_each(i, act, &rule->action) { + flow_action_for_each(i, act, actions) { switch (act->id) { case FLOW_ACTION_ACCEPT: fs->action = FILTER_PASS; @@ -544,17 +543,16 @@ static bool valid_pedit_action(struct net_device *dev, return true; } -static int 
cxgb4_validate_flow_actions(struct net_device *dev, - struct flow_cls_offload *cls) +int cxgb4_validate_flow_actions(struct net_device *dev, + struct flow_action *actions) { - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_action_entry *act; bool act_redir = false; bool act_pedit = false; bool act_vlan = false; int i; - flow_action_for_each(i, act, &rule->action) { + flow_action_for_each(i, act, actions) { switch (act->id) { case FLOW_ACTION_ACCEPT: case FLOW_ACTION_DROP: @@ -636,14 +634,15 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, int cxgb4_tc_flower_replace(struct net_device *dev, struct flow_cls_offload *cls) { + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; struct adapter *adap = netdev2adap(dev); struct ch_tc_flower_entry *ch_flower; struct ch_filter_specification *fs; struct filter_ctx ctx; - int fidx; - int ret; + int fidx, ret; - if (cxgb4_validate_flow_actions(dev, cls)) + if (cxgb4_validate_flow_actions(dev, &rule->action)) return -EOPNOTSUPP; if (cxgb4_validate_flow_match(dev, cls)) @@ -658,20 +657,41 @@ int cxgb4_tc_flower_replace(struct net_device *dev, fs = &ch_flower->fs; fs->hitcnts = 1; cxgb4_process_flow_match(dev, cls, fs); - cxgb4_process_flow_actions(dev, cls, fs); + cxgb4_process_flow_actions(dev, &rule->action, fs); fs->hash = is_filter_exact_match(adap, fs); if (fs->hash) { fidx = 0; } else { - fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET); - if (fidx < 0) { - netdev_err(dev, "%s: No fidx for offload.\n", __func__); + u8 inet_family; + + inet_family = fs->type ? PF_INET6 : PF_INET; + + /* Note that TC uses prio 0 to indicate stack to + * generate automatic prio and hence doesn't pass prio + * 0 to driver. However, the hardware TCAM index + * starts from 0. Hence, the -1 here. + */ + if (cls->common.prio <= adap->tids.nftids) + fidx = cls->common.prio - 1; + else + fidx = cxgb4_get_free_ftid(dev, inet_family); + + /* Only insert FLOWER rule if its priority doesn't + * conflict with existing rules in the LETCAM. + */ + if (fidx < 0 || + !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) { + NL_SET_ERR_MSG_MOD(extack, + "No free LETCAM index available"); ret = -ENOMEM; goto free_entry; } } + fs->tc_prio = cls->common.prio; + fs->tc_cookie = cls->cookie; + init_completion(&ctx.completion); ret = __cxgb4_set_filter(dev, fidx, fs, &ctx); if (ret) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index eb4c95248baf..e132516e9868 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -108,6 +108,12 @@ struct ch_tc_pedit_fields { #define PEDIT_TCP_SPORT_DPORT 0x0 #define PEDIT_UDP_SPORT_DPORT 0x0 +void cxgb4_process_flow_actions(struct net_device *in, + struct flow_action *actions, + struct ch_filter_specification *fs); +int cxgb4_validate_flow_actions(struct net_device *dev, + struct flow_action *actions); + int cxgb4_tc_flower_replace(struct net_device *dev, struct flow_cls_offload *cls); int cxgb4_tc_flower_destroy(struct net_device *dev, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c new file mode 100644 index 000000000000..102b370fbd3e --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2019 Chelsio Communications. 
All rights reserved. */ + +#include "cxgb4.h" +#include "cxgb4_tc_matchall.h" +#include "sched.h" +#include "cxgb4_uld.h" +#include "cxgb4_filter.h" +#include "cxgb4_tc_flower.h" + +static int cxgb4_matchall_egress_validate(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct flow_action *actions = &cls->rule->action; + struct port_info *pi = netdev2pinfo(dev); + struct flow_action_entry *entry; + u64 max_link_rate; + u32 i, speed; + int ret; + + if (!flow_action_has_entries(actions)) { + NL_SET_ERR_MSG_MOD(extack, + "Egress MATCHALL offload needs at least 1 policing action"); + return -EINVAL; + } else if (!flow_offload_has_one_action(actions)) { + NL_SET_ERR_MSG_MOD(extack, + "Egress MATCHALL offload only supports 1 policing action"); + return -EINVAL; + } else if (pi->tc_block_shared) { + NL_SET_ERR_MSG_MOD(extack, + "Egress MATCHALL offload not supported with shared blocks"); + return -EINVAL; + } + + ret = t4_get_link_params(pi, NULL, &speed, NULL); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to get max speed supported by the link"); + return -EINVAL; + } + + /* Convert from Mbps to bps */ + max_link_rate = (u64)speed * 1000 * 1000; + + flow_action_for_each(i, entry, actions) { + switch (entry->id) { + case FLOW_ACTION_POLICE: + /* Convert bytes per second to bits per second */ + if (entry->police.rate_bytes_ps * 8 > max_link_rate) { + NL_SET_ERR_MSG_MOD(extack, + "Specified policing max rate is larger than underlying link speed"); + return -ERANGE; + } + break; + default: + NL_SET_ERR_MSG_MOD(extack, + "Only policing action supported with Egress MATCHALL offload"); + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int cxgb4_matchall_alloc_tc(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct ch_sched_params p = { + .type = SCHED_CLASS_TYPE_PACKET, + .u.params.level = SCHED_CLASS_LEVEL_CH_RL, + .u.params.mode = SCHED_CLASS_MODE_CLASS, + .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS, + .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS, + .u.params.class = SCHED_CLS_NONE, + .u.params.minrate = 0, + .u.params.weight = 0, + .u.params.pktsize = dev->mtu, + }; + struct netlink_ext_ack *extack = cls->common.extack; + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct flow_action_entry *entry; + struct sched_class *e; + u32 i; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + + flow_action_for_each(i, entry, &cls->rule->action) + if (entry->id == FLOW_ACTION_POLICE) + break; + + /* Convert from bytes per second to Kbps */ + p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000); + p.u.params.channel = pi->tx_chan; + e = cxgb4_sched_class_alloc(dev, &p); + if (!e) { + NL_SET_ERR_MSG_MOD(extack, + "No free traffic class available for policing action"); + return -ENOMEM; + } + + tc_port_matchall->egress.hwtc = e->idx; + tc_port_matchall->egress.cookie = cls->cookie; + tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED; + return 0; +} + +static void cxgb4_matchall_free_tc(struct net_device *dev) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc); + + tc_port_matchall->egress.hwtc = SCHED_CLS_NONE; + 
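The egress validation and traffic-class setup above juggle three units: TC hands the policing rate in bytes per second, the link speed is reported in Mbps, and the scheduler class is programmed in Kbps. The same arithmetic, pulled out into a small standalone helper (the names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Reject policing rates faster than the wire and convert the accepted
 * rate into the Kbps value a scheduler class would be programmed with. */
static int rate_fits_link(uint64_t rate_bytes_ps, uint32_t speed_mbps,
			  uint64_t *maxrate_kbps)
{
	uint64_t max_link_rate = (uint64_t)speed_mbps * 1000 * 1000; /* bps */

	if (rate_bytes_ps * 8 > max_link_rate)
		return -1;				/* faster than the link */

	*maxrate_kbps = rate_bytes_ps * 8 / 1000;
	return 0;
}

int main(void)
{
	uint64_t kbps;

	/* 500 MB/s policer on a 10 Gbps link: 4,000,000 Kbps, fits. */
	if (!rate_fits_link(500000000ULL, 10000, &kbps))
		printf("maxrate = %llu Kbps\n", (unsigned long long)kbps);
	return 0;
}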
tc_port_matchall->egress.cookie = 0; + tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED; +} + +static int cxgb4_matchall_alloc_filter(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct ch_filter_specification *fs; + int ret, fidx; + + /* Note that TC uses prio 0 to indicate stack to generate + * automatic prio and hence doesn't pass prio 0 to driver. + * However, the hardware TCAM index starts from 0. Hence, the + * -1 here. 1 slot is enough to create a wildcard matchall + * VIID rule. + */ + if (cls->common.prio <= adap->tids.nftids) + fidx = cls->common.prio - 1; + else + fidx = cxgb4_get_free_ftid(dev, PF_INET); + + /* Only insert MATCHALL rule if its priority doesn't conflict + * with existing rules in the LETCAM. + */ + if (fidx < 0 || + !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) { + NL_SET_ERR_MSG_MOD(extack, + "No free LETCAM index available"); + return -ENOMEM; + } + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + fs = &tc_port_matchall->ingress.fs; + memset(fs, 0, sizeof(*fs)); + + fs->tc_prio = cls->common.prio; + fs->tc_cookie = cls->cookie; + fs->hitcnts = 1; + + fs->val.pfvf_vld = 1; + fs->val.pf = adap->pf; + fs->val.vf = pi->vin; + + cxgb4_process_flow_actions(dev, &cls->rule->action, fs); + + ret = cxgb4_set_filter(dev, fidx, fs); + if (ret) + return ret; + + tc_port_matchall->ingress.tid = fidx; + tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED; + return 0; +} + +static int cxgb4_matchall_free_filter(struct net_device *dev) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + + ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid, + &tc_port_matchall->ingress.fs); + if (ret) + return ret; + + tc_port_matchall->ingress.packets = 0; + tc_port_matchall->ingress.bytes = 0; + tc_port_matchall->ingress.last_used = 0; + tc_port_matchall->ingress.tid = 0; + tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED; + return 0; +} + +int cxgb4_tc_matchall_replace(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall, + bool ingress) +{ + struct netlink_ext_ack *extack = cls_matchall->common.extack; + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + if (ingress) { + if (tc_port_matchall->ingress.state == + CXGB4_MATCHALL_STATE_ENABLED) { + NL_SET_ERR_MSG_MOD(extack, + "Only 1 Ingress MATCHALL can be offloaded"); + return -ENOMEM; + } + + ret = cxgb4_validate_flow_actions(dev, + &cls_matchall->rule->action); + if (ret) + return ret; + + return cxgb4_matchall_alloc_filter(dev, cls_matchall); + } + + if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) { + NL_SET_ERR_MSG_MOD(extack, + "Only 1 Egress MATCHALL can be offloaded"); + return -ENOMEM; + } + + ret = cxgb4_matchall_egress_validate(dev, cls_matchall); + if (ret) + return ret; + + return cxgb4_matchall_alloc_tc(dev, cls_matchall); +} + +int cxgb4_tc_matchall_destroy(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall, + bool ingress) +{ + struct 
cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + if (ingress) { + if (cls_matchall->cookie != + tc_port_matchall->ingress.fs.tc_cookie) + return -ENOENT; + + return cxgb4_matchall_free_filter(dev); + } + + if (cls_matchall->cookie != tc_port_matchall->egress.cookie) + return -ENOENT; + + cxgb4_matchall_free_tc(dev); + return 0; +} + +int cxgb4_tc_matchall_stats(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + u64 packets, bytes; + int ret; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED) + return -ENOENT; + + ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid, + &packets, &bytes, + tc_port_matchall->ingress.fs.hash); + if (ret) + return ret; + + if (tc_port_matchall->ingress.packets != packets) { + flow_stats_update(&cls_matchall->stats, + bytes - tc_port_matchall->ingress.bytes, + packets - tc_port_matchall->ingress.packets, + tc_port_matchall->ingress.last_used); + + tc_port_matchall->ingress.packets = packets; + tc_port_matchall->ingress.bytes = bytes; + tc_port_matchall->ingress.last_used = jiffies; + } + + return 0; +} + +static void cxgb4_matchall_disable_offload(struct net_device *dev) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) + cxgb4_matchall_free_tc(dev); + + if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED) + cxgb4_matchall_free_filter(dev); +} + +int cxgb4_init_tc_matchall(struct adapter *adap) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct cxgb4_tc_matchall *tc_matchall; + int ret; + + tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL); + if (!tc_matchall) + return -ENOMEM; + + tc_port_matchall = kcalloc(adap->params.nports, + sizeof(*tc_port_matchall), + GFP_KERNEL); + if (!tc_port_matchall) { + ret = -ENOMEM; + goto out_free_matchall; + } + + tc_matchall->port_matchall = tc_port_matchall; + adap->tc_matchall = tc_matchall; + return 0; + +out_free_matchall: + kfree(tc_matchall); + return ret; +} + +void cxgb4_cleanup_tc_matchall(struct adapter *adap) +{ + u8 i; + + if (adap->tc_matchall) { + if (adap->tc_matchall->port_matchall) { + for (i = 0; i < adap->params.nports; i++) { + struct net_device *dev = adap->port[i]; + + if (dev) + cxgb4_matchall_disable_offload(dev); + } + kfree(adap->tc_matchall->port_matchall); + } + kfree(adap->tc_matchall); + } +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h new file mode 100644 index 000000000000..ab6b5683dfd3 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2019 Chelsio Communications. All rights reserved. 
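cxgb4_tc_matchall_stats() above reports incrementally: it reads the absolute hardware counters, pushes only the growth since the previous query via flow_stats_update(), and then caches the new absolute values and timestamp. A standalone sketch of that delta reporting (the structure and the print-out stand in for the real counter read and flow_stats_update() call):

#include <stdio.h>

/* Cached counters for one offloaded matchall rule. */
struct matchall_counters {
	unsigned long long bytes;
	unsigned long long packets;
	unsigned long long last_used;
};

/* TC expects incremental statistics, so only the increase since the last
 * read is reported; the cache then remembers the new absolute values. */
static void report_delta(struct matchall_counters *cached,
			 unsigned long long hw_bytes,
			 unsigned long long hw_packets,
			 unsigned long long now)
{
	if (hw_packets == cached->packets)
		return;		/* filter was not hit since the last read */

	printf("report %llu bytes / %llu packets (last used %llu)\n",
	       hw_bytes - cached->bytes, hw_packets - cached->packets,
	       cached->last_used);

	cached->bytes = hw_bytes;
	cached->packets = hw_packets;
	cached->last_used = now;
}

int main(void)
{
	struct matchall_counters c = { 0 };

	report_delta(&c, 1500, 10, 100);	/* first read: report everything */
	report_delta(&c, 1500, 10, 200);	/* no change: nothing reported */
	report_delta(&c, 3000, 25, 300);	/* report only the increase */
	return 0;
}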
*/ + +#ifndef __CXGB4_TC_MATCHALL_H__ +#define __CXGB4_TC_MATCHALL_H__ + +#include <net/pkt_cls.h> + +enum cxgb4_matchall_state { + CXGB4_MATCHALL_STATE_DISABLED = 0, + CXGB4_MATCHALL_STATE_ENABLED, +}; + +struct cxgb4_matchall_egress_entry { + enum cxgb4_matchall_state state; /* Current MATCHALL offload state */ + u8 hwtc; /* Traffic class bound to port */ + u64 cookie; /* Used to identify the MATCHALL rule offloaded */ +}; + +struct cxgb4_matchall_ingress_entry { + enum cxgb4_matchall_state state; /* Current MATCHALL offload state */ + u32 tid; /* Index to hardware filter entry */ + struct ch_filter_specification fs; /* Filter entry */ + u64 bytes; /* # of bytes hitting the filter */ + u64 packets; /* # of packets hitting the filter */ + u64 last_used; /* Last updated jiffies time */ +}; + +struct cxgb4_tc_port_matchall { + struct cxgb4_matchall_egress_entry egress; /* Egress offload info */ + struct cxgb4_matchall_ingress_entry ingress; /* Ingress offload info */ +}; + +struct cxgb4_tc_matchall { + struct cxgb4_tc_port_matchall *port_matchall; /* Per port entry */ +}; + +int cxgb4_tc_matchall_replace(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall, + bool ingress); +int cxgb4_tc_matchall_destroy(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall, + bool ingress); +int cxgb4_tc_matchall_stats(struct net_device *dev, + struct tc_cls_matchall_offload *cls_matchall); + +int cxgb4_init_tc_matchall(struct adapter *adap); +void cxgb4_cleanup_tc_matchall(struct adapter *adap); +#endif /* __CXGB4_TC_MATCHALL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c index ce442c63f496..477973d2e341 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c @@ -11,8 +11,7 @@ static int cxgb4_mqprio_validate(struct net_device *dev, u64 min_rate = 0, max_rate = 0, max_link_rate; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); - u32 qcount = 0, qoffset = 0; - u32 link_ok, speed, mtu; + u32 speed, qcount = 0, qoffset = 0; int ret; u8 i; @@ -35,7 +34,7 @@ static int cxgb4_mqprio_validate(struct net_device *dev, return -ERANGE; } - ret = t4_get_link_params(pi, &link_ok, &speed, &mtu); + ret = t4_get_link_params(pi, NULL, &speed, NULL); if (ret) { netdev_err(dev, "Failed to get link speed, ret: %d\n", ret); return -EINVAL; @@ -71,7 +70,7 @@ static int cxgb4_init_eosw_txq(struct net_device *dev, u32 eotid, u32 hwqid) { struct adapter *adap = netdev2adap(dev); - struct sge_eosw_desc *ring; + struct tx_sw_desc *ring; memset(eosw_txq, 0, sizeof(*eosw_txq)); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 02fc63fa7f25..133f8623ba86 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -36,6 +36,7 @@ #include <net/tc_act/tc_mirred.h> #include "cxgb4.h" +#include "cxgb4_filter.h" #include "cxgb4_tc_u32_parse.h" #include "cxgb4_tc_u32.h" @@ -148,6 +149,7 @@ static int fill_action_fields(struct adapter *adap, int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) { const struct cxgb4_match_field *start, *link_start = NULL; + struct netlink_ext_ack *extack = cls->common.extack; struct adapter *adapter = netdev2adap(dev); __be16 protocol = cls->common.protocol; struct ch_filter_specification fs; @@ -164,14 +166,21 @@ int 
cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6)) return -EOPNOTSUPP; - /* Fetch the location to insert the filter. */ - filter_id = cls->knode.handle & 0xFFFFF; + /* Note that TC uses prio 0 to indicate stack to generate + * automatic prio and hence doesn't pass prio 0 to driver. + * However, the hardware TCAM index starts from 0. Hence, the + * -1 here. + */ + filter_id = TC_U32_NODE(cls->knode.handle) - 1; - if (filter_id > adapter->tids.nftids) { - dev_err(adapter->pdev_dev, - "Location %d out of range for insertion. Max: %d\n", - filter_id, adapter->tids.nftids); - return -ERANGE; + /* Only insert U32 rule if its priority doesn't conflict with + * existing rules in the LETCAM. + */ + if (filter_id >= adapter->tids.nftids || + !cxgb4_filter_prio_in_range(dev, filter_id, cls->common.prio)) { + NL_SET_ERR_MSG_MOD(extack, + "No free LETCAM index available"); + return -ENOMEM; } t = adapter->tc_u32; @@ -190,6 +199,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) memset(&fs, 0, sizeof(fs)); + fs.tc_prio = cls->common.prio; + fs.tc_cookie = cls->knode.handle; + if (protocol == htons(ETH_P_IPV6)) { start = cxgb4_ipv6_fields; is_ipv6 = true; @@ -350,14 +362,10 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) return -EOPNOTSUPP; /* Fetch the location to delete the filter. */ - filter_id = cls->knode.handle & 0xFFFFF; - - if (filter_id > adapter->tids.nftids) { - dev_err(adapter->pdev_dev, - "Location %d out of range for deletion. Max: %d\n", - filter_id, adapter->tids.nftids); + filter_id = TC_U32_NODE(cls->knode.handle) - 1; + if (filter_id >= adapter->tids.nftids || + cls->knode.handle != adapter->tids.ftid_tab[filter_id].fs.tc_cookie) return -ERANGE; - } t = adapter->tc_u32; handle = cls->knode.handle; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 0a98c4dbb36b..3e61bd5d0c29 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -50,6 +50,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi, e = &s->tab[p->u.params.class]; switch (op) { case SCHED_FW_OP_ADD: + case SCHED_FW_OP_DEL: err = t4_sched_params(adap, p->type, p->u.params.level, p->u.params.mode, p->u.params.rateunit, @@ -188,10 +189,8 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) e = &pi->sched_tbl->tab[qe->param.class]; list_del(&qe->list); kvfree(qe); - if (atomic_dec_and_test(&e->refcnt)) { - e->state = SCHED_STATE_UNUSED; - memset(&e->info, 0, sizeof(e->info)); - } + if (atomic_dec_and_test(&e->refcnt)) + cxgb4_sched_class_free(adap->port[pi->port_id], e->idx); } return err; } @@ -261,10 +260,8 @@ static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p) e = &pi->sched_tbl->tab[fe->param.class]; list_del(&fe->list); kvfree(fe); - if (atomic_dec_and_test(&e->refcnt)) { - e->state = SCHED_STATE_UNUSED; - memset(&e->info, 0, sizeof(e->info)); - } + if (atomic_dec_and_test(&e->refcnt)) + cxgb4_sched_class_free(adap->port[pi->port_id], e->idx); } return err; } @@ -469,10 +466,7 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, struct sched_class *found = NULL; struct sched_class *e, *end; - /* Only allow tc to be shared among SCHED_FLOWC types. For - * other types, always allocate a new tc. 
- */ - if (!p || p->u.params.mode != SCHED_CLASS_MODE_FLOW) { + if (!p) { /* Get any available unused class */ end = &s->tab[s->sched_size]; for (e = &s->tab[0]; e != end; ++e) { @@ -514,7 +508,7 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, static struct sched_class *t4_sched_class_alloc(struct port_info *pi, struct ch_sched_params *p) { - struct sched_class *e; + struct sched_class *e = NULL; u8 class_id; int err; @@ -529,10 +523,13 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, if (class_id != SCHED_CLS_NONE) return NULL; - /* See if there's an exisiting class with same - * requested sched params + /* See if there's an exisiting class with same requested sched + * params. Classes can only be shared among FLOWC types. For + * other types, always request a new class. */ - e = t4_sched_class_lookup(pi, p); + if (p->u.params.mode == SCHED_CLASS_MODE_FLOW) + e = t4_sched_class_lookup(pi, p); + if (!e) { struct ch_sched_params np; @@ -592,10 +589,35 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid) { struct port_info *pi = netdev2pinfo(dev); struct sched_table *s = pi->sched_tbl; + struct ch_sched_params p; struct sched_class *e; + u32 speed; + int ret; e = &s->tab[classid]; - if (!atomic_read(&e->refcnt)) { + if (!atomic_read(&e->refcnt) && e->state != SCHED_STATE_UNUSED) { + /* Port based rate limiting needs explicit reset back + * to max rate. But, we'll do explicit reset for all + * types, instead of just port based type, to be on + * the safer side. + */ + memcpy(&p, &e->info, sizeof(p)); + /* Always reset mode to 0. Otherwise, FLOWC mode will + * still be enabled even after resetting the traffic + * class. + */ + p.u.params.mode = 0; + p.u.params.minrate = 0; + p.u.params.pktsize = 0; + + ret = t4_get_link_params(pi, NULL, &speed, NULL); + if (!ret) + p.u.params.maxrate = speed * 1000; /* Mbps to Kbps */ + else + p.u.params.maxrate = SCHED_MAX_RATE_KBPS; + + t4_sched_class_fw_cmd(pi, &p, SCHED_FW_OP_DEL); + e->state = SCHED_STATE_UNUSED; memset(&e->info, 0, sizeof(e->info)); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index 80bed8e59362..e92ff68bdd0a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -52,6 +52,7 @@ enum { enum sched_fw_ops { SCHED_FW_OP_ADD, + SCHED_FW_OP_DEL, }; enum sched_bind_type { diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 09059adc3067..97cda501e7e8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -300,65 +300,6 @@ static void deferred_unmap_destructor(struct sk_buff *skb) } #endif -static void unmap_sgl(struct device *dev, const struct sk_buff *skb, - const struct ulptx_sgl *sgl, const struct sge_txq *q) -{ - const struct ulptx_sge_pair *p; - unsigned int nfrags = skb_shinfo(skb)->nr_frags; - - if (likely(skb_headlen(skb))) - dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), - DMA_TO_DEVICE); - else { - dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), - DMA_TO_DEVICE); - nfrags--; - } - - /* - * the complexity below is because of the possibility of a wrap-around - * in the middle of an SGL - */ - for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { - if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { -unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), - ntohl(p->len[0]), DMA_TO_DEVICE); - dma_unmap_page(dev, be64_to_cpu(p->addr[1]), - 
ntohl(p->len[1]), DMA_TO_DEVICE); - p++; - } else if ((u8 *)p == (u8 *)q->stat) { - p = (const struct ulptx_sge_pair *)q->desc; - goto unmap; - } else if ((u8 *)p + 8 == (u8 *)q->stat) { - const __be64 *addr = (const __be64 *)q->desc; - - dma_unmap_page(dev, be64_to_cpu(addr[0]), - ntohl(p->len[0]), DMA_TO_DEVICE); - dma_unmap_page(dev, be64_to_cpu(addr[1]), - ntohl(p->len[1]), DMA_TO_DEVICE); - p = (const struct ulptx_sge_pair *)&addr[2]; - } else { - const __be64 *addr = (const __be64 *)q->desc; - - dma_unmap_page(dev, be64_to_cpu(p->addr[0]), - ntohl(p->len[0]), DMA_TO_DEVICE); - dma_unmap_page(dev, be64_to_cpu(addr[0]), - ntohl(p->len[1]), DMA_TO_DEVICE); - p = (const struct ulptx_sge_pair *)&addr[1]; - } - } - if (nfrags) { - __be64 addr; - - if ((u8 *)p == (u8 *)q->stat) - p = (const struct ulptx_sge_pair *)q->desc; - addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] : - *(const __be64 *)q->desc; - dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), - DMA_TO_DEVICE); - } -} - /** * free_tx_desc - reclaims Tx descriptors and their buffers * @adapter: the adapter @@ -372,15 +313,16 @@ unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), void free_tx_desc(struct adapter *adap, struct sge_txq *q, unsigned int n, bool unmap) { - struct tx_sw_desc *d; unsigned int cidx = q->cidx; - struct device *dev = adap->pdev_dev; + struct tx_sw_desc *d; d = &q->sdesc[cidx]; while (n--) { if (d->skb) { /* an SGL is present */ - if (unmap) - unmap_sgl(dev, d->skb, d->sgl, q); + if (unmap && d->addr[0]) { + unmap_skb(adap->pdev_dev, d->skb, d->addr); + memset(d->addr, 0, sizeof(d->addr)); + } dev_consume_skb_any(d->skb); d->skb = NULL; } @@ -792,6 +734,8 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver) chip_ver > CHELSIO_T5) { hdrlen = sizeof(struct cpl_tx_tnl_lso); hdrlen += sizeof(struct cpl_tx_pkt_core); + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + return 0; } else { hdrlen = skb_shinfo(skb)->gso_size ? 
sizeof(struct cpl_tx_pkt_lso_core) : 0; @@ -833,12 +777,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb, */ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); if (skb_shinfo(skb)->gso_size) { - if (skb->encapsulation && chip_ver > CHELSIO_T5) + if (skb->encapsulation && chip_ver > CHELSIO_T5) { hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_tnl_lso); - else + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + u32 pkt_hdrlen; + + pkt_hdrlen = eth_get_headlen(skb->dev, skb->data, + skb_headlen(skb)); + hdrlen = sizeof(struct fw_eth_tx_eo_wr) + + round_up(pkt_hdrlen, 16); + } else { hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_lso_core); + } hdrlen += sizeof(struct cpl_tx_pkt_core); flits += (hdrlen / sizeof(__be64)); @@ -1403,6 +1355,25 @@ static inline int cxgb4_validate_skb(struct sk_buff *skb, return 0; } +static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr, + u32 hdr_len) +{ + wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; + wr->u.udpseg.ethlen = skb_network_offset(skb); + wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); + wr->u.udpseg.udplen = sizeof(struct udphdr); + wr->u.udpseg.rtplen = 0; + wr->u.udpseg.r4 = 0; + if (skb_shinfo(skb)->gso_size) + wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + else + wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len); + wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; + wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len); + + return (void *)(wr + 1); +} + /** * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet @@ -1414,15 +1385,16 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; bool ptp_enabled = is_ptp_enabled(skb, dev); - dma_addr_t addr[MAX_SKB_FRAGS + 1]; + unsigned int last_desc, flits, ndesc; + u32 wr_mid, ctrl0, op, sgl_off = 0; const struct skb_shared_info *ssi; + int len, qidx, credits, ret, left; + struct tx_sw_desc *sgl_sdesc; + struct fw_eth_tx_eo_wr *eowr; struct fw_eth_tx_pkt_wr *wr; struct cpl_tx_pkt_core *cpl; - int len, qidx, credits, ret; const struct port_info *pi; - unsigned int flits, ndesc; bool immediate = false; - u32 wr_mid, ctrl0, op; u64 cntrl, *end, *sgl; struct sge_eth_txq *q; unsigned int chip_ver; @@ -1489,8 +1461,14 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->encapsulation && chip_ver > CHELSIO_T5) tnl_type = cxgb_encap_offload_supported(skb); + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + sgl_sdesc = &q->q.sdesc[last_desc]; + if (!immediate && - unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { + unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { + memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); q->mapping_err++; if (ptp_enabled) spin_unlock(&adap->ptp_lock); @@ -1521,13 +1499,17 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) } wr = (void *)&q->q.desc[q->q.pidx]; + eowr = (void *)&q->q.desc[q->q.pidx]; wr->equiq_to_len16 = htonl(wr_mid); wr->r3 = cpu_to_be64(0); - end = (u64 *)wr + flits; + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + end = (u64 *)eowr + flits; + else + end = (u64 *)wr + flits; len = immediate ? 
skb->len : 0; len += sizeof(*cpl); - if (ssi->gso_size) { + if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) { struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1); @@ -1559,20 +1541,29 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) cntrl = hwcsum(adap->params.chip, skb); } sgl = (u64 *)(cpl + 1); /* sgl start here */ - if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { - /* If current position is already at the end of the - * txq, reset the current to point to start of the queue - * and update the end ptr as well. - */ - if (sgl == (u64 *)q->q.stat) { - int left = (u8 *)end - (u8 *)q->q.stat; - - end = (void *)q->q.desc + left; - sgl = (void *)q->q.desc; - } - } q->tso++; q->tx_cso += ssi->gso_segs; + } else if (ssi->gso_size) { + u64 *start; + u32 hdrlen; + + hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb)); + len += hdrlen; + wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | + FW_ETH_TX_EO_WR_IMMDLEN_V(len)); + cpl = write_eo_udp_wr(skb, eowr, hdrlen); + cntrl = hwcsum(adap->params.chip, skb); + + start = (u64 *)(cpl + 1); + sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start, + hdrlen); + if (unlikely(start > sgl)) { + left = (u8 *)end - (u8 *)q->q.stat; + end = (void *)q->q.desc + left; + } + sgl_off = hdrlen; + q->uso++; + q->tx_cso += ssi->gso_segs; } else { if (ptp_enabled) op = FW_PTP_TX_PKT_WR; @@ -1589,6 +1580,16 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) } } + if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { + /* If current position is already at the end of the + * txq, reset the current to point to start of the queue + * and update the end ptr as well. + */ + left = (u8 *)end - (u8 *)q->q.stat; + end = (void *)q->q.desc + left; + sgl = (void *)q->q.desc; + } + if (skb_vlan_tag_present(skb)) { q->vlan_ins++; cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); @@ -1618,16 +1619,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) cxgb4_inline_tx_skb(skb, &q->q, sgl); dev_consume_skb_any(skb); } else { - int last_desc; - - cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr); + cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off, + sgl_sdesc->addr); skb_orphan(skb); - - last_desc = q->q.pidx + ndesc - 1; - if (last_desc >= q->q.size) - last_desc -= q->q.size; - q->q.sdesc[last_desc].skb = skb; - q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl; + sgl_sdesc->skb = skb; } txq_advance(&q->q, ndesc); @@ -1725,12 +1720,12 @@ static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb) static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) { - dma_addr_t addr[MAX_SKB_FRAGS + 1]; + unsigned int last_desc, flits, ndesc; const struct skb_shared_info *ssi; struct fw_eth_tx_pkt_vm_wr *wr; + struct tx_sw_desc *sgl_sdesc; struct cpl_tx_pkt_core *cpl; const struct port_info *pi; - unsigned int flits, ndesc; struct sge_eth_txq *txq; struct adapter *adapter; int qidx, credits, ret; @@ -1782,12 +1777,19 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } + last_desc = txq->q.pidx + ndesc - 1; + if (last_desc >= txq->q.size) + last_desc -= txq->q.size; + sgl_sdesc = &txq->q.sdesc[last_desc]; + if (!t4vf_is_eth_imm(skb) && - unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) { + unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, + sgl_sdesc->addr) < 0)) { /* We need to map the skb into PCI DMA space (because it can't * be 
in-lined directly into the Work Request) and the mapping * operation failed. Record the error and drop the packet. */ + memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); txq->mapping_err++; goto out_free; } @@ -1962,7 +1964,6 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, */ struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); struct sge_txq *tq = &txq->q; - int last_desc; /* If the Work Request header was an exact multiple of our TX * Descriptor length, then it's possible that the starting SGL @@ -1976,14 +1977,9 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, ((void *)end - (void *)tq->stat)); } - cxgb4_write_sgl(skb, tq, sgl, end, 0, addr); + cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr); skb_orphan(skb); - - last_desc = tq->pidx + ndesc - 1; - if (last_desc >= tq->size) - last_desc -= tq->size; - tq->sdesc[last_desc].skb = skb; - tq->sdesc[last_desc].sgl = sgl; + sgl_sdesc->skb = skb; } /* Advance our internal TX Queue state, tell the hardware about @@ -2035,7 +2031,7 @@ static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max) void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *eosw_txq, u32 ndesc) { - struct sge_eosw_desc *d; + struct tx_sw_desc *d; d = &eosw_txq->desc[eosw_txq->last_cidx]; while (ndesc--) { @@ -2081,7 +2077,8 @@ static inline u8 ethofld_calc_tx_flits(struct adapter *adap, u32 wrlen; wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core); - if (skb_shinfo(skb)->gso_size) + if (skb_shinfo(skb)->gso_size && + !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) wrlen += sizeof(struct cpl_tx_pkt_lso_core); wrlen += roundup(hdr_len, 16); @@ -2089,10 +2086,14 @@ static inline u8 ethofld_calc_tx_flits(struct adapter *adap, /* Packet headers + WR + CPLs */ flits = DIV_ROUND_UP(wrlen, 8); - if (skb_shinfo(skb)->nr_frags > 0) - nsgl = sgl_len(skb_shinfo(skb)->nr_frags); - else if (skb->len - hdr_len) + if (skb_shinfo(skb)->nr_frags > 0) { + if (skb_headlen(skb) - hdr_len) + nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1); + else + nsgl = sgl_len(skb_shinfo(skb)->nr_frags); + } else if (skb->len - hdr_len) { nsgl = sgl_len(1); + } return flits + nsgl; } @@ -2106,16 +2107,16 @@ static inline void *write_eo_wr(struct adapter *adap, struct cpl_tx_pkt_core *cpl; u32 immd_len, wrlen16; bool compl = false; + u8 ver, proto; + + ver = ip_hdr(skb)->version; + proto = (ver == 6) ? 
ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol; wrlen16 = DIV_ROUND_UP(wrlen, 16); immd_len = sizeof(struct cpl_tx_pkt_core); - if (skb_shinfo(skb)->gso_size) { - if (skb->encapsulation && - CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) - immd_len += sizeof(struct cpl_tx_tnl_lso); - else - immd_len += sizeof(struct cpl_tx_pkt_lso_core); - } + if (skb_shinfo(skb)->gso_size && + !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) + immd_len += sizeof(struct cpl_tx_pkt_lso_core); immd_len += hdr_len; if (!eosw_txq->ncompl || @@ -2131,23 +2132,27 @@ static inline void *write_eo_wr(struct adapter *adap, wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) | FW_WR_FLOWID_V(eosw_txq->hwtid)); wr->r3 = 0; - wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; - wr->u.tcpseg.ethlen = skb_network_offset(skb); - wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); - wr->u.tcpseg.tcplen = tcp_hdrlen(skb); - wr->u.tcpseg.tsclk_tsoff = 0; - wr->u.tcpseg.r4 = 0; - wr->u.tcpseg.r5 = 0; - wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); - - if (ssi->gso_size) { - struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); - - wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); - cpl = write_tso_wr(adap, skb, lso); + if (proto == IPPROTO_UDP) { + cpl = write_eo_udp_wr(skb, wr, hdr_len); } else { - wr->u.tcpseg.mss = cpu_to_be16(0xffff); - cpl = (void *)(wr + 1); + wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; + wr->u.tcpseg.ethlen = skb_network_offset(skb); + wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); + wr->u.tcpseg.tcplen = tcp_hdrlen(skb); + wr->u.tcpseg.tsclk_tsoff = 0; + wr->u.tcpseg.r4 = 0; + wr->u.tcpseg.r5 = 0; + wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); + + if (ssi->gso_size) { + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); + + wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); + cpl = write_tso_wr(adap, skb, lso); + } else { + wr->u.tcpseg.mss = cpu_to_be16(0xffff); + cpl = (void *)(wr + 1); + } } eosw_txq->cred -= wrlen16; @@ -2167,7 +2172,7 @@ static void ethofld_hard_xmit(struct net_device *dev, struct cpl_tx_pkt_core *cpl; struct fw_eth_tx_eo_wr *wr; bool skip_eotx_wr = false; - struct sge_eosw_desc *d; + struct tx_sw_desc *d; struct sk_buff *skb; u8 flits, ndesc; int left; @@ -2257,6 +2262,19 @@ write_wr_headers: d->addr); } + if (skb_shinfo(skb)->gso_size) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + eohw_txq->uso++; + else + eohw_txq->tso++; + eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + eohw_txq->tx_cso++; + } + + if (skb_vlan_tag_present(skb)) + eohw_txq->vlan_ins++; + txq_advance(&eohw_txq->q, ndesc); cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc); eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); @@ -2283,7 +2301,7 @@ static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq) case CXGB4_EO_STATE_CLOSED: default: return; - }; + } while (pktcount--) { skb = eosw_txq_peek(eosw_txq); @@ -4369,7 +4387,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, txq->q.q_type = CXGB4_TXQ_ETH; init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); txq->txq = netdevq; - txq->tso = txq->tx_cso = txq->vlan_ins = 0; + txq->tso = 0; + txq->uso = 0; + txq->tx_cso = 0; + txq->vlan_ins = 0; txq->mapping_err = 0; txq->dbqt = dbqt; @@ -4538,6 +4559,7 @@ int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq, spin_lock_init(&txq->lock); txq->adap = adap; txq->tso = 0; + txq->uso = 0; txq->tx_cso = 0; txq->vlan_ins = 0; 
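/*
 * Editor's sketch, not part of the patch: the sge.c hunks above add UDP
 * segmentation offload (USO) next to the existing TSO path. In
 * cxgb4_eth_xmit() the choice reduces to the check below - UDP GSO skbs
 * (SKB_GSO_UDP_L4) take the new FW_ETH_TX_EO_WR/udpseg work request and
 * bump the new uso counter, while TCP GSO skbs keep the
 * cpl_tx_pkt_lso_core path. The enum and helper names here are
 * simplified stand-ins, not driver symbols.
 */
#include <stdbool.h>

enum tx_wr_kind { PLAIN_PKT_WR, TCP_LSO_WR, UDP_SEG_EO_WR };

/* gso_size and udp_l4 mirror skb_shinfo(skb)->gso_size and
 * (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4).
 */
static enum tx_wr_kind pick_tx_wr(unsigned int gso_size, bool udp_l4)
{
	if (!gso_size)
		return PLAIN_PKT_WR;	/* no segmentation requested */
	if (udp_l4)
		return UDP_SEG_EO_WR;	/* new USO path, counted in txq->uso */
	return TCP_LSO_WR;		/* existing TSO path, counted in txq->tso */
}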
txq->mapping_err = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index f2a7824da42b..19d18acfc9a6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -8777,8 +8777,8 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, unsigned int *speedp, unsigned int *mtup) { unsigned int fw_caps = pi->adapter->params.fw_caps_support; - struct fw_port_cmd port_cmd; unsigned int action, link_ok, mtu; + struct fw_port_cmd port_cmd; fw_port_cap32_t linkattr; int ret; @@ -8813,9 +8813,12 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32)); } - *link_okp = link_ok; - *speedp = fwcap_to_speed(linkattr); - *mtup = mtu; + if (link_okp) + *link_okp = link_ok; + if (speedp) + *speedp = fwcap_to_speed(linkattr); + if (mtup) + *mtup = mtu; return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 414e5cca293e..ac4fb43bdec6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -536,7 +536,8 @@ struct fw_eth_tx_pkt_wr { }; enum fw_eth_tx_eo_type { - FW_ETH_TX_EO_TYPE_TCPSEG = 1, + FW_ETH_TX_EO_TYPE_UDPSEG = 0, + FW_ETH_TX_EO_TYPE_TCPSEG, }; struct fw_eth_tx_eo_wr { @@ -544,6 +545,17 @@ struct fw_eth_tx_eo_wr { __be32 equiq_to_len16; __be64 r3; union fw_eth_tx_eo { + struct fw_eth_tx_eo_udpseg { + __u8 type; + __u8 ethlen; + __be16 iplen; + __u8 udplen; + __u8 rtplen; + __be16 r4; + __be16 mss; + __be16 schedpktsize; + __be32 plen; + } udpseg; struct fw_eth_tx_eo_tcpseg { __u8 type; __u8 ethlen; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 11538e59f587..7ff147e89426 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1807,11 +1807,8 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog) if (prog && !xdp_mtu_valid(priv, dev->mtu)) return -EINVAL; - if (prog) { - prog = bpf_prog_add(prog, priv->num_channels); - if (IS_ERR(prog)) - return PTR_ERR(prog); - } + if (prog) + bpf_prog_add(prog, priv->num_channels); up = netif_running(dev); need_update = (!!priv->xdp_prog != !!prog); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index f6b00c68451b..27f6fd1708f0 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1436,7 +1436,7 @@ int enetc_close(struct net_device *ndev) return 0; } -int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) +static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct tc_mqprio_qopt *mqprio = type_data; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index b886b075650e..05c1899f6628 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3636,6 +3636,11 @@ fec_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); struct device_node *np = pdev->dev.of_node; + int ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) + return ret; cancel_work_sync(&fep->tx_timeout_work); fec_ptp_stop(pdev); @@ -3643,15 +3648,17 @@ 
fec_drv_remove(struct platform_device *pdev) fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); - pm_runtime_put(&pdev->dev); - pm_runtime_disable(&pdev->dev); - clk_disable_unprepare(fep->clk_ahb); - clk_disable_unprepare(fep->clk_ipg); + if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); free_netdev(ndev); + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return 0; } diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig index 245d9a68a71f..7f20840fde07 100644 --- a/drivers/net/ethernet/freescale/fs_enet/Kconfig +++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig @@ -1,9 +1,9 @@ # SPDX-License-Identifier: GPL-2.0-only config FS_ENET - tristate "Freescale Ethernet Driver" - depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) - select MII - select PHYLIB + tristate "Freescale Ethernet Driver" + depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) + select MII + select PHYLIB config FS_ENET_MPC5121_FEC def_bool y if (FS_ENET && PPC_MPC512x) diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 0a9a7ee2a866..f4889431f9b7 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -393,12 +393,13 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc, static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses, u64 iov_offset, u64 iov_len) { + u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE; + u64 first_page = iov_offset / PAGE_SIZE; dma_addr_t dma; - u64 addr; + u64 page; - for (addr = iov_offset; addr < iov_offset + iov_len; - addr += PAGE_SIZE) { - dma = page_buses[addr / PAGE_SIZE]; + for (page = first_page; page <= last_page; page++) { + dma = page_buses[page]; dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE); } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 88f6c4cbac7c..ebb4c6e9aed3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -165,7 +165,7 @@ enum HLCGE_PORT_TYPE { #define HCLGE_GLOBAL_RESET_BIT 0 #define HCLGE_CORE_RESET_BIT 1 #define HCLGE_IMP_RESET_BIT 2 -#define HCLGE_RESET_INT_M GENMASK(2, 0) +#define HCLGE_RESET_INT_M GENMASK(7, 5) #define HCLGE_FUN_RST_ING 0x20C00 #define HCLGE_FUN_RST_ING_B 0 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index f59d9a8e35e2..0686ded7ad3a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2878,10 +2878,15 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, if (test_bit(0, &adapter->resetting) && adapter->reset_reason == VNIC_RESET_MOBILITY) { - struct irq_desc *desc = irq_to_desc(scrq->irq); - struct irq_chip *chip = irq_desc_get_chip(desc); + u64 val = (0xff000000) | scrq->hw_irq; - chip->irq_eoi(&desc->irq_data); + rc = plpar_hcall_norets(H_EOI, val); + /* H_EOI would fail with rc = H_FUNCTION when running + * in XIVE mode which is expected, but not an error. + */ + if (rc && (rc != H_FUNCTION)) + dev_err(dev, "H_EOI FAILED irq 0x%llx. 
rc=%ld\n", + val, rc); } rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 8d7e8fc55585..f972dce8aebb 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -130,6 +130,8 @@ extern const char ice_drv_ver[]; ICE_PROMISC_VLAN_TX | \ ICE_PROMISC_VLAN_RX) +#define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) + struct ice_txq_meta { u32 q_teid; /* Tx-scheduler element identifier */ u16 q_id; /* Entry in VSI's txq_map bitmap */ @@ -283,6 +285,8 @@ struct ice_vsi { u16 num_txq; /* Used Tx queues */ u16 alloc_rxq; /* Allocated Rx queues */ u16 num_rxq; /* Used Rx queues */ + u16 req_txq; /* User requested Tx queues */ + u16 req_rxq; /* User requested Rx queues */ u16 num_rx_desc; u16 num_tx_desc; struct ice_tc_cfg tc_cfg; @@ -489,6 +493,7 @@ void ice_set_ethtool_ops(struct net_device *netdev); void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); u16 ice_get_avail_txq_count(struct ice_pf *pf); u16 ice_get_avail_rxq_count(struct ice_pf *pf); +int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx); void ice_update_vsi_stats(struct ice_vsi *vsi); void ice_update_pf_stats(struct ice_pf *pf); int ice_up(struct ice_vsi *vsi); @@ -503,6 +508,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); +int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); int ice_open(struct net_device *netdev); int ice_stop(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 69d2da14fe5c..77d6a0291e97 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -101,7 +101,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) struct ice_q_vector *q_vector; /* allocate q_vector */ - q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL); + q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector), + GFP_KERNEL); if (!q_vector) return -ENOMEM; @@ -138,10 +139,11 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) struct ice_q_vector *q_vector; struct ice_pf *pf = vsi->back; struct ice_ring *ring; + struct device *dev; + dev = ice_pf_to_dev(pf); if (!vsi->q_vectors[v_idx]) { - dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n", - v_idx); + dev_dbg(dev, "Queue vector at index %d not found\n", v_idx); return; } q_vector = vsi->q_vectors[v_idx]; @@ -155,7 +157,7 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) if (vsi->netdev) netif_napi_del(&q_vector->napi); - devm_kfree(&pf->pdev->dev, q_vector); + devm_kfree(dev, q_vector); vsi->q_vectors[v_idx] = NULL; } @@ -482,7 +484,7 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx) /* wait for the change to finish */ ret = ice_pf_rxq_wait(pf, pf_q, ena); if (ret) - dev_err(&pf->pdev->dev, + dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n", vsi->idx, pf_q, (ena ? 
"en" : "dis")); @@ -500,11 +502,12 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; int v_idx = 0, num_q_vectors; + struct device *dev; int err; + dev = ice_pf_to_dev(pf); if (vsi->q_vectors[0]) { - dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n", - vsi->vsi_num); + dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num); return -EEXIST; } @@ -522,8 +525,7 @@ err_out: while (v_idx--) ice_free_q_vector(vsi, v_idx); - dev_err(&pf->pdev->dev, - "Failed to allocate %d q_vector for VSI %d, ret=%d\n", + dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n", vsi->num_q_vectors, vsi->vsi_num, err); vsi->num_q_vectors = 0; return err; @@ -640,7 +642,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle, 1, qg_buf, buf_len, NULL); if (status) { - dev_err(&pf->pdev->dev, + dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n", status); return -ENODEV; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 36be501ae623..fb1d930470c7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1673,6 +1673,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, caps->valid_functions); + + /* store func count for resource management purposes */ + if (dev_p) + dev_p->num_funcs = hweight32(number); break; case ICE_AQC_CAPS_SRIOV: caps->sr_iov_1_1 = (number == 1); @@ -1779,6 +1783,18 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, break; } } + + /* Re-calculate capabilities that are dependent on the number of + * physical ports; i.e. some features are not supported or function + * differently on devices with more than 4 ports. + */ + if (hw->dev_caps.num_funcs > 4) { + /* Max 4 TCs per port */ + caps->maxtc = 4; + ice_debug(hw, ICE_DBG_INIT, + "%s: maxtc = %d (based on #ports)\n", prefix, + caps->maxtc); + } } /** @@ -1875,8 +1891,7 @@ void ice_set_safe_mode_caps(struct ice_hw *hw) struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; u32 valid_func, rxq_first_id, txq_first_id; u32 msix_vector_first_id, max_mtu; - u32 num_func = 0; - u8 i; + u32 num_funcs; /* cache some func_caps values that should be restored after memset */ valid_func = func_caps->common_cap.valid_functions; @@ -1909,6 +1924,7 @@ void ice_set_safe_mode_caps(struct ice_hw *hw) rxq_first_id = dev_caps->common_cap.rxq_first_id; msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id; max_mtu = dev_caps->common_cap.max_mtu; + num_funcs = dev_caps->num_funcs; /* unset dev capabilities */ memset(dev_caps, 0, sizeof(*dev_caps)); @@ -1919,19 +1935,14 @@ void ice_set_safe_mode_caps(struct ice_hw *hw) dev_caps->common_cap.rxq_first_id = rxq_first_id; dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id; dev_caps->common_cap.max_mtu = max_mtu; - - /* valid_func is a bitmap. 
get number of functions */ -#define ICE_MAX_FUNCS 8 - for (i = 0; i < ICE_MAX_FUNCS; i++) - if (valid_func & BIT(i)) - num_func++; + dev_caps->num_funcs = num_funcs; /* one Tx and one Rx queue per function in safe mode */ - dev_caps->common_cap.num_rxq = num_func; - dev_caps->common_cap.num_txq = num_func; + dev_caps->common_cap.num_rxq = num_funcs; + dev_caps->common_cap.num_txq = num_funcs; /* two MSIX vectors per function */ - dev_caps->common_cap.num_msix_vectors = 2 * num_func; + dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index 4df9da359135..bf0ebe6149e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -22,7 +22,7 @@ */ #define EXP_FW_API_VER_BRANCH 0x00 #define EXP_FW_API_VER_MAJOR 0x01 -#define EXP_FW_API_VER_MINOR 0x03 +#define EXP_FW_API_VER_MINOR 0x05 /* Different control queue types: These are mainly for SW consumption. */ enum ice_ctl_q { diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 1150dbd98d0b..d3d3ec29def9 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -4,8 +4,6 @@ #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" -static void ice_pf_dcb_recfg(struct ice_pf *pf); - /** * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration * @vsi: the VSI being configured @@ -160,6 +158,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) { struct ice_aqc_port_ets_elem buf = { 0 }; struct ice_dcbx_cfg *old_cfg, *curr_cfg; + struct device *dev = ice_pf_to_dev(pf); int ret = ICE_DCB_NO_HW_CHG; struct ice_vsi *pf_vsi; @@ -171,15 +170,15 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) /* Enable DCB tagging only when more than one TC */ if (ice_dcb_get_num_tc(new_cfg) > 1) { - dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n"); + dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n"); set_bit(ICE_FLAG_DCB_ENA, pf->flags); } else { - dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n"); + dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n"); clear_bit(ICE_FLAG_DCB_ENA, pf->flags); } if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) { - dev_dbg(&pf->pdev->dev, "No change in DCB config required\n"); + dev_dbg(dev, "No change in DCB config required\n"); return ret; } @@ -188,10 +187,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) if (!old_cfg) return -ENOMEM; - dev_info(&pf->pdev->dev, "Commit DCB Configuration to the hardware\n"); + dev_info(dev, "Commit DCB Configuration to the hardware\n"); pf_vsi = ice_get_main_vsi(pf); if (!pf_vsi) { - dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n"); + dev_dbg(dev, "PF VSI doesn't exist\n"); ret = -EINVAL; goto free_cfg; } @@ -213,7 +212,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) if (pf->hw.port_info->is_sw_lldp) { ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { - dev_err(&pf->pdev->dev, "Set DCB Config failed\n"); + dev_err(dev, "Set DCB Config failed\n"); /* Restore previous settings to local config */ memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg)); goto out; @@ -222,7 +221,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { - dev_err(&pf->pdev->dev, "Query Port 
ETS failed\n"); + dev_err(dev, "Query Port ETS failed\n"); goto out; } @@ -269,6 +268,7 @@ static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, struct ice_dcbx_cfg *new_cfg) { + struct device *dev = ice_pf_to_dev(pf); bool need_reconfig = false; /* Check if ETS configuration has changed */ @@ -279,33 +279,33 @@ ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg, &old_cfg->etscfg.prio_table, sizeof(new_cfg->etscfg.prio_table))) { need_reconfig = true; - dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); + dev_dbg(dev, "ETS UP2TC changed.\n"); } if (memcmp(&new_cfg->etscfg.tcbwtable, &old_cfg->etscfg.tcbwtable, sizeof(new_cfg->etscfg.tcbwtable))) - dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); + dev_dbg(dev, "ETS TC BW Table changed.\n"); if (memcmp(&new_cfg->etscfg.tsatable, &old_cfg->etscfg.tsatable, sizeof(new_cfg->etscfg.tsatable))) - dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); + dev_dbg(dev, "ETS TSA Table changed.\n"); } /* Check if PFC configuration has changed */ if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { need_reconfig = true; - dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); + dev_dbg(dev, "PFC config change detected.\n"); } /* Check if APP Table has changed */ if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) { need_reconfig = true; - dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); + dev_dbg(dev, "APP Table change detected.\n"); } - dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig); + dev_dbg(dev, "dcb need_reconfig=%d\n", need_reconfig); return need_reconfig; } @@ -317,11 +317,12 @@ void ice_dcb_rebuild(struct ice_pf *pf) { struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg; struct ice_aqc_port_ets_elem buf = { 0 }; + struct device *dev = ice_pf_to_dev(pf); enum ice_status ret; ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { - dev_err(&pf->pdev->dev, "Query Port ETS failed\n"); + dev_err(dev, "Query Port ETS failed\n"); goto dcb_error; } @@ -340,17 +341,14 @@ void ice_dcb_rebuild(struct ice_pf *pf) ice_cfg_etsrec_defaults(pf->hw.port_info); ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { - dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n"); + dev_err(dev, "Failed to set DCB to unwilling\n"); goto dcb_error; } /* Retrieve DCB config and ensure same as current in SW */ - prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg, - sizeof(*prev_cfg), GFP_KERNEL); - if (!prev_cfg) { - dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n"); + prev_cfg = kmemdup(local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL); + if (!prev_cfg) goto dcb_error; - } ice_init_dcb(&pf->hw, true); if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS) @@ -360,12 +358,13 @@ void ice_dcb_rebuild(struct ice_pf *pf) if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) { /* difference in cfg detected - disable DCB till next MIB */ - dev_err(&pf->pdev->dev, "Set local MIB not accurate\n"); + dev_err(dev, "Set local MIB not accurate\n"); + kfree(prev_cfg); goto dcb_error; } /* fetched config congruent to previous configuration */ - devm_kfree(&pf->pdev->dev, prev_cfg); + kfree(prev_cfg); /* Set the local desired config */ if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE) @@ -375,27 +374,30 @@ void ice_dcb_rebuild(struct ice_pf *pf) ice_cfg_etsrec_defaults(pf->hw.port_info); ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) { - dev_err(&pf->pdev->dev, "Failed to set desired config\n"); + dev_err(dev, "Failed to set 
desired config\n"); goto dcb_error; } - dev_info(&pf->pdev->dev, "DCB restored after reset\n"); + dev_info(dev, "DCB restored after reset\n"); ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { - dev_err(&pf->pdev->dev, "Query Port ETS failed\n"); + dev_err(dev, "Query Port ETS failed\n"); goto dcb_error; } return; dcb_error: - dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n"); - prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL); + dev_err(dev, "Disabling DCB until new settings occur\n"); + prev_cfg = kzalloc(sizeof(*prev_cfg), GFP_KERNEL); + if (!prev_cfg) + return; + prev_cfg->etscfg.willing = true; prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW; prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS; memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec)); ice_pf_dcb_cfg(pf, prev_cfg, false); - devm_kfree(&pf->pdev->dev, prev_cfg); + kfree(prev_cfg); } /** @@ -410,18 +412,17 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked) int ret = 0; pi = pf->hw.port_info; - newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL); + newcfg = kmemdup(&pi->local_dcbx_cfg, sizeof(*newcfg), GFP_KERNEL); if (!newcfg) return -ENOMEM; - memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg)); memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg)); - dev_info(&pf->pdev->dev, "Configuring initial DCB values\n"); + dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n"); if (ice_pf_dcb_cfg(pf, newcfg, locked)) ret = -EINVAL; - devm_kfree(&pf->pdev->dev, newcfg); + kfree(newcfg); return ret; } @@ -442,9 +443,10 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) hw = &pf->hw; pi = hw->port_info; - dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL); + dcbcfg = kzalloc(sizeof(*dcbcfg), GFP_KERNEL); + if (!dcbcfg) + return -ENOMEM; - memset(dcbcfg, 0, sizeof(*dcbcfg)); memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg)); dcbcfg->etscfg.willing = ets_willing ? 1 : 0; @@ -465,7 +467,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE; ret = ice_pf_dcb_cfg(pf, dcbcfg, locked); - devm_kfree(&pf->pdev->dev, dcbcfg); + kfree(dcbcfg); if (ret) return ret; @@ -504,13 +506,13 @@ static bool ice_dcb_tc_contig(u8 *prio_table) static int ice_dcb_noncontig_cfg(struct ice_pf *pf) { struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg; + struct device *dev = ice_pf_to_dev(pf); int ret; /* Configure SW DCB default with ETS non-willing */ ret = ice_dcb_sw_dflt_cfg(pf, false, true); if (ret) { - dev_err(&pf->pdev->dev, - "Failed to set local DCB config %d\n", ret); + dev_err(dev, "Failed to set local DCB config %d\n", ret); return ret; } @@ -518,7 +520,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) dcbcfg->etscfg.willing = 1; ret = ice_set_dcb_cfg(pf->hw.port_info); if (ret) - dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n"); + dev_err(dev, "Failed to set DCB to unwilling\n"); return ret; } @@ -531,7 +533,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) * calling this function. Reconfiguring DCB based on * local_dcbx_cfg. 
*/ -static void ice_pf_dcb_recfg(struct ice_pf *pf) +void ice_pf_dcb_recfg(struct ice_pf *pf) { struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg; u8 tc_map = 0; @@ -539,10 +541,12 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf) /* Update each VSI */ ice_for_each_vsi(pf, v) { - if (!pf->vsi[v]) + struct ice_vsi *vsi = pf->vsi[v]; + + if (!vsi) continue; - if (pf->vsi[v]->type == ICE_VSI_PF) { + if (vsi->type == ICE_VSI_PF) { tc_map = ice_dcb_get_ena_tc(dcbcfg); /* If DCBX request non-contiguous TC, then configure @@ -556,17 +560,16 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf) tc_map = ICE_DFLT_TRAFFIC_CLASS; } - ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map); + ret = ice_vsi_cfg_tc(vsi, tc_map); if (ret) { - dev_err(&pf->pdev->dev, - "Failed to config TC for VSI index: %d\n", - pf->vsi[v]->idx); + dev_err(ice_pf_to_dev(pf), "Failed to config TC for VSI index: %d\n", + vsi->idx); continue; } - ice_vsi_map_rings_to_vectors(pf->vsi[v]); - if (pf->vsi[v]->type == ICE_VSI_PF) - ice_dcbnl_set_all(pf->vsi[v]); + ice_vsi_map_rings_to_vectors(vsi); + if (vsi->type == ICE_VSI_PF) + ice_dcbnl_set_all(vsi); } } @@ -577,7 +580,7 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf) */ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) { - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); struct ice_port_info *port_info; struct ice_hw *hw = &pf->hw; int err; @@ -586,23 +589,22 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) err = ice_init_dcb(hw, false); if (err && !port_info->is_sw_lldp) { - dev_err(&pf->pdev->dev, "Error initializing DCB %d\n", err); + dev_err(dev, "Error initializing DCB %d\n", err); goto dcb_init_err; } - dev_info(&pf->pdev->dev, + dev_info(dev, "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n", pf->hw.func_caps.common_cap.maxtc); if (err) { struct ice_vsi *pf_vsi; /* FW LLDP is disabled, activate SW DCBX/LLDP mode */ - dev_info(&pf->pdev->dev, - "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); + dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); err = ice_dcb_sw_dflt_cfg(pf, true, locked); if (err) { - dev_err(&pf->pdev->dev, + dev_err(dev, "Failed to set local DCB config %d\n", err); err = -EIO; goto dcb_init_err; @@ -613,8 +615,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) */ pf_vsi = ice_get_main_vsi(pf); if (!pf_vsi) { - dev_err(&pf->pdev->dev, - "Failed to set local DCB config\n"); + dev_err(dev, "Failed to set local DCB config\n"); err = -EIO; goto dcb_init_err; } @@ -729,6 +730,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { struct ice_aqc_port_ets_elem buf = { 0 }; + struct device *dev = ice_pf_to_dev(pf); struct ice_aqc_lldp_get_mib *mib; struct ice_dcbx_cfg tmp_dcbx_cfg; bool need_reconfig = false; @@ -742,8 +744,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, return; if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) { - dev_dbg(&pf->pdev->dev, - "MIB Change Event in HOST mode\n"); + dev_dbg(dev, "MIB Change Event in HOST mode\n"); return; } @@ -752,21 +753,20 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, /* Ignore if event is not for Nearest Bridge */ type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) & ICE_AQ_LLDP_BRID_TYPE_M); - dev_dbg(&pf->pdev->dev, "LLDP event MIB bridge type 0x%x\n", type); + dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type); if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID) return; /* Check MIB Type and return if event for Remote MIB 
update */ type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; - dev_dbg(&pf->pdev->dev, - "LLDP event mib type %s\n", type ? "remote" : "local"); + dev_dbg(dev, "LLDP event mib type %s\n", type ? "remote" : "local"); if (type == ICE_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, &pi->remote_dcbx_cfg); if (ret) { - dev_err(&pf->pdev->dev, "Failed to get remote DCB config\n"); + dev_err(dev, "Failed to get remote DCB config\n"); return; } } @@ -780,14 +780,13 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, /* Get updated DCBX data from firmware */ ret = ice_get_dcb_cfg(pf->hw.port_info); if (ret) { - dev_err(&pf->pdev->dev, "Failed to get DCB config\n"); + dev_err(dev, "Failed to get DCB config\n"); return; } /* No change detected in DCBX configs */ if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) { - dev_dbg(&pf->pdev->dev, - "No change detected in DCBX configuration.\n"); + dev_dbg(dev, "No change detected in DCBX configuration.\n"); return; } @@ -799,16 +798,16 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, /* Enable DCB tagging only when more than one TC */ if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) { - dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n"); + dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n"); set_bit(ICE_FLAG_DCB_ENA, pf->flags); } else { - dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n"); + dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n"); clear_bit(ICE_FLAG_DCB_ENA, pf->flags); } pf_vsi = ice_get_main_vsi(pf); if (!pf_vsi) { - dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n"); + dev_dbg(dev, "PF VSI doesn't exist\n"); return; } @@ -817,7 +816,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { - dev_err(&pf->pdev->dev, "Query Port ETS failed\n"); + dev_err(dev, "Query Port ETS failed\n"); rtnl_unlock(); return; } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index e90e25b7da77..f15e5776f287 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -20,6 +20,7 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index); int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked); +void ice_pf_dcb_recfg(struct ice_pf *pf); void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi); int ice_init_pf_dcb(struct ice_pf *pf, bool locked); void ice_update_dcb_stats(struct ice_pf *pf); @@ -58,7 +59,7 @@ ice_dcb_get_tc(struct ice_vsi __always_unused *vsi, static inline int ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked) { - dev_dbg(&pf->pdev->dev, "DCB not supported\n"); + dev_dbg(ice_pf_to_dev(pf), "DCB not supported\n"); return -EOPNOTSUPP; } @@ -78,6 +79,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring, } #define ice_update_dcb_stats(pf) do {} while (0) +#define ice_pf_dcb_recfg(pf) do {} while (0) #define ice_vsi_cfg_dcb_rings(vsi) do {} while (0) #define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0) #define ice_set_cgd_num(tlan_ctx, ring) do {} while (0) diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 3c90fc0a3feb..d870c1aedc17 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ 
b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -179,7 +179,7 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) else pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; - dev_info(&pf->pdev->dev, "DCBx mode = 0x%x\n", mode); + dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode); return ICE_DCB_HW_CHG_RST; } @@ -297,7 +297,7 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting) return; *setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1; - dev_dbg(&pf->pdev->dev, + dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n", prio, *setting, pi->local_dcbx_cfg.pfc.pfcena); } @@ -328,7 +328,7 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set) else new_cfg->pfc.pfcena &= ~BIT(prio); - dev_dbg(&pf->pdev->dev, "Set PFC config UP:%d set:%d pfcena:0x%x\n", + dev_dbg(ice_pf_to_dev(pf), "Set PFC config UP:%d set:%d pfcena:0x%x\n", prio, set, new_cfg->pfc.pfcena); } @@ -359,7 +359,7 @@ static u8 ice_dcbnl_getstate(struct net_device *netdev) state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); - dev_dbg(&pf->pdev->dev, "DCB enabled state = %d\n", state); + dev_dbg(ice_pf_to_dev(pf), "DCB enabled state = %d\n", state); return state; } @@ -418,7 +418,7 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio, return; *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio]; - dev_dbg(&pf->pdev->dev, + dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio, *pgid); } @@ -479,7 +479,7 @@ ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct) return; *bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid]; - dev_dbg(&pf->pdev->dev, "Get PG BW config tc=%d bw_pct=%d\n", + dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n", pgid, *bw_pct); } @@ -597,7 +597,7 @@ static u8 ice_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) break; } - dev_dbg(&pf->pdev->dev, "DCBX Get Capability cap=%d capval=0x%x\n", + dev_dbg(ice_pf_to_dev(pf), "DCBX Get Capability cap=%d capval=0x%x\n", capid, *cap); return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 1f00091f7906..aec3c6c379df 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -248,7 +248,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, int ret = 0; u16 *buf; - dev = &pf->pdev->dev; + dev = ice_pf_to_dev(pf); eeprom->magic = hw->vendor_id | (hw->device_id << 16); @@ -343,6 +343,7 @@ static u64 ice_eeprom_test(struct net_device *netdev) static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask) { struct ice_pf *pf = (struct ice_pf *)hw->back; + struct device *dev = ice_pf_to_dev(pf); static const u32 patterns[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF @@ -358,7 +359,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask) val = rd32(hw, reg); if (val == pattern) continue; - dev_err(&pf->pdev->dev, + dev_err(dev, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n" , __func__, reg, pattern, val); return 1; @@ -367,7 +368,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask) wr32(hw, reg, orig_val); val = rd32(hw, reg); if (val != orig_val) { - dev_err(&pf->pdev->dev, + dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n" , __func__, reg, orig_val, val); return 1; @@ -507,7 +508,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, 
u16 size) if (!pf) return -EINVAL; - data = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); + data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL); if (!data) return -ENOMEM; @@ -649,9 +650,11 @@ static u64 ice_loopback_test(struct net_device *netdev) u8 broadcast[ETH_ALEN], ret = 0; int num_frames, valid_frames; LIST_HEAD(tmp_list); + struct device *dev; u8 *tx_frame; int i; + dev = ice_pf_to_dev(pf); netdev_info(netdev, "loopback test\n"); test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info); @@ -712,12 +715,12 @@ static u64 ice_loopback_test(struct net_device *netdev) ret = 10; lbtest_free_frame: - devm_kfree(&pf->pdev->dev, tx_frame); + devm_kfree(dev, tx_frame); remove_mac_filters: if (ice_remove_mac(&pf->hw, &tmp_list)) netdev_err(netdev, "Could not remove MAC filter for the test VSI"); free_mac_list: - ice_free_fltr_list(&pf->pdev->dev, &tmp_list); + ice_free_fltr_list(dev, &tmp_list); lbtest_mac_dis: /* Disable MAC loopback after the test is completed. */ if (ice_aq_set_mac_loopback(&pf->hw, false, NULL)) @@ -774,6 +777,9 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, struct ice_netdev_priv *np = netdev_priv(netdev); bool if_running = netif_running(netdev); struct ice_pf *pf = np->vsi->back; + struct device *dev; + + dev = ice_pf_to_dev(pf); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { netdev_info(netdev, "offline testing starting\n"); @@ -781,7 +787,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, set_bit(__ICE_TESTING, pf->state); if (ice_active_vfs(pf)) { - dev_warn(&pf->pdev->dev, + dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); data[ICE_ETH_TEST_REG] = 1; data[ICE_ETH_TEST_EEPROM] = 1; @@ -816,8 +822,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test, int status = ice_open(netdev); if (status) { - dev_err(&pf->pdev->dev, - "Could not open device %s, err %d", + dev_err(dev, "Could not open device %s, err %d", pf->int_name, status); } } @@ -962,7 +967,7 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec) } /* Get last SW configuration */ - caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + caps = kzalloc(sizeof(*caps), GFP_KERNEL); if (!caps) return -ENOMEM; @@ -1007,7 +1012,7 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec) } done: - devm_kfree(&vsi->back->pdev->dev, caps); + kfree(caps); return err; } @@ -1083,7 +1088,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) break; } - caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + caps = kzalloc(sizeof(*caps), GFP_KERNEL); if (!caps) return -ENOMEM; @@ -1110,7 +1115,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) fecparam->fec |= ETHTOOL_FEC_OFF; done: - devm_kfree(&vsi->back->pdev->dev, caps); + kfree(caps); return err; } @@ -1155,12 +1160,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; + struct device *dev; int ret = 0; u32 i; if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE)) return -EINVAL; + dev = ice_pf_to_dev(pf); set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS); @@ -1189,7 +1196,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) * events to respond to. 
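The caps/abilities conversions in the ethtool hunks above all follow one pattern: buffers that are allocated, used and freed within a single function move from devm_kzalloc()/devm_kfree() to plain kzalloc()/kfree(), since device-managed lifetime adds bookkeeping but no benefit for allocations that never outlive the call. A minimal sketch of the idiom, using a hypothetical helper name and assuming the driver's internal headers ("ice.h") are in scope:

/* Illustrative sketch only -- example_query_phy_caps() is not a function
 * in this patch. The buffer is freed on every exit path, so there is no
 * reason to tie its lifetime to the struct device with devm_*.
 */
static int example_query_phy_caps(struct ice_pf *pf)
{
	struct ice_aqc_get_phy_caps_data *caps;
	int err = 0;

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);	/* was devm_kzalloc(&pf->pdev->dev, ...) */
	if (!caps)
		return -ENOMEM;

	/* ... fill caps via an AQ query and consume the result ... */

	kfree(caps);					/* was devm_kfree(&pf->pdev->dev, caps) */
	return err;
}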
*/ if (status) - dev_info(&pf->pdev->dev, + dev_info(dev, "Failed to unreg for LLDP events\n"); /* The AQ call to stop the FW LLDP agent will generate @@ -1197,15 +1204,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) */ status = ice_aq_stop_lldp(&pf->hw, true, true, NULL); if (status) - dev_warn(&pf->pdev->dev, - "Fail to stop LLDP agent\n"); + dev_warn(dev, "Fail to stop LLDP agent\n"); /* Use case for having the FW LLDP agent stopped * will likely not need DCB, so failure to init is * not a concern of ethtool */ status = ice_init_pf_dcb(pf, true); if (status) - dev_warn(&pf->pdev->dev, "Fail to init DCB\n"); + dev_warn(dev, "Fail to init DCB\n"); } else { enum ice_status status; bool dcbx_agent_status; @@ -1215,8 +1221,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) */ status = ice_aq_start_lldp(&pf->hw, true, NULL); if (status) - dev_warn(&pf->pdev->dev, - "Fail to start LLDP Agent\n"); + dev_warn(dev, "Fail to start LLDP Agent\n"); /* AQ command to start FW DCBX agent will fail if * the agent is already started @@ -1225,10 +1230,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) &dcbx_agent_status, NULL); if (status) - dev_dbg(&pf->pdev->dev, - "Failed to start FW DCBX\n"); + dev_dbg(dev, "Failed to start FW DCBX\n"); - dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n", + dev_info(dev, "FW DCBX agent is %s\n", dcbx_agent_status ? "ACTIVE" : "DISABLED"); /* Failure to configure MIB change or init DCB is not @@ -1238,7 +1242,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) */ status = ice_init_pf_dcb(pf, true); if (status) - dev_dbg(&pf->pdev->dev, "Fail to init DCB\n"); + dev_dbg(dev, "Fail to init DCB\n"); /* Remove rule to direct LLDP packets to default VSI. * The FW LLDP engine will now be consuming them. 
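Throughout these hunks the open-coded &pf->pdev->dev is replaced by ice_pf_to_dev(pf), or by a local dev variable derived from it. The helper itself is not shown in this diff; presumably it is a trivial accessor along the following lines (an assumption, sketched only to make the substitution readable):

/* Assumed definition, not shown in the diff: the accessor merely hides
 * the pdev dereference behind a single, shorter name.
 */
static inline struct device *ice_pf_to_dev(struct ice_pf *pf)
{
	return &pf->pdev->dev;
}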
@@ -1248,7 +1252,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) /* Register for MIB change events */ status = ice_cfg_lldp_mib_change(&pf->hw, true); if (status) - dev_dbg(&pf->pdev->dev, + dev_dbg(dev, "Fail to enable MIB change events\n"); } } @@ -2141,7 +2145,7 @@ ice_get_link_ksettings(struct net_device *netdev, /* flow control is symmetric and always supported */ ethtool_link_ksettings_add_link_mode(ks, supported, Pause); - caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + caps = kzalloc(sizeof(*caps), GFP_KERNEL); if (!caps) return -ENOMEM; @@ -2199,7 +2203,7 @@ ice_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); done: - devm_kfree(&vsi->back->pdev->dev, caps); + kfree(caps); return err; } @@ -2428,8 +2432,7 @@ ice_set_link_ksettings(struct net_device *netdev, usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); } - abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities), - GFP_KERNEL); + abilities = kzalloc(sizeof(*abilities), GFP_KERNEL); if (!abilities) return -ENOMEM; @@ -2521,7 +2524,7 @@ ice_set_link_ksettings(struct net_device *netdev, } done: - devm_kfree(&pf->pdev->dev, abilities); + kfree(abilities); clear_bit(__ICE_CFG_BUSY, pf->state); return err; @@ -2649,8 +2652,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n", vsi->tx_rings[0]->count, new_tx_cnt); - tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, - sizeof(*tx_rings), GFP_KERNEL); + tx_rings = kcalloc(vsi->num_txq, sizeof(*tx_rings), GFP_KERNEL); if (!tx_rings) { err = -ENOMEM; goto done; @@ -2666,7 +2668,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) if (err) { while (i--) ice_clean_tx_ring(&tx_rings[i]); - devm_kfree(&pf->pdev->dev, tx_rings); + kfree(tx_rings); goto done; } } @@ -2678,8 +2680,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) netdev_info(netdev, "Changing XDP descriptor count from %d to %d\n", vsi->xdp_rings[0]->count, new_tx_cnt); - xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq, - sizeof(*xdp_rings), GFP_KERNEL); + xdp_rings = kcalloc(vsi->num_xdp_txq, sizeof(*xdp_rings), GFP_KERNEL); if (!xdp_rings) { err = -ENOMEM; goto free_tx; @@ -2695,7 +2696,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) if (err) { while (i--) ice_clean_tx_ring(&xdp_rings[i]); - devm_kfree(&pf->pdev->dev, xdp_rings); + kfree(xdp_rings); goto free_tx; } ice_set_ring_xdp(&xdp_rings[i]); @@ -2709,8 +2710,7 @@ process_rx: netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n", vsi->rx_rings[0]->count, new_rx_cnt); - rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq, - sizeof(*rx_rings), GFP_KERNEL); + rx_rings = kcalloc(vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL); if (!rx_rings) { err = -ENOMEM; goto done; @@ -2740,7 +2740,7 @@ rx_unwind: i--; ice_free_rx_ring(&rx_rings[i]); } - devm_kfree(&pf->pdev->dev, rx_rings); + kfree(rx_rings); err = -ENOMEM; goto free_tx; } @@ -2758,7 +2758,7 @@ process_link: ice_free_tx_ring(vsi->tx_rings[i]); *vsi->tx_rings[i] = tx_rings[i]; } - devm_kfree(&pf->pdev->dev, tx_rings); + kfree(tx_rings); } if (rx_rings) { @@ -2776,7 +2776,7 @@ process_link: rx_rings[i].next_to_alloc = 0; *vsi->rx_rings[i] = rx_rings[i]; } - devm_kfree(&pf->pdev->dev, rx_rings); + kfree(rx_rings); } if (xdp_rings) { @@ -2784,7 +2784,7 @@ process_link: 
ice_free_tx_ring(vsi->xdp_rings[i]); *vsi->xdp_rings[i] = xdp_rings[i]; } - devm_kfree(&pf->pdev->dev, xdp_rings); + kfree(xdp_rings); } vsi->num_tx_desc = new_tx_cnt; @@ -2798,7 +2798,7 @@ free_tx: if (tx_rings) { ice_for_each_txq(vsi, i) ice_free_tx_ring(&tx_rings[i]); - devm_kfree(&pf->pdev->dev, tx_rings); + kfree(tx_rings); } done: @@ -2846,7 +2846,6 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_port_info *pi = np->vsi->port_info; struct ice_aqc_get_phy_caps_data *pcaps; - struct ice_vsi *vsi = np->vsi; struct ice_dcbx_cfg *dcbx_cfg; enum ice_status status; @@ -2856,8 +2855,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) dcbx_cfg = &pi->local_dcbx_cfg; - pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps), - GFP_KERNEL); + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return; @@ -2880,7 +2878,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) pause->rx_pause = 1; out: - devm_kfree(&vsi->back->pdev->dev, pcaps); + kfree(pcaps); } /** @@ -3061,7 +3059,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) return -EIO; } - lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; @@ -3074,7 +3072,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) indir[i] = (u32)(lut[i]); out: - devm_kfree(&pf->pdev->dev, lut); + kfree(lut); return ret; } @@ -3095,8 +3093,10 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; + struct device *dev; u8 *seed = NULL; + dev = ice_pf_to_dev(pf); if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; @@ -3109,8 +3109,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, if (key) { if (!vsi->rss_hkey_user) { vsi->rss_hkey_user = - devm_kzalloc(&pf->pdev->dev, - ICE_VSIQF_HKEY_ARRAY_SIZE, + devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE, GFP_KERNEL); if (!vsi->rss_hkey_user) return -ENOMEM; @@ -3120,8 +3119,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, } if (!vsi->rss_lut_user) { - vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev, - vsi->rss_table_size, + vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size, GFP_KERNEL); if (!vsi->rss_lut_user) return -ENOMEM; @@ -3144,6 +3142,188 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, return 0; } +/** + * ice_get_max_txq - return the maximum number of Tx queues for in a PF + * @pf: PF structure + */ +static int ice_get_max_txq(struct ice_pf *pf) +{ + return min_t(int, num_online_cpus(), + pf->hw.func_caps.common_cap.num_txq); +} + +/** + * ice_get_max_rxq - return the maximum number of Rx queues for in a PF + * @pf: PF structure + */ +static int ice_get_max_rxq(struct ice_pf *pf) +{ + return min_t(int, num_online_cpus(), + pf->hw.func_caps.common_cap.num_rxq); +} + +/** + * ice_get_combined_cnt - return the current number of combined channels + * @vsi: PF VSI pointer + * + * Go through all queue vectors and count ones that have both Rx and Tx ring + * attached + */ +static u32 ice_get_combined_cnt(struct ice_vsi *vsi) +{ + u32 combined = 0; + int q_idx; + + ice_for_each_q_vector(vsi, q_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; + + if 
(q_vector->rx.ring && q_vector->tx.ring) + combined++; + } + + return combined; +} + +/** + * ice_get_channels - get the current and max supported channels + * @dev: network interface device structure + * @ch: ethtool channel data structure + */ +static void +ice_get_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + + /* check to see if VSI is active */ + if (test_bit(__ICE_DOWN, vsi->state)) + return; + + /* report maximum channels */ + ch->max_rx = ice_get_max_rxq(pf); + ch->max_tx = ice_get_max_txq(pf); + ch->max_combined = min_t(int, ch->max_rx, ch->max_tx); + + /* report current channels */ + ch->combined_count = ice_get_combined_cnt(vsi); + ch->rx_count = vsi->num_rxq - ch->combined_count; + ch->tx_count = vsi->num_txq - ch->combined_count; +} + +/** + * ice_vsi_set_dflt_rss_lut - set default RSS LUT with requested RSS size + * @vsi: VSI to reconfigure RSS LUT on + * @req_rss_size: requested range of queue numbers for hashing + * + * Set the VSI's RSS parameters, configure the RSS LUT based on these. + */ +static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size) +{ + struct ice_pf *pf = vsi->back; + enum ice_status status; + struct device *dev; + struct ice_hw *hw; + int err = 0; + u8 *lut; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + + if (!req_rss_size) + return -EINVAL; + + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + /* set RSS LUT parameters */ + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { + vsi->rss_size = 1; + } else { + struct ice_hw_common_caps *caps = &hw->func_caps.common_cap; + + vsi->rss_size = min_t(int, req_rss_size, + BIT(caps->rss_table_entry_width)); + } + + /* create/set RSS LUT */ + ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size); + status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut, + vsi->rss_table_size); + if (status) { + dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n", + status, hw->adminq.rq_last_status); + err = -EIO; + } + + kfree(lut); + return err; +} + +/** + * ice_set_channels - set the number channels + * @dev: network interface device structure + * @ch: ethtool channel data structure + */ +static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + int new_rx = 0, new_tx = 0; + u32 curr_combined; + + /* do not support changing channels in Safe Mode */ + if (ice_is_safe_mode(pf)) { + netdev_err(dev, "Changing channel in Safe Mode is not supported\n"); + return -EOPNOTSUPP; + } + /* do not support changing other_count */ + if (ch->other_count) + return -EINVAL; + + curr_combined = ice_get_combined_cnt(vsi); + + /* these checks are for cases where user didn't specify a particular + * value on cmd line but we get non-zero value anyway via + * get_channels(); look at ethtool.c in ethtool repository (the user + * space part), particularly, do_schannels() routine + */ + if (ch->rx_count == vsi->num_rxq - curr_combined) + ch->rx_count = 0; + if (ch->tx_count == vsi->num_txq - curr_combined) + ch->tx_count = 0; + if (ch->combined_count == curr_combined) + ch->combined_count = 0; + + if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) { + netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n"); + return -EINVAL; + } + + new_rx = ch->combined_count + ch->rx_count; + new_tx = 
ch->combined_count + ch->tx_count; + + if (new_rx > ice_get_max_rxq(pf)) { + netdev_err(dev, "Maximum allowed Rx channels is %d\n", + ice_get_max_rxq(pf)); + return -EINVAL; + } + if (new_tx > ice_get_max_txq(pf)) { + netdev_err(dev, "Maximum allowed Tx channels is %d\n", + ice_get_max_txq(pf)); + return -EINVAL; + } + + ice_vsi_recfg_qs(vsi, new_rx, new_tx); + + if (new_rx && !netif_is_rxfh_configured(dev)) + return ice_vsi_set_dflt_rss_lut(vsi, new_rx); + + return 0; +} + enum ice_container_type { ICE_RX_CONTAINER, ICE_TX_CONTAINER, @@ -3183,7 +3363,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type, ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC; break; default: - dev_dbg(&pf->pdev->dev, "Invalid c_type %d\n", c_type); + dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type); return -EINVAL; } @@ -3323,7 +3503,8 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, break; default: - dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type); + dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n", + c_type); return -EINVAL; } @@ -3420,10 +3601,17 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct ice_vsi *vsi = np->vsi; if (q_num < 0) { - int i; + int v_idx; + + ice_for_each_q_vector(vsi, v_idx) { + /* In some cases if DCB is configured the num_[rx|tx]q + * can be less than vsi->num_q_vectors. This check + * accounts for that so we don't report a false failure + */ + if (v_idx >= vsi->num_rxq && v_idx >= vsi->num_txq) + goto set_complete; - ice_for_each_q_vector(vsi, i) { - if (ice_set_q_coalesce(vsi, ec, i)) + if (ice_set_q_coalesce(vsi, ec, v_idx)) return -EINVAL; } goto set_complete; @@ -3625,6 +3813,8 @@ static const struct ethtool_ops ice_ethtool_ops = { .get_rxfh_indir_size = ice_get_rxfh_indir_size, .get_rxfh = ice_get_rxfh, .set_rxfh = ice_set_rxfh, + .get_channels = ice_get_channels, + .set_channels = ice_set_channels, .get_ts_info = ethtool_op_get_ts_info, .get_per_queue_coalesce = ice_get_per_q_coalesce, .set_per_queue_coalesce = ice_set_per_q_coalesce, @@ -3650,6 +3840,7 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = { .get_ringparam = ice_get_ringparam, .set_ringparam = ice_set_ringparam, .nway_reset = ice_nway_reset, + .get_channels = ice_get_channels, }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index d71f7ce0a265..e7449248fab4 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -52,26 +52,29 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; + struct device *dev; + + dev = ice_pf_to_dev(pf); /* allocate memory for both Tx and Rx ring pointers */ - vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, + vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq, sizeof(*vsi->tx_rings), GFP_KERNEL); if (!vsi->tx_rings) return -ENOMEM; - vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, + vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq, sizeof(*vsi->rx_rings), GFP_KERNEL); if (!vsi->rx_rings) goto err_rings; /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */ - vsi->txq_map = devm_kcalloc(&pf->pdev->dev, (2 * vsi->alloc_txq), + vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq), sizeof(*vsi->txq_map), GFP_KERNEL); if (!vsi->txq_map) goto err_txq_map; - vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, 
vsi->alloc_rxq, + vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq, sizeof(*vsi->rxq_map), GFP_KERNEL); if (!vsi->rxq_map) goto err_rxq_map; @@ -81,7 +84,7 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) return 0; /* allocate memory for q_vector pointers */ - vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors, + vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors, sizeof(*vsi->q_vectors), GFP_KERNEL); if (!vsi->q_vectors) goto err_vectors; @@ -89,13 +92,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) return 0; err_vectors: - devm_kfree(&pf->pdev->dev, vsi->rxq_map); + devm_kfree(dev, vsi->rxq_map); err_rxq_map: - devm_kfree(&pf->pdev->dev, vsi->txq_map); + devm_kfree(dev, vsi->txq_map); err_txq_map: - devm_kfree(&pf->pdev->dev, vsi->rx_rings); + devm_kfree(dev, vsi->rx_rings); err_rings: - devm_kfree(&pf->pdev->dev, vsi->tx_rings); + devm_kfree(dev, vsi->tx_rings); return -ENOMEM; } @@ -139,15 +142,24 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) case ICE_VSI_PF: vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf), num_online_cpus()); + if (vsi->req_txq) { + vsi->alloc_txq = vsi->req_txq; + vsi->num_txq = vsi->req_txq; + } pf->num_lan_tx = vsi->alloc_txq; /* only 1 Rx queue unless RSS is enabled */ - if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { vsi->alloc_rxq = 1; - else + } else { vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf), num_online_cpus()); + if (vsi->req_rxq) { + vsi->alloc_rxq = vsi->req_rxq; + vsi->num_rxq = vsi->req_rxq; + } + } pf->num_lan_rx = vsi->alloc_rxq; @@ -169,7 +181,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) vsi->alloc_rxq = 1; break; default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); + dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type); break; } @@ -215,7 +227,7 @@ void ice_vsi_delete(struct ice_vsi *vsi) struct ice_vsi_ctx *ctxt; enum ice_status status; - ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL); + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return; @@ -227,10 +239,10 @@ void ice_vsi_delete(struct ice_vsi *vsi) status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); if (status) - dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", - vsi->vsi_num); + dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n", + vsi->vsi_num, status); - devm_kfree(&pf->pdev->dev, ctxt); + kfree(ctxt); } /** @@ -240,26 +252,29 @@ void ice_vsi_delete(struct ice_vsi *vsi) static void ice_vsi_free_arrays(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; + struct device *dev; + + dev = ice_pf_to_dev(pf); /* free the ring and vector containers */ if (vsi->q_vectors) { - devm_kfree(&pf->pdev->dev, vsi->q_vectors); + devm_kfree(dev, vsi->q_vectors); vsi->q_vectors = NULL; } if (vsi->tx_rings) { - devm_kfree(&pf->pdev->dev, vsi->tx_rings); + devm_kfree(dev, vsi->tx_rings); vsi->tx_rings = NULL; } if (vsi->rx_rings) { - devm_kfree(&pf->pdev->dev, vsi->rx_rings); + devm_kfree(dev, vsi->rx_rings); vsi->rx_rings = NULL; } if (vsi->txq_map) { - devm_kfree(&pf->pdev->dev, vsi->txq_map); + devm_kfree(dev, vsi->txq_map); vsi->txq_map = NULL; } if (vsi->rxq_map) { - devm_kfree(&pf->pdev->dev, vsi->rxq_map); + devm_kfree(dev, vsi->rxq_map); vsi->rxq_map = NULL; } } @@ -276,6 +291,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) int ice_vsi_clear(struct ice_vsi *vsi) { struct ice_pf *pf = NULL; + struct device *dev; if (!vsi) return 0; @@ -284,10 +300,10 @@ int 
ice_vsi_clear(struct ice_vsi *vsi) return -EINVAL; pf = vsi->back; + dev = ice_pf_to_dev(pf); if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) { - dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n", - vsi->idx); + dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx); return -EINVAL; } @@ -300,7 +316,7 @@ int ice_vsi_clear(struct ice_vsi *vsi) ice_vsi_free_arrays(vsi); mutex_unlock(&pf->sw_mutex); - devm_kfree(&pf->pdev->dev, vsi); + devm_kfree(dev, vsi); return 0; } @@ -333,6 +349,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) static struct ice_vsi * ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) { + struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi = NULL; /* Need to protect the allocation of the VSIs at the PF level */ @@ -343,11 +360,11 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) * is available to be populated */ if (pf->next_vsi == ICE_NO_VSI) { - dev_dbg(&pf->pdev->dev, "out of VSI slots!\n"); + dev_dbg(dev, "out of VSI slots!\n"); goto unlock_pf; } - vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL); + vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL); if (!vsi) goto unlock_pf; @@ -379,7 +396,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) goto err_rings; break; default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); + dev_warn(dev, "Unknown VSI type %d\n", vsi->type); goto unlock_pf; } @@ -392,7 +409,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id) goto unlock_pf; err_rings: - devm_kfree(&pf->pdev->dev, vsi); + devm_kfree(dev, vsi); vsi = NULL; unlock_pf: mutex_unlock(&pf->sw_mutex); @@ -481,14 +498,15 @@ bool ice_is_safe_mode(struct ice_pf *pf) */ static void ice_rss_clean(struct ice_vsi *vsi) { - struct ice_pf *pf; + struct ice_pf *pf = vsi->back; + struct device *dev; - pf = vsi->back; + dev = ice_pf_to_dev(pf); if (vsi->rss_hkey_user) - devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user); + devm_kfree(dev, vsi->rss_hkey_user); if (vsi->rss_lut_user) - devm_kfree(&pf->pdev->dev, vsi->rss_lut_user); + devm_kfree(dev, vsi->rss_lut_user); } /** @@ -526,7 +544,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) case ICE_VSI_LB: break; default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", + dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type); break; } @@ -630,7 +648,9 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) else max_rss = ICE_MAX_SMALL_RSS_QS; qcount_rx = min_t(int, rx_numq_tc, max_rss); - qcount_rx = min_t(int, qcount_rx, vsi->rss_size); + if (!vsi->req_rxq) + qcount_rx = min_t(int, qcount_rx, + vsi->rss_size); } } @@ -702,9 +722,11 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) { u8 lut_type, hash_type; + struct device *dev; struct ice_pf *pf; pf = vsi->back; + dev = ice_pf_to_dev(pf); switch (vsi->type) { case ICE_VSI_PF: @@ -718,11 +740,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; case ICE_VSI_LB: - dev_dbg(&pf->pdev->dev, "Unsupported VSI type %s\n", + dev_dbg(dev, "Unsupported VSI type %s\n", ice_vsi_type_str(vsi->type)); return; default: - dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); + dev_warn(dev, "Unknown VSI type %d\n", vsi->type); return; } @@ -735,18 +757,21 @@ static void ice_set_rss_vsi_ctx(struct 
ice_vsi_ctx *ctxt, struct ice_vsi *vsi) /** * ice_vsi_init - Create and initialize a VSI * @vsi: the VSI being configured + * @init_vsi: is this call creating a VSI * * This initializes a VSI context depending on the VSI type to be added and * passes it down to the add_vsi aq command to create a new VSI. */ -static int ice_vsi_init(struct ice_vsi *vsi) +static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) { struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; struct ice_vsi_ctx *ctxt; + struct device *dev; int ret = 0; - ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL); + dev = ice_pf_to_dev(pf); + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return -ENOMEM; @@ -763,7 +788,8 @@ static int ice_vsi_init(struct ice_vsi *vsi) ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id; break; default: - return -ENODEV; + ret = -ENODEV; + goto out; } ice_set_dflt_vsi_ctx(ctxt); @@ -772,11 +798,24 @@ static int ice_vsi_init(struct ice_vsi *vsi) ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; /* Set LUT type and HASH type if RSS is enabled */ - if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) + if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { ice_set_rss_vsi_ctx(ctxt, vsi); + /* if updating VSI context, make sure to set valid_section: + * to indicate which section of VSI context being updated + */ + if (!init_vsi) + ctxt->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); + } ctxt->info.sw_id = vsi->port_info->sw_id; ice_vsi_setup_q_map(vsi, ctxt); + if (!init_vsi) /* means VSI being updated */ + /* must to indicate which section of VSI context are + * being modified + */ + ctxt->info.valid_sections |= + cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); /* Enable MAC Antispoof with new VSI being initialized or updated */ if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) { @@ -793,11 +832,20 @@ static int ice_vsi_init(struct ice_vsi *vsi) cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); } - ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); - if (ret) { - dev_err(&pf->pdev->dev, - "Add VSI failed, err %d\n", ret); - return -EIO; + if (init_vsi) { + ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(dev, "Add VSI failed, err %d\n", ret); + ret = -EIO; + goto out; + } + } else { + ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (ret) { + dev_err(dev, "Update VSI failed, err %d\n", ret); + ret = -EIO; + goto out; + } } /* keep context for update VSI operations */ @@ -806,7 +854,8 @@ static int ice_vsi_init(struct ice_vsi *vsi) /* record VSI number returned */ vsi->vsi_num = ctxt->vsi_num; - devm_kfree(&pf->pdev->dev, ctxt); +out: + kfree(ctxt); return ret; } @@ -823,14 +872,16 @@ static int ice_vsi_init(struct ice_vsi *vsi) static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; + struct device *dev; u16 num_q_vectors; + dev = ice_pf_to_dev(pf); /* SRIOV doesn't grab irq_tracker entries for each VSI */ if (vsi->type == ICE_VSI_VF) return 0; if (vsi->base_vector) { - dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", + dev_dbg(dev, "VSI %d has non-zero base vector %d\n", vsi->vsi_num, vsi->base_vector); return -EEXIST; } @@ -840,7 +891,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { - dev_err(&pf->pdev->dev, + dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n", num_q_vectors, vsi->vsi_num, vsi->base_vector); return -ENOENT; @@ -883,8 +934,10 @@ static void 
ice_vsi_clear_rings(struct ice_vsi *vsi) static int ice_vsi_alloc_rings(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; + struct device *dev; int i; + dev = ice_pf_to_dev(pf); /* Allocate Tx rings */ for (i = 0; i < vsi->alloc_txq; i++) { struct ice_ring *ring; @@ -899,7 +952,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->reg_idx = vsi->txq_map[i]; ring->ring_active = false; ring->vsi = vsi; - ring->dev = &pf->pdev->dev; + ring->dev = dev; ring->count = vsi->num_tx_desc; vsi->tx_rings[i] = ring; } @@ -918,7 +971,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->ring_active = false; ring->vsi = vsi; ring->netdev = vsi->netdev; - ring->dev = &pf->pdev->dev; + ring->dev = dev; ring->count = vsi->num_rx_desc; vsi->rx_rings[i] = ring; } @@ -944,8 +997,7 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) int err = 0; u8 *lut; - lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size, - GFP_KERNEL); + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; @@ -958,7 +1010,7 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena) } err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size); - devm_kfree(&vsi->back->pdev->dev, lut); + kfree(lut); return err; } @@ -971,12 +1023,14 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) struct ice_aqc_get_set_rss_keys *key; struct ice_pf *pf = vsi->back; enum ice_status status; + struct device *dev; int err = 0; u8 *lut; + dev = ice_pf_to_dev(pf); vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq); - lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL); + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); if (!lut) return -ENOMEM; @@ -989,13 +1043,12 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) vsi->rss_table_size); if (status) { - dev_err(&pf->pdev->dev, - "set_rss_lut failed, error %d\n", status); + dev_err(dev, "set_rss_lut failed, error %d\n", status); err = -EIO; goto ice_vsi_cfg_rss_exit; } - key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL); + key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) { err = -ENOMEM; goto ice_vsi_cfg_rss_exit; @@ -1012,14 +1065,13 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi) status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key); if (status) { - dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n", - status); + dev_err(dev, "set_rss_key failed, error %d\n", status); err = -EIO; } - devm_kfree(&pf->pdev->dev, key); + kfree(key); ice_vsi_cfg_rss_exit: - devm_kfree(&pf->pdev->dev, lut); + kfree(lut); return err; } @@ -1039,7 +1091,7 @@ int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list, struct ice_fltr_list_entry *tmp; struct ice_pf *pf = vsi->back; - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC); + tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC); if (!tmp) return -ENOMEM; @@ -1131,9 +1183,11 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) struct ice_pf *pf = vsi->back; LIST_HEAD(tmp_add_list); enum ice_status status; + struct device *dev; int err = 0; - tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL); + dev = ice_pf_to_dev(pf); + tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; @@ -1150,11 +1204,11 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid) status = ice_add_vlan(&pf->hw, &tmp_add_list); if (status) { err = -ENODEV; - dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n", - vid, vsi->vsi_num); + dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid, + vsi->vsi_num); } - 
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + ice_free_fltr_list(dev, &tmp_add_list); return err; } @@ -1171,9 +1225,11 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) struct ice_pf *pf = vsi->back; LIST_HEAD(tmp_add_list); enum ice_status status; + struct device *dev; int err = 0; - list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); + dev = ice_pf_to_dev(pf); + list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL); if (!list) return -ENOMEM; @@ -1189,17 +1245,17 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) status = ice_remove_vlan(&pf->hw, &tmp_add_list); if (status == ICE_ERR_DOES_NOT_EXIST) { - dev_dbg(&pf->pdev->dev, + dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n", vid, vsi->vsi_num, status); } else if (status) { - dev_err(&pf->pdev->dev, + dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n", vid, vsi->vsi_num, status); err = -EIO; } - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + ice_free_fltr_list(dev, &tmp_add_list); return err; } @@ -1397,13 +1453,12 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) */ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) { - struct device *dev = &vsi->back->pdev->dev; struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; enum ice_status status; int ret = 0; - ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return -ENOMEM; @@ -1421,7 +1476,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", + dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", status, hw->adminq.sq_last_status); ret = -EIO; goto out; @@ -1429,7 +1484,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) vsi->info.vlan_flags = ctxt->info.vlan_flags; out: - devm_kfree(dev, ctxt); + kfree(ctxt); return ret; } @@ -1440,13 +1495,12 @@ out: */ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) { - struct device *dev = &vsi->back->pdev->dev; struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; enum ice_status status; int ret = 0; - ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return -ENOMEM; @@ -1468,7 +1522,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", + dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", ena, status, hw->adminq.sq_last_status); ret = -EIO; goto out; @@ -1476,7 +1530,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) vsi->info.vlan_flags = ctxt->info.vlan_flags; out: - devm_kfree(dev, ctxt); + kfree(ctxt); return ret; } @@ -1569,7 +1623,6 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) { struct ice_vsi_ctx *ctxt; - struct device *dev; struct ice_pf *pf; int status; @@ -1577,8 +1630,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) return -EINVAL; pf = vsi->back; - dev = &pf->pdev->dev; - ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return -ENOMEM; @@ -1612,11 +1664,11 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) 
vsi->info.sec_flags = ctxt->info.sec_flags; vsi->info.sw_flags2 = ctxt->info.sw_flags2; - devm_kfree(dev, ctxt); + kfree(ctxt); return 0; err_out: - devm_kfree(dev, ctxt); + kfree(ctxt); return -EIO; } @@ -1685,8 +1737,10 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule) struct ice_pf *pf = vsi->back; LIST_HEAD(tmp_add_list); enum ice_status status; + struct device *dev; - list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); + dev = ice_pf_to_dev(pf); + list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL); if (!list) return; @@ -1706,11 +1760,11 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule) status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); if (status) - dev_err(&pf->pdev->dev, + dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n", vsi->vsi_num, status); - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + ice_free_fltr_list(dev, &tmp_add_list); } /** @@ -1725,8 +1779,10 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) struct ice_pf *pf = vsi->back; LIST_HEAD(tmp_add_list); enum ice_status status; + struct device *dev; - list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); + dev = ice_pf_to_dev(pf); + list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL); if (!list) return; @@ -1753,12 +1809,11 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); if (status) - dev_err(&pf->pdev->dev, - "Fail %s %s LLDP rule on VSI %i error: %d\n", + dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", create ? "adding" : "removing", tx ? "TX" : "RX", vsi->vsi_num, status); - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + ice_free_fltr_list(dev, &tmp_add_list); } /** @@ -1780,7 +1835,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, enum ice_vsi_type type, u16 vf_id) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); enum ice_status status; struct ice_vsi *vsi; int ret, i; @@ -1816,7 +1871,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ice_vsi_set_tc_cfg(vsi); /* create the VSI */ - ret = ice_vsi_init(vsi); + ret = ice_vsi_init(vsi, true); if (ret) goto unroll_get_qs; @@ -1889,8 +1944,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { - dev_err(&pf->pdev->dev, - "VSI %d failed lan queue config, error %d\n", + dev_err(dev, "VSI %d failed lan queue config, error %d\n", vsi->vsi_num, status); goto unroll_vector_base; } @@ -2002,8 +2056,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(irq_num, NULL); synchronize_irq(irq_num); - devm_free_irq(&pf->pdev->dev, irq_num, - vsi->q_vectors[i]); + devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); } } @@ -2189,7 +2242,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) return -EINVAL; if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { - dev_err(&pf->pdev->dev, + dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n", needed, res->num_entries, id); return -EINVAL; @@ -2351,10 +2404,11 @@ int ice_vsi_release(struct ice_vsi *vsi) /** * ice_vsi_rebuild - Rebuild VSI after reset * @vsi: VSI to be rebuild + * @init_vsi: is this an initialization or a reconfigure of the VSI * * Returns 0 on success and negative value on failure */ -int 
ice_vsi_rebuild(struct ice_vsi *vsi) +int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_vf *vf = NULL; @@ -2406,7 +2460,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ice_vsi_set_tc_cfg(vsi); /* Initialize VSI struct elements and create VSI in FW */ - ret = ice_vsi_init(vsi); + ret = ice_vsi_init(vsi, init_vsi); if (ret < 0) goto err_vsi; @@ -2471,10 +2525,15 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { - dev_err(&pf->pdev->dev, + dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n", vsi->vsi_num, status); - goto err_vectors; + if (init_vsi) { + ret = -EIO; + goto err_vectors; + } else { + return ice_schedule_reset(pf, ICE_RESET_PFR); + } } return 0; @@ -2534,9 +2593,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) struct ice_vsi_ctx *ctx; struct ice_pf *pf = vsi->back; enum ice_status status; + struct device *dev; int i, ret = 0; u8 num_tc = 0; + dev = ice_pf_to_dev(pf); + ice_for_each_traffic_class(i) { /* build bitmap of enabled TCs */ if (ena_tc & BIT(i)) @@ -2548,7 +2610,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) vsi->tc_cfg.ena_tc = ena_tc; vsi->tc_cfg.numtc = num_tc; - ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -2561,7 +2623,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); if (status) { - dev_info(&pf->pdev->dev, "Failed VSI Update\n"); + dev_info(dev, "Failed VSI Update\n"); ret = -EIO; goto out; } @@ -2570,8 +2632,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) max_txqs); if (status) { - dev_err(&pf->pdev->dev, - "VSI %d failed TC config, error %d\n", + dev_err(dev, "VSI %d failed TC config, error %d\n", vsi->vsi_num, status); ret = -EIO; goto out; @@ -2581,7 +2642,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) ice_vsi_cfg_netdev_tc(vsi, ena_tc); out: - devm_kfree(&pf->pdev->dev, ctx); + kfree(ctx); return ret; } #endif /* CONFIG_DCB */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index e86aa60c0254..6e31e30aba39 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -73,7 +73,7 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id); int ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id); -int ice_vsi_rebuild(struct ice_vsi *vsi); +int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi); bool ice_is_reset_in_progress(unsigned long *state); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5681e3be81f2..69bff085acf7 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -44,6 +44,7 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); static struct workqueue_struct *ice_wq; static const struct net_device_ops ice_netdev_safe_mode_ops; static const struct net_device_ops ice_netdev_ops; +static int ice_vsi_open(struct ice_vsi *vsi); static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); @@ -161,7 +162,7 @@ unregister: * had an error */ if (status && vsi->netdev->reg_state == NETREG_REGISTERED) { - dev_err(&pf->pdev->dev, + dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error 
%d. Unregistering device\n", status); unregister_netdev(vsi->netdev); @@ -495,7 +496,7 @@ ice_prepare_for_reset(struct ice_pf *pf) */ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) { - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; dev_dbg(dev, "reset_type 0x%x requested\n", reset_type); @@ -724,7 +725,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) an = "False"; /* Get FEC mode requested based on PHY caps last SW configuration */ - caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL); + caps = kzalloc(sizeof(*caps), GFP_KERNEL); if (!caps) { fec_req = "Unknown"; goto done; @@ -744,7 +745,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) else fec_req = "NONE"; - devm_kfree(&vsi->back->pdev->dev, caps); + kfree(caps); done: netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n", @@ -792,6 +793,7 @@ static int ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, u16 link_speed) { + struct device *dev = ice_pf_to_dev(pf); struct ice_phy_info *phy_info; struct ice_vsi *vsi; u16 old_link_speed; @@ -809,7 +811,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, */ result = ice_update_link_info(pi); if (result) - dev_dbg(&pf->pdev->dev, + dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n", pi->lport); @@ -828,7 +830,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, result = ice_aq_set_link_restart_an(pi, false, NULL); if (result) { - dev_dbg(&pf->pdev->dev, + dev_dbg(dev, "Failed to set link down, VSI %d error %d\n", vsi->vsi_num, result); return result; @@ -924,7 +926,7 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) !!(link_data->link_info & ICE_AQ_LINK_UP), le16_to_cpu(link_data->link_speed)); if (status) - dev_dbg(&pf->pdev->dev, + dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n", status); return status; @@ -937,6 +939,7 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) */ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) { + struct device *dev = ice_pf_to_dev(pf); struct ice_rq_event_info event; struct ice_hw *hw = &pf->hw; struct ice_ctl_q_info *cq; @@ -958,8 +961,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) qtype = "Mailbox"; break; default: - dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n", - q_type); + dev_warn(dev, "Unknown control queue type 0x%x\n", q_type); return 0; } @@ -971,15 +973,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) PF_FW_ARQLEN_ARQCRIT_M)) { oldval = val; if (val & PF_FW_ARQLEN_ARQVFE_M) - dev_dbg(&pf->pdev->dev, - "%s Receive Queue VF Error detected\n", qtype); + dev_dbg(dev, "%s Receive Queue VF Error detected\n", + qtype); if (val & PF_FW_ARQLEN_ARQOVFL_M) { - dev_dbg(&pf->pdev->dev, + dev_dbg(dev, "%s Receive Queue Overflow Error detected\n", qtype); } if (val & PF_FW_ARQLEN_ARQCRIT_M) - dev_dbg(&pf->pdev->dev, + dev_dbg(dev, "%s Receive Queue Critical Error detected\n", qtype); val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | @@ -993,16 +995,14 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) PF_FW_ATQLEN_ATQCRIT_M)) { oldval = val; if (val & PF_FW_ATQLEN_ATQVFE_M) - dev_dbg(&pf->pdev->dev, + dev_dbg(dev, "%s Send Queue VF Error detected\n", qtype); if (val & 
PF_FW_ATQLEN_ATQOVFL_M) { - dev_dbg(&pf->pdev->dev, - "%s Send Queue Overflow Error detected\n", + dev_dbg(dev, "%s Send Queue Overflow Error detected\n", qtype); } if (val & PF_FW_ATQLEN_ATQCRIT_M) - dev_dbg(&pf->pdev->dev, - "%s Send Queue Critical Error detected\n", + dev_dbg(dev, "%s Send Queue Critical Error detected\n", qtype); val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | PF_FW_ATQLEN_ATQCRIT_M); @@ -1011,8 +1011,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) } event.buf_len = cq->rq_buf_size; - event.msg_buf = devm_kzalloc(&pf->pdev->dev, event.buf_len, - GFP_KERNEL); + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return 0; @@ -1024,8 +1023,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) if (ret == ICE_ERR_AQ_NO_WORK) break; if (ret) { - dev_err(&pf->pdev->dev, - "%s Receive Queue event error %d\n", qtype, + dev_err(dev, "%s Receive Queue event error %d\n", qtype, ret); break; } @@ -1035,8 +1033,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) switch (opcode) { case ice_aqc_opc_get_link_status: if (ice_handle_link_event(pf, &event)) - dev_err(&pf->pdev->dev, - "Could not handle link event\n"); + dev_err(dev, "Could not handle link event\n"); break; case ice_mbx_opc_send_msg_to_pf: ice_vc_process_vf_msg(pf, &event); @@ -1048,14 +1045,14 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) ice_dcb_process_lldp_set_mib_change(pf, &event); break; default: - dev_dbg(&pf->pdev->dev, + dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", qtype, opcode); break; } } while (pending && (i++ < ICE_DFLT_IRQ_WORK)); - devm_kfree(&pf->pdev->dev, event.msg_buf); + kfree(event.msg_buf); return pending && (i == ICE_DFLT_IRQ_WORK); } @@ -1199,6 +1196,7 @@ static void ice_service_timer(struct timer_list *t) */ static void ice_handle_mdd_event(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; bool mdd_detected = false; u32 reg; @@ -1220,7 +1218,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) GL_MDET_TX_PQM_QNUM_S); if (netif_msg_tx_err(pf)) - dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); wr32(hw, GL_MDET_TX_PQM, 0xffffffff); mdd_detected = true; @@ -1238,7 +1236,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) GL_MDET_TX_TCLAN_QNUM_S); if (netif_msg_rx_err(pf)) - dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); mdd_detected = true; @@ -1256,7 +1254,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) GL_MDET_RX_QNUM_S); if (netif_msg_rx_err(pf)) - dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", + dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); wr32(hw, GL_MDET_RX, 0xffffffff); mdd_detected = true; @@ -1268,21 +1266,21 @@ static void ice_handle_mdd_event(struct ice_pf *pf) reg = rd32(hw, PF_MDET_TX_PQM); if (reg & PF_MDET_TX_PQM_VALID_M) { wr32(hw, PF_MDET_TX_PQM, 0xFFFF); - dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + dev_info(dev, "TX driver issue detected, PF reset issued\n"); 
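Functions that log more than once follow a consistent shape in this patch: a local struct device *dev = ice_pf_to_dev(pf); is hoisted near the top, every dev_err()/dev_dbg()/dev_info() call takes dev, and many previously wrapped format strings now fit on one line. A condensed illustration of the idiom (the function name is hypothetical; the pattern is the one used in __ice_clean_ctrlq() and ice_handle_mdd_event() above):

/* Hypothetical function illustrating the hoisted-dev logging idiom. */
static void example_report_status(struct ice_pf *pf, bool ok)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (ok)
		dev_info(dev, "operation completed\n");
	else
		dev_err(dev, "operation failed\n");
}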
pf_mdd_detected = true; } reg = rd32(hw, PF_MDET_TX_TCLAN); if (reg & PF_MDET_TX_TCLAN_VALID_M) { wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); - dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + dev_info(dev, "TX driver issue detected, PF reset issued\n"); pf_mdd_detected = true; } reg = rd32(hw, PF_MDET_RX); if (reg & PF_MDET_RX_VALID_M) { wr32(hw, PF_MDET_RX, 0xFFFF); - dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); + dev_info(dev, "RX driver issue detected, PF reset issued\n"); pf_mdd_detected = true; } /* Queue belongs to the PF initiate a reset */ @@ -1302,7 +1300,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_TX_PQM_VALID_M) { wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); vf_mdd_detected = true; - dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + dev_info(dev, "TX driver issue detected on VF %d\n", i); } @@ -1310,7 +1308,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_TX_TCLAN_VALID_M) { wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); vf_mdd_detected = true; - dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + dev_info(dev, "TX driver issue detected on VF %d\n", i); } @@ -1318,7 +1316,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_TX_TDPU_VALID_M) { wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); vf_mdd_detected = true; - dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + dev_info(dev, "TX driver issue detected on VF %d\n", i); } @@ -1326,7 +1324,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (reg & VP_MDET_RX_VALID_M) { wr32(hw, VP_MDET_RX(i), 0xFFFF); vf_mdd_detected = true; - dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", + dev_info(dev, "RX driver issue detected on VF %d\n", i); } @@ -1334,7 +1332,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) vf->num_mdd_events++; if (vf->num_mdd_events && vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD) - dev_info(&pf->pdev->dev, + dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n", i, vf->num_mdd_events); } @@ -1370,7 +1368,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) pi = vsi->port_info; - pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return -ENOMEM; @@ -1389,7 +1387,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) goto out; - cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); if (!cfg) { retcode = -ENOMEM; goto out; @@ -1414,9 +1412,9 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) retcode = -EIO; } - devm_kfree(dev, cfg); + kfree(cfg); out: - devm_kfree(dev, pcaps); + kfree(pcaps); return retcode; } @@ -1528,6 +1526,44 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) } /** + * ice_schedule_reset - schedule a reset + * @pf: board private structure + * @reset: reset being requested + */ +int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) +{ + struct device *dev = ice_pf_to_dev(pf); + + /* bail out if earlier reset has failed */ + if (test_bit(__ICE_RESET_FAILED, pf->state)) { + dev_dbg(dev, "earlier reset has failed\n"); + return -EIO; + } + /* bail if reset/recovery already in progress */ + if (ice_is_reset_in_progress(pf->state)) { + dev_dbg(dev, "Reset already in progress\n"); + return -EBUSY; + } + + switch 
(reset) { + case ICE_RESET_PFR: + set_bit(__ICE_PFR_REQ, pf->state); + break; + case ICE_RESET_CORER: + set_bit(__ICE_CORER_REQ, pf->state); + break; + case ICE_RESET_GLOBR: + set_bit(__ICE_GLOBR_REQ, pf->state); + break; + default: + return -EINVAL; + } + + ice_service_task_schedule(pf); + return 0; +} + +/** * ice_irq_affinity_notify - Callback for affinity changes * @notify: context as to what irq was changed * @mask: the new affinity mask @@ -1581,11 +1617,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) int q_vectors = vsi->num_q_vectors; struct ice_pf *pf = vsi->back; int base = vsi->base_vector; + struct device *dev; int rx_int_idx = 0; int tx_int_idx = 0; int vector, err; int irq_num; + dev = ice_pf_to_dev(pf); for (vector = 0; vector < q_vectors; vector++) { struct ice_q_vector *q_vector = vsi->q_vectors[vector]; @@ -1605,8 +1643,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) /* skip this unused q_vector */ continue; } - err = devm_request_irq(&pf->pdev->dev, irq_num, - vsi->irq_handler, 0, + err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0, q_vector->name, q_vector); if (err) { netdev_err(vsi->netdev, @@ -1632,7 +1669,7 @@ free_q_irqs: irq_num = pf->msix_entries[base + vector].vector, irq_set_affinity_notifier(irq_num, NULL); irq_set_affinity_hint(irq_num, NULL); - devm_free_irq(&pf->pdev->dev, irq_num, &vsi->q_vectors[vector]); + devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); } return err; } @@ -1721,9 +1758,11 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) .mapping_mode = ICE_VSI_MAP_CONTIG }; enum ice_status status; + struct device *dev; int i, v_idx; - vsi->xdp_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_xdp_txq, + dev = ice_pf_to_dev(pf); + vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, sizeof(*vsi->xdp_rings), GFP_KERNEL); if (!vsi->xdp_rings) return -ENOMEM; @@ -1770,8 +1809,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); if (status) { - dev_err(&pf->pdev->dev, - "Failed VSI LAN queue config for XDP, error:%d\n", + dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n", status); goto clear_xdp_rings; } @@ -1793,7 +1831,7 @@ err_map_xdp: } mutex_unlock(&pf->avail_q_mutex); - devm_kfree(&pf->pdev->dev, vsi->xdp_rings); + devm_kfree(dev, vsi->xdp_rings); return -ENOMEM; } @@ -1846,7 +1884,7 @@ free_qmap: vsi->xdp_rings[i] = NULL; } - devm_kfree(&pf->pdev->dev, vsi->xdp_rings); + devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings); vsi->xdp_rings = NULL; if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) @@ -1993,8 +2031,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) struct ice_pf *pf = (struct ice_pf *)data; struct ice_hw *hw = &pf->hw; irqreturn_t ret = IRQ_NONE; + struct device *dev; u32 oicr, ena_mask; + dev = ice_pf_to_dev(pf); set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); @@ -2030,8 +2070,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) else if (reset == ICE_RESET_EMPR) pf->empr_count++; else - dev_dbg(&pf->pdev->dev, "Invalid reset type %d\n", - reset); + dev_dbg(dev, "Invalid reset type %d\n", reset); /* If a reset cycle isn't already in progress, we set a bit in * pf->state so that the service task can start a reset/rebuild. 
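ice_schedule_reset(), added above, does not drive the reset itself: it refuses with -EIO if an earlier reset already failed, with -EBUSY if one is already in progress, and otherwise sets the matching __ICE_*_REQ state bit and schedules the service task, which performs the actual rebuild later. A hypothetical caller therefore only needs to check the return value:

/* Hypothetical caller; the error codes mirror those returned by
 * ice_schedule_reset() in this patch.
 */
static void example_request_pf_reset(struct ice_pf *pf)
{
	int err = ice_schedule_reset(pf, ICE_RESET_PFR);

	if (err == -EBUSY)
		dev_dbg(ice_pf_to_dev(pf), "reset already in progress\n");
	else if (err)
		dev_err(ice_pf_to_dev(pf), "failed to schedule PF reset, err %d\n", err);
}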
@@ -2065,8 +2104,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_HMC_ERR_M) { ena_mask &= ~PFINT_OICR_HMC_ERR_M; - dev_dbg(&pf->pdev->dev, - "HMC Error interrupt - info 0x%x, data 0x%x\n", + dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n", rd32(hw, PFHMC_ERRORINFO), rd32(hw, PFHMC_ERRORDATA)); } @@ -2074,8 +2112,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) /* Report any remaining unexpected interrupts */ oicr &= ena_mask; if (oicr) { - dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n", - oicr); + dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr); /* If a critical error is pending there is no choice but to * reset the device. */ @@ -2133,7 +2170,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) if (pf->msix_entries) { synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); - devm_free_irq(&pf->pdev->dev, + devm_free_irq(ice_pf_to_dev(pf), pf->msix_entries[pf->oicr_idx].vector, pf); } @@ -2177,13 +2214,13 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx) */ static int ice_req_irq_msix_misc(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int oicr_idx, err = 0; if (!pf->int_name[0]) snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", - dev_driver_string(&pf->pdev->dev), - dev_name(&pf->pdev->dev)); + dev_driver_string(dev), dev_name(dev)); /* Do not request IRQ but do enable OICR interrupt since settings are * lost during reset. Note that this function is called only during @@ -2200,12 +2237,10 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) pf->num_avail_sw_msix -= 1; pf->oicr_idx = oicr_idx; - err = devm_request_irq(&pf->pdev->dev, - pf->msix_entries[pf->oicr_idx].vector, + err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, ice_misc_intr, 0, pf->int_name, pf); if (err) { - dev_err(&pf->pdev->dev, - "devm_request_irq for %s failed: %d\n", + dev_err(dev, "devm_request_irq for %s failed: %d\n", pf->int_name, err); ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); pf->num_avail_sw_msix += 1; @@ -2338,7 +2373,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) ice_set_ops(netdev); if (vsi->type == ICE_VSI_PF) { - SET_NETDEV_DEV(netdev, &pf->pdev->dev); + SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf)); ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); @@ -2665,7 +2700,7 @@ static int ice_init_pf(struct ice_pf *pf) pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); if (!pf->avail_rxqs) { - devm_kfree(&pf->pdev->dev, pf->avail_txqs); + devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs); pf->avail_txqs = NULL; return -ENOMEM; } @@ -2682,6 +2717,7 @@ static int ice_init_pf(struct ice_pf *pf) */ static int ice_ena_msix_range(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); int v_left, v_actual, v_budget = 0; int needed, err, i; @@ -2702,7 +2738,7 @@ static int ice_ena_msix_range(struct ice_pf *pf) v_budget += needed; v_left -= needed; - pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, + pf->msix_entries = devm_kcalloc(dev, v_budget, sizeof(*pf->msix_entries), GFP_KERNEL); if (!pf->msix_entries) { @@ -2718,13 +2754,13 @@ static int ice_ena_msix_range(struct ice_pf *pf) ICE_MIN_MSIX, v_budget); if (v_actual < 0) { - dev_err(&pf->pdev->dev, "unable to reserve MSI-X vectors\n"); + dev_err(dev, "unable to reserve MSI-X vectors\n"); err = v_actual; goto msix_err; } if 
(v_actual < v_budget) { - dev_warn(&pf->pdev->dev, + dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", v_budget, v_actual); /* 2 vectors for LAN (traffic + OICR) */ @@ -2743,11 +2779,11 @@ static int ice_ena_msix_range(struct ice_pf *pf) return v_actual; msix_err: - devm_kfree(&pf->pdev->dev, pf->msix_entries); + devm_kfree(dev, pf->msix_entries); goto exit_err; no_hw_vecs_left_err: - dev_err(&pf->pdev->dev, + dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n", needed, v_left); err = -ERANGE; @@ -2763,7 +2799,7 @@ exit_err: static void ice_dis_msix(struct ice_pf *pf) { pci_disable_msix(pf->pdev); - devm_kfree(&pf->pdev->dev, pf->msix_entries); + devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); pf->msix_entries = NULL; } @@ -2776,7 +2812,7 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf) ice_dis_msix(pf); if (pf->irq_tracker) { - devm_kfree(&pf->pdev->dev, pf->irq_tracker); + devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); pf->irq_tracker = NULL; } } @@ -2796,7 +2832,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) /* set up vector assignment tracking */ pf->irq_tracker = - devm_kzalloc(&pf->pdev->dev, sizeof(*pf->irq_tracker) + + devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) + (sizeof(u16) * vectors), GFP_KERNEL); if (!pf->irq_tracker) { ice_dis_msix(pf); @@ -2812,6 +2848,52 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) } /** + * ice_vsi_recfg_qs - Change the number of queues on a VSI + * @vsi: VSI being changed + * @new_rx: new number of Rx queues + * @new_tx: new number of Tx queues + * + * Only change the number of queues if new_tx, or new_rx is non-0. + * + * Returns 0 on success. + */ +int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) +{ + struct ice_pf *pf = vsi->back; + int err = 0, timeout = 50; + + if (!new_rx && !new_tx) + return -EINVAL; + + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + + if (new_tx) + vsi->req_txq = new_tx; + if (new_rx) + vsi->req_rxq = new_rx; + + /* set for the next time the netdev is started */ + if (!netif_running(vsi->netdev)) { + ice_vsi_rebuild(vsi, false); + dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); + goto done; + } + + ice_vsi_close(vsi); + ice_vsi_rebuild(vsi, false); + ice_pf_dcb_recfg(pf); + ice_vsi_open(vsi); +done: + clear_bit(__ICE_CFG_BUSY, pf->state); + return err; +} + +/** * ice_log_pkg_init - log result of DDP package load * @hw: pointer to hardware info * @status: status of package load @@ -2820,7 +2902,7 @@ static void ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status) { struct ice_pf *pf = (struct ice_pf *)hw->back; - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); switch (*status) { case ICE_SUCCESS: @@ -2939,7 +3021,7 @@ static void ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) { enum ice_status status = ICE_ERR_PARAM; - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; /* Load DDP Package */ @@ -2979,7 +3061,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf) static void ice_verify_cacheline_size(struct ice_pf *pf) { if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) - dev_warn(&pf->pdev->dev, + dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n", ICE_CACHE_LINE_BYTES); } @@ 
-3049,7 +3131,7 @@ static void ice_request_fw(struct ice_pf *pf) { char *opt_fw_filename = ice_get_opt_fw_name(pf); const struct firmware *firmware = NULL; - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); int err = 0; /* optional device-specific DDP (if present) overrides the default DDP @@ -3240,7 +3322,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_setup_pf_sw(pf); if (err) { - dev_err(dev, "probe failed due to setup PF switch:%d\n", err); + dev_err(dev, "probe failed due to setup PF switch: %d\n", err); goto err_alloc_sw_unroll; } @@ -3288,7 +3370,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err_alloc_sw_unroll: set_bit(__ICE_SERVICE_DIS, pf->state); set_bit(__ICE_DOWN, pf->state); - devm_kfree(&pf->pdev->dev, pf->first_sw); + devm_kfree(dev, pf->first_sw); err_msix_misc_unroll: ice_free_irq_msix_misc(pf); err_init_interrupt_unroll: @@ -4410,7 +4492,7 @@ static int ice_vsi_open(struct ice_vsi *vsi) goto err_setup_rx; snprintf(int_name, sizeof(int_name) - 1, "%s-%s", - dev_driver_string(&pf->pdev->dev), vsi->netdev->name); + dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); err = ice_vsi_req_irq_msix(vsi, int_name); if (err) goto err_setup_rx; @@ -4459,7 +4541,7 @@ static void ice_vsi_release_all(struct ice_pf *pf) err = ice_vsi_release(pf->vsi[i]); if (err) - dev_dbg(&pf->pdev->dev, + dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", i, err, pf->vsi[i]->vsi_num); } @@ -4474,6 +4556,7 @@ static void ice_vsi_release_all(struct ice_pf *pf) */ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) { + struct device *dev = ice_pf_to_dev(pf); enum ice_status status; int i, err; @@ -4484,9 +4567,9 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) continue; /* rebuild the VSI */ - err = ice_vsi_rebuild(vsi); + err = ice_vsi_rebuild(vsi, true); if (err) { - dev_err(&pf->pdev->dev, + dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n", err, vsi->idx, ice_vsi_type_str(type)); return err; @@ -4495,7 +4578,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) /* replay filters for the VSI */ status = ice_replay_vsi(&pf->hw, vsi->idx); if (status) { - dev_err(&pf->pdev->dev, + dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n", status, vsi->idx, ice_vsi_type_str(type)); return -EIO; @@ -4509,14 +4592,14 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type) /* enable the VSI */ err = ice_ena_vsi(vsi, false); if (err) { - dev_err(&pf->pdev->dev, + dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n", err, vsi->idx, ice_vsi_type_str(type)); return err; } - dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %s\n", - vsi->idx, ice_vsi_type_str(type)); + dev_info(dev, "VSI rebuilt. 
VSI index %d, type %s\n", vsi->idx, + ice_vsi_type_str(type)); } return 0; @@ -4555,7 +4638,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf) */ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; enum ice_status ret; int err; @@ -4601,7 +4684,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) err = ice_update_link_info(hw->port_info); if (err) - dev_err(&pf->pdev->dev, "Get link status error %d\n", err); + dev_err(dev, "Get link status error %d\n", err); /* start misc vector */ err = ice_req_irq_msix_misc(pf); @@ -4760,7 +4843,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; enum ice_status status; + struct device *dev; + dev = ice_pf_to_dev(pf); if (seed) { struct ice_aqc_get_set_rss_keys *buf = (struct ice_aqc_get_set_rss_keys *)seed; @@ -4768,8 +4853,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) status = ice_aq_set_rss_key(hw, vsi->idx, buf); if (status) { - dev_err(&pf->pdev->dev, - "Cannot set RSS key, err %d aq_err %d\n", + dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n", status, hw->adminq.rq_last_status); return -EIO; } @@ -4779,8 +4863,7 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut, lut_size); if (status) { - dev_err(&pf->pdev->dev, - "Cannot set RSS lut, err %d aq_err %d\n", + dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n", status, hw->adminq.rq_last_status); return -EIO; } @@ -4803,15 +4886,16 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; enum ice_status status; + struct device *dev; + dev = ice_pf_to_dev(pf); if (seed) { struct ice_aqc_get_set_rss_keys *buf = (struct ice_aqc_get_set_rss_keys *)seed; status = ice_aq_get_rss_key(hw, vsi->idx, buf); if (status) { - dev_err(&pf->pdev->dev, - "Cannot get RSS key, err %d aq_err %d\n", + dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n", status, hw->adminq.rq_last_status); return -EIO; } @@ -4821,8 +4905,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut, lut_size); if (status) { - dev_err(&pf->pdev->dev, - "Cannot get RSS lut, err %d aq_err %d\n", + dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n", status, hw->adminq.rq_last_status); return -EIO; } @@ -4866,7 +4949,6 @@ ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, */ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) { - struct device *dev = &vsi->back->pdev->dev; struct ice_aqc_vsi_props *vsi_props; struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; @@ -4875,7 +4957,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) vsi_props = &vsi->info; - ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return -ENOMEM; @@ -4891,7 +4973,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", + dev_err(&vsi->back->pdev->dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", bmode, status, hw->adminq.sq_last_status); ret = 
-EIO; goto out; @@ -4900,7 +4982,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) vsi_props->sw_flags = ctxt->info.sw_flags; out: - devm_kfree(dev, ctxt); + kfree(ctxt); return ret; } @@ -5222,6 +5304,7 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_set_vf_trust = ice_set_vf_trust, .ndo_set_vf_vlan = ice_set_vf_port_vlan, .ndo_set_vf_link_state = ice_set_vf_link_state, + .ndo_get_vf_stats = ice_get_vf_stats, .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, .ndo_set_features = ice_set_features, diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 84f609996ed5..eae707ddf8e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -798,8 +798,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw) hw->layer_info = NULL; } - if (hw->port_info) - ice_sched_clear_port(hw->port_info); + ice_sched_clear_port(hw->port_info); hw->num_tx_sched_layers = 0; hw->num_tx_sched_phys_layers = 0; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 77d211ea3aae..b5a53f862a83 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -2428,7 +2428,7 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; - if (vid) + if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) recipe_id = ICE_SW_LKUP_PROMISC_VLAN; else recipe_id = ICE_SW_LKUP_PROMISC; @@ -2440,13 +2440,18 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, mutex_lock(rule_lock); list_for_each_entry(itr, rule_head, list_entry) { + struct ice_fltr_info *fltr_info; u8 fltr_promisc_mask = 0; if (!ice_vsi_uses_fltr(itr, vsi_handle)) continue; + fltr_info = &itr->fltr_info; + + if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN && + vid != fltr_info->l_data.mac_vlan.vlan_id) + continue; - fltr_promisc_mask |= - ice_determine_promisc_mask(&itr->fltr_info); + fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info); /* Skip if filter is not completely specified by given mask */ if (fltr_promisc_mask & ~promisc_mask) @@ -2454,7 +2459,7 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, &remove_list_head, - &itr->fltr_info); + fltr_info); if (status) { mutex_unlock(rule_lock); goto free_fltr_list; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index eba8b04b8cbd..c4854a987130 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -202,6 +202,7 @@ struct ice_hw_dev_caps { struct ice_hw_common_caps common_cap; u32 num_vfs_exposed; /* Total number of VFs exposed */ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ + u32 num_funcs; }; /* MAC info */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 2ac83ad3d1a6..edb374296d1f 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -6,6 +6,35 @@ #include "ice_lib.h" /** + * ice_validate_vf_id - helper to check if VF ID is valid + * @pf: pointer to the PF structure + * @vf_id: the ID of the VF to check + */ +static int ice_validate_vf_id(struct ice_pf *pf, int vf_id) +{ + if (vf_id >= pf->num_alloc_vfs) { + 
dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id); + return -EINVAL; + } + return 0; +} + +/** + * ice_check_vf_init - helper to check if VF init complete + * @pf: pointer to the PF structure + * @vf: the pointer to the VF to check + */ +static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf) +{ + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n", + vf->vf_id); + return -EBUSY; + } + return 0; +} + +/** * ice_err_to_virt err - translate errors for VF return code * @ice_err: error return code */ @@ -185,12 +214,14 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) { struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + struct device *dev; int first, last, v; struct ice_hw *hw; hw = &pf->hw; vsi = pf->vsi[vf->lan_vsi_idx]; + dev = ice_pf_to_dev(pf); wr32(hw, VPINT_ALLOC(vf->vf_id), 0); wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); @@ -209,13 +240,12 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); else - dev_err(&pf->pdev->dev, - "Scattered mode for VF Tx queues is not yet implemented\n"); + dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); else - dev_err(&pf->pdev->dev, + dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); } @@ -290,6 +320,7 @@ static void ice_dis_vf_qs(struct ice_vf *vf) */ void ice_free_vfs(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int tmp, i; @@ -311,24 +342,24 @@ void ice_free_vfs(struct ice_pf *pf) if (!pci_vfs_assigned(pf->pdev)) pci_disable_sriov(pf->pdev); else - dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); + dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); tmp = pf->num_alloc_vfs; pf->num_vf_qps = 0; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { - /* disable VF qp mappings */ + /* disable VF qp mappings and set VF disable state */ ice_dis_vf_mappings(&pf->vf[i]); + set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); ice_free_vf_res(&pf->vf[i]); } } if (ice_sriov_free_msix_res(pf)) - dev_err(&pf->pdev->dev, - "Failed to free MSIX resources used by SR-IOV\n"); + dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); - devm_kfree(&pf->pdev->dev, pf->vf); + devm_kfree(dev, pf->vf); pf->vf = NULL; /* This check is for when the driver is unloaded while VFs are @@ -367,9 +398,11 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) { struct ice_pf *pf = vf->pf; u32 reg, reg_idx, bit_idx; + struct device *dev; struct ice_hw *hw; int vf_abs_id, i; + dev = ice_pf_to_dev(pf); hw = &pf->hw; vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; @@ -415,7 +448,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) if ((reg & VF_TRANS_PENDING_M) == 0) break; - dev_err(&pf->pdev->dev, + dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id); udelay(ICE_PCI_CIAD_WAIT_DELAY_US); } @@ -458,13 +491,12 @@ static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt) */ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) { - struct device *dev = &vsi->back->pdev->dev; struct ice_hw *hw = &vsi->back->hw; struct ice_vsi_ctx *ctxt; enum ice_status status; int ret = 0; - ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + ctxt = kzalloc(sizeof(*ctxt), 
GFP_KERNEL); if (!ctxt) return -ENOMEM; @@ -476,7 +508,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { - dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n", + dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n", status, hw->adminq.sq_last_status); ret = -EIO; goto out; @@ -484,7 +516,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable) vsi->info = ctxt->info; out: - devm_kfree(dev, ctxt); + kfree(ctxt); return ret; } @@ -532,14 +564,16 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) LIST_HEAD(tmp_add_list); u8 broadcast[ETH_ALEN]; struct ice_vsi *vsi; + struct device *dev; int status = 0; + dev = ice_pf_to_dev(pf); /* first vector index is the VFs OICR index */ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id); if (!vsi) { - dev_err(&pf->pdev->dev, "Failed to create VF VSI\n"); + dev_err(dev, "Failed to create VF VSI\n"); return -ENOMEM; } @@ -567,8 +601,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) status = ice_add_mac(&pf->hw, &tmp_add_list); if (status) - dev_err(&pf->pdev->dev, - "could not add mac filters error %d\n", status); + dev_err(dev, "could not add mac filters error %d\n", status); else vf->num_mac = 1; @@ -579,7 +612,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) * more vectors. */ ice_alloc_vsi_res_exit: - ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + ice_free_fltr_list(dev, &tmp_add_list); return status; } @@ -635,10 +668,12 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) int abs_vf_id, abs_first, abs_last; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + struct device *dev; int first, last, v; struct ice_hw *hw; u32 reg; + dev = ice_pf_to_dev(pf); hw = &pf->hw; vsi = pf->vsi[vf->lan_vsi_idx]; first = vf->first_vector_idx; @@ -686,8 +721,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) VPLAN_TX_QBASE_VFNUMQ_M)); wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); } else { - dev_err(&pf->pdev->dev, - "Scattered mode for VF Tx queues is not yet implemented\n"); + dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n"); } /* set regardless of mapping mode */ @@ -705,8 +739,7 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) VPLAN_RX_QBASE_VFNUMQ_M)); wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); } else { - dev_err(&pf->pdev->dev, - "Scattered mode for VF Rx queues is not yet implemented\n"); + dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); } } @@ -852,6 +885,7 @@ static int ice_check_avail_res(struct ice_pf *pf) { int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker); u16 num_msix, num_txq, num_rxq, num_avail_msix; + struct device *dev = ice_pf_to_dev(pf); if (!pf->num_alloc_vfs || max_valid_res_idx < 0) return -EINVAL; @@ -884,8 +918,7 @@ static int ice_check_avail_res(struct ice_pf *pf) ICE_DFLT_INTR_PER_VF, ICE_MIN_INTR_PER_VF); } else { - dev_err(&pf->pdev->dev, - "Number of VFs %d exceeds max VF count %d\n", + dev_err(dev, "Number of VFs %d exceeds max VF count %d\n", pf->num_alloc_vfs, ICE_MAX_VF_COUNT); return -EIO; } @@ -1023,12 +1056,12 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m, */ static bool ice_config_res_vfs(struct ice_pf *pf) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int v; if (ice_check_avail_res(pf)) { - dev_err(&pf->pdev->dev, - "Cannot allocate VF resources, try with fewer 
number of VFs\n"); + dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n"); return false; } @@ -1041,9 +1074,8 @@ static bool ice_config_res_vfs(struct ice_pf *pf) struct ice_vf *vf = &pf->vf[v]; vf->num_vf_qs = pf->num_vf_qps; - dev_dbg(&pf->pdev->dev, - "VF-id %d has %d queues configured\n", - vf->vf_id, vf->num_vf_qs); + dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id, + vf->num_vf_qs); ice_cleanup_and_realloc_vf(vf); } @@ -1067,6 +1099,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf) */ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct ice_vf *vf; int v, i; @@ -1125,7 +1158,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) * time, but continue on with the operation. */ if (v < pf->num_alloc_vfs) - dev_warn(&pf->pdev->dev, "VF reset check timeout\n"); + dev_warn(dev, "VF reset check timeout\n"); /* free VF resources to begin resetting the VSI state */ for (v = 0; v < pf->num_alloc_vfs; v++) { @@ -1142,8 +1175,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) } if (ice_sriov_free_msix_res(pf)) - dev_err(&pf->pdev->dev, - "Failed to free MSIX resources used by SR-IOV\n"); + dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); if (!ice_config_res_vfs(pf)) return false; @@ -1181,16 +1213,18 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) { struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; + struct device *dev; struct ice_hw *hw; bool rsd = false; u8 promisc_m; u32 reg; int i; + dev = ice_pf_to_dev(pf); + if (ice_is_vf_disabled(vf)) { - dev_dbg(&pf->pdev->dev, - "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", - vf->vf_id); + dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", + vf->vf_id); return true; } @@ -1232,8 +1266,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) * continue on with the operation. 
*/ if (!rsd) - dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n", - vf->vf_id); + dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id); /* disable promiscuous modes in case they were enabled * ignore any error if disabling process failed @@ -1247,7 +1280,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) vsi = pf->vsi[vf->lan_vsi_idx]; if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true)) - dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n"); + dev_err(dev, "disabling promiscuous mode failed\n"); } /* free VF resources to begin resetting the VSI state */ @@ -1298,19 +1331,26 @@ void ice_vc_notify_reset(struct ice_pf *pf) static void ice_vc_notify_vf_reset(struct ice_vf *vf) { struct virtchnl_pf_event pfe; + struct ice_pf *pf; - /* validate the request */ - if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + if (!vf) + return; + + pf = vf->pf; + if (ice_validate_vf_id(pf, vf->vf_id)) return; - /* verify if the VF is in either init or active before proceeding */ - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && - !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + /* Bail out if VF is in disabled state, neither initialized, nor active + * state - otherwise proceed with notifications + */ + if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && + !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) || + test_bit(ICE_VF_STATE_DIS, vf->vf_states)) return; pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; - ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, + ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe), NULL); } @@ -1322,6 +1362,7 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf) */ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; struct ice_vf *vfs; int i, ret; @@ -1338,8 +1379,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) goto err_unroll_intr; } /* allocate memory */ - vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs), - GFP_KERNEL); + vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL); if (!vfs) { ret = -ENOMEM; goto err_pci_disable_sriov; @@ -1368,7 +1408,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) err_unroll_sriov: pf->vf = NULL; - devm_kfree(&pf->pdev->dev, vfs); + devm_kfree(dev, vfs); vfs = NULL; pf->num_alloc_vfs = 0; err_pci_disable_sriov: @@ -1413,7 +1453,7 @@ static bool ice_pf_state_is_nominal(struct ice_pf *pf) static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) { int pre_existing_vfs = pci_num_vf(pf->pdev); - struct device *dev = &pf->pdev->dev; + struct device *dev = ice_pf_to_dev(pf); int err; if (!ice_pf_state_is_nominal(pf)) { @@ -1458,10 +1498,10 @@ static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct ice_pf *pf = pci_get_drvdata(pdev); + struct device *dev = ice_pf_to_dev(pf); if (ice_is_safe_mode(pf)) { - dev_err(&pf->pdev->dev, - "SR-IOV cannot be configured - Device is in Safe Mode\n"); + dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n"); return -EOPNOTSUPP; } @@ -1471,8 +1511,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!pci_vfs_assigned(pdev)) { ice_free_vfs(pf); } else { - dev_err(&pf->pdev->dev, - "can't free VFs because some are assigned to VMs.\n"); + dev_err(dev, "can't free VFs because some are assigned to VMs.\n"); return 
-EBUSY; } @@ -1535,24 +1574,28 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { enum ice_status aq_ret; + struct device *dev; struct ice_pf *pf; - /* validate the request */ - if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + if (!vf) return -EINVAL; pf = vf->pf; + if (ice_validate_vf_id(pf, vf->vf_id)) + return -EINVAL; + + dev = ice_pf_to_dev(pf); /* single place to detect unsuccessful return values */ if (v_retval) { vf->num_inval_msgs++; - dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", - vf->vf_id, v_opcode, v_retval); + dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, + v_opcode, v_retval); if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { - dev_err(&pf->pdev->dev, + dev_err(dev, "Number of invalid messages exceeded for VF %d\n", vf->vf_id); - dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); + dev_err(dev, "Use PF Control I/F to enable the VF\n"); set_bit(ICE_VF_STATE_DIS, vf->vf_states); return -EIO; } @@ -1565,7 +1608,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { - dev_info(&pf->pdev->dev, + dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n", vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status); return -EIO; @@ -1613,14 +1656,14 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) int len = 0; int ret; - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + if (ice_check_vf_init(pf, vf)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; } len = sizeof(struct virtchnl_vf_resource); - vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL); + vfres = kzalloc(len, GFP_KERNEL); if (!vfres) { v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; len = 0; @@ -1686,6 +1729,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) ether_addr_copy(vfres->vsi_res[0].default_mac_addr, vf->dflt_lan_addr.addr); + /* match guest capabilities */ + vf->driver_caps = vfres->vf_cap_flags; + set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); err: @@ -1693,7 +1739,7 @@ err: ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret, (u8 *)vfres, len); - devm_kfree(&pf->pdev->dev, vfres); + kfree(vfres); return ret; } @@ -1789,7 +1835,7 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi = NULL; + struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1836,7 +1882,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi = NULL; + struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1883,8 +1929,8 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_queue_select *vqs = (struct virtchnl_queue_select *)msg; + struct ice_eth_stats stats = { 0 }; struct ice_pf *pf = vf->pf; - struct ice_eth_stats stats; struct ice_vsi *vsi; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { @@ -1903,7 +1949,6 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - memset(&stats, 0, 
sizeof(struct ice_eth_stats)); ice_update_eth_stats(vsi); stats = vsi->eth_stats; @@ -2268,7 +2313,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF || qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { - dev_err(&pf->pdev->dev, + dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2381,9 +2426,12 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) enum virtchnl_ops vc_op; enum ice_status status; struct ice_vsi *vsi; + struct device *dev; int mac_count = 0; int i; + dev = ice_pf_to_dev(pf); + if (set) vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; else @@ -2397,7 +2445,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) if (set && !ice_is_vf_trusted(vf) && (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) { - dev_err(&pf->pdev->dev, + dev_err(dev, "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n", vf->vf_id); /* There is no need to let VF know about not being trusted @@ -2422,13 +2470,13 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) /* VF is trying to add filters that the PF * already added. Just continue. */ - dev_info(&pf->pdev->dev, + dev_info(dev, "MAC %pM already set for VF %d\n", maddr, vf->vf_id); continue; } else { /* VF can't remove dflt_lan_addr/bcast MAC */ - dev_err(&pf->pdev->dev, + dev_err(dev, "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n", maddr, vf->vf_id); continue; @@ -2437,7 +2485,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) /* check for the invalid cases and bail if necessary */ if (is_zero_ether_addr(maddr)) { - dev_err(&pf->pdev->dev, + dev_err(dev, "invalid MAC %pM provided for VF %d\n", maddr, vf->vf_id); v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2446,7 +2494,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) if (is_unicast_ether_addr(maddr) && !ice_can_vf_change_mac(vf)) { - dev_err(&pf->pdev->dev, + dev_err(dev, "can't change unicast MAC for untrusted VF %d\n", vf->vf_id); v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2457,12 +2505,12 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) status = ice_vsi_cfg_mac_fltr(vsi, maddr, set); if (status == ICE_ERR_DOES_NOT_EXIST || status == ICE_ERR_ALREADY_EXISTS) { - dev_info(&pf->pdev->dev, + dev_info(dev, "can't %s MAC filters %pM for VF %d, error %d\n", set ? "add" : "remove", maddr, vf->vf_id, status); } else if (status) { - dev_err(&pf->pdev->dev, + dev_err(dev, "can't %s MAC filters for VF %d, error %d\n", set ? "add" : "remove", vf->vf_id, status); v_ret = ice_err_to_virt_err(status); @@ -2527,7 +2575,9 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) u16 max_allowed_vf_queues; u16 tx_rx_queue_left; u16 cur_queues; + struct device *dev; + dev = ice_pf_to_dev(pf); if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2538,17 +2588,15 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) ice_get_avail_rxq_count(pf)); max_allowed_vf_queues = tx_rx_queue_left + cur_queues; if (!req_queues) { - dev_err(&pf->pdev->dev, - "VF %d tried to request 0 queues. Ignoring.\n", + dev_err(dev, "VF %d tried to request 0 queues. 
Ignoring.\n", vf->vf_id); } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) { - dev_err(&pf->pdev->dev, - "VF %d tried to request more than %d queues.\n", + dev_err(dev, "VF %d tried to request more than %d queues.\n", vf->vf_id, ICE_MAX_BASE_QS_PER_VF); vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF; } else if (req_queues > cur_queues && req_queues - cur_queues > tx_rx_queue_left) { - dev_warn(&pf->pdev->dev, + dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues, @@ -2557,8 +2605,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) /* request is successful, then reset VF */ vf->num_req_qs = req_queues; ice_vc_reset_vf(vf); - dev_info(&pf->pdev->dev, - "VF %d granted request of %u queues.\n", + dev_info(dev, "VF %d granted request of %u queues.\n", vf->vf_id, req_queues); return 0; } @@ -2584,39 +2631,34 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, __be16 vlan_proto) { u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S); - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_vsi *vsi; + struct device *dev; struct ice_vf *vf; int ret = 0; - /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id); + dev = ice_pf_to_dev(pf); + if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; - } if (vlan_id > ICE_MAX_VLANID || qos > 7) { - dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); + dev_err(dev, "Invalid VF Parameters\n"); return -EINVAL; } if (vlan_proto != htons(ETH_P_8021Q)) { - dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); + dev_err(dev, "VF VLAN protocol is not supported\n"); return -EPROTONOSUPPORT; } vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - dev_err(&pf->pdev->dev, "VF %d in reset. 
Try again.\n", vf_id); + if (ice_check_vf_init(pf, vf)) return -EBUSY; - } if (le16_to_cpu(vsi->info.pvid) == vlanprio) { /* duplicate request, so just return success */ - dev_info(&pf->pdev->dev, - "Duplicate pvid %d request\n", vlanprio); + dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio); return ret; } @@ -2635,7 +2677,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, } if (vlan_id) { - dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", + dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n", vlan_id, qos, vf_id); /* add new VLAN filter for each MAC */ @@ -2654,6 +2696,17 @@ error_set_pvid: } /** + * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads + * @caps: VF driver negotiated capabilities + * + * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false + */ +static bool ice_vf_vlan_offload_ena(u32 caps) +{ + return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN); +} + +/** * ice_vc_process_vlan_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2669,16 +2722,23 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) struct ice_pf *pf = vf->pf; bool vlan_promisc = false; struct ice_vsi *vsi; + struct device *dev; struct ice_hw *hw; int status = 0; u8 promisc_m; int i; + dev = ice_pf_to_dev(pf); if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } + if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@ -2686,7 +2746,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) if (add_v && !ice_is_vf_trusted(vf) && vf->num_vlan >= ICE_MAX_VLAN_PER_VF) { - dev_info(&pf->pdev->dev, + dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", vf->vf_id); /* There is no need to let VF know about being not trusted, @@ -2698,7 +2758,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) for (i = 0; i < vfl->num_elements; i++) { if (vfl->vlan_id[i] > ICE_MAX_VLANID) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(&pf->pdev->dev, + dev_err(dev, "invalid VF VLAN id %d\n", vfl->vlan_id[i]); goto error_param; } @@ -2716,14 +2776,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) goto error_param; } - if (ice_vsi_manage_vlan_stripping(vsi, add_v)) { - dev_err(&pf->pdev->dev, - "%sable VLAN stripping failed for VSI %i\n", - add_v ? 
"en" : "dis", vsi->vsi_num); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; - goto error_param; - } - if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) || test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) vlan_promisc = true; @@ -2734,7 +2786,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) if (!ice_is_vf_trusted(vf) && vf->num_vlan >= ICE_MAX_VLAN_PER_VF) { - dev_info(&pf->pdev->dev, + dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n", vf->vf_id); /* There is no need to let VF know about being @@ -2755,7 +2807,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) status = ice_cfg_vlan_pruning(vsi, true, false); if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(&pf->pdev->dev, + dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n", vid, status); goto error_param; @@ -2769,7 +2821,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) promisc_m, vid); if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; - dev_err(&pf->pdev->dev, + dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n", vid, status); } @@ -2864,6 +2916,11 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) goto error_param; } + if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (ice_vsi_manage_vlan_stripping(vsi, true)) v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2890,6 +2947,11 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) goto error_param; } + if (!ice_vf_vlan_offload_ena(vf->driver_caps)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2905,6 +2967,33 @@ error_param: } /** + * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization + * @vf: VF to enable/disable VLAN stripping for on initialization + * + * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if + * the flag is cleared then we want to disable stripping. For example, the flag + * will be cleared when port VLANs are configured by the administrator before + * passing the VF to the guest or if the AVF driver doesn't support VLAN + * offloads. 
+ */ +static int ice_vf_init_vlan_stripping(struct ice_vf *vf) +{ + struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + + if (!vsi) + return -EINVAL; + + /* don't modify stripping if port VLAN is configured */ + if (vsi->info.pvid) + return 0; + + if (ice_vf_vlan_offload_ena(vf->driver_caps)) + return ice_vsi_manage_vlan_stripping(vsi, true); + else + return ice_vsi_manage_vlan_stripping(vsi, false); +} + +/** * ice_vc_process_vf_msg - Process request from VF * @pf: pointer to the PF structure * @event: pointer to the AQ event @@ -2919,9 +3008,11 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) u16 msglen = event->msg_len; u8 *msg = event->msg_buf; struct ice_vf *vf = NULL; + struct device *dev; int err = 0; - if (vf_id >= pf->num_alloc_vfs) { + dev = ice_pf_to_dev(pf); + if (ice_validate_vf_id(pf, vf_id)) { err = -EINVAL; goto error_handler; } @@ -2947,7 +3038,7 @@ error_handler: if (err) { ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); - dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", + dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", vf_id, v_opcode, msglen, err); return; } @@ -2958,6 +3049,10 @@ error_handler: break; case VIRTCHNL_OP_GET_VF_RESOURCES: err = ice_vc_get_vf_res_msg(vf, msg); + if (ice_vf_init_vlan_stripping(vf)) + dev_err(dev, + "Failed to initialize VLAN stripping for VF %d\n", + vf->vf_id); ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: @@ -3008,8 +3103,8 @@ error_handler: break; case VIRTCHNL_OP_UNKNOWN: default: - dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", - v_opcode, vf_id); + dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode, + vf_id); err = ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, 0); @@ -3019,8 +3114,7 @@ error_handler: /* Helper function cares less about error return values here * as it is busy with pending work. */ - dev_info(&pf->pdev->dev, - "PF failed to honor VF %d, opcode %d, error %d\n", + dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", vf_id, v_opcode, err); } } @@ -3036,24 +3130,18 @@ error_handler: int ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_vsi *vsi; struct ice_vf *vf; - /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - netdev_err(netdev, "invalid VF id: %d\n", vf_id); + if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; - } vf = &pf->vf[vf_id]; vsi = pf->vsi[vf->lan_vsi_idx]; - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - netdev_err(netdev, "VF %d in reset. 
Try again.\n", vf_id); + if (ice_check_vf_init(pf, vf)) return -EBUSY; - } ivi->vf = vf_id; ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr); @@ -3086,33 +3174,29 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) */ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_vsi *vsi = pf->vsi[0]; struct ice_vsi_ctx *ctx; enum ice_status status; + struct device *dev; struct ice_vf *vf; int ret = 0; - /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - netdev_err(netdev, "invalid VF id: %d\n", vf_id); + dev = ice_pf_to_dev(pf); + if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; - } vf = &pf->vf[vf_id]; - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id); + if (ice_check_vf_init(pf, vf)) return -EBUSY; - } if (ena == vf->spoofchk) { - dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n", + dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF"); return 0; } - ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -3125,7 +3209,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); if (status) { - dev_dbg(&pf->pdev->dev, + dev_dbg(dev, "Error %d, failed to update VSI* parameters\n", status); ret = -EIO; goto out; @@ -3135,7 +3219,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) vsi->info.sec_flags = ctx->info.sec_flags; vsi->info.sw_flags2 = ctx->info.sw_flags2; out: - devm_kfree(&pf->pdev->dev, ctx); + kfree(ctx); return ret; } @@ -3166,17 +3250,12 @@ static void ice_wait_on_vf_reset(struct ice_vf *vf) */ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_vf *vf; int ret = 0; - /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - netdev_err(netdev, "invalid VF id: %d\n", vf_id); + if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; - } vf = &pf->vf[vf_id]; /* Don't set MAC on disabled VF */ @@ -3188,10 +3267,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) */ ice_wait_on_vf_reset(vf); - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - netdev_err(netdev, "VF %d in reset. 
Try again.\n", vf_id); + if (ice_check_vf_init(pf, vf)) return -EBUSY; - } if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) { netdev_err(netdev, "%pM not a valid unicast address\n", mac); @@ -3223,16 +3300,13 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) */ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct device *dev; struct ice_vf *vf; - /* validate the request */ - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id); + dev = ice_pf_to_dev(pf); + if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; - } vf = &pf->vf[vf_id]; /* Don't set Trusted Mode on disabled VF */ @@ -3244,10 +3318,8 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) */ ice_wait_on_vf_reset(vf); - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); + if (ice_check_vf_init(pf, vf)) return -EBUSY; - } /* Check if already trusted */ if (trusted == vf->trusted) @@ -3255,7 +3327,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) vf->trusted = trusted; ice_vc_reset_vf(vf); - dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", + dev_info(dev, "VF %u is now %strusted\n", vf_id, trusted ? "" : "un"); return 0; @@ -3271,26 +3343,21 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) */ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_pf *pf = np->vsi->back; + struct ice_pf *pf = ice_netdev_to_pf(netdev); struct virtchnl_pf_event pfe = { 0 }; struct ice_link_status *ls; struct ice_vf *vf; struct ice_hw *hw; - if (vf_id >= pf->num_alloc_vfs) { - dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + if (ice_validate_vf_id(pf, vf_id)) return -EINVAL; - } vf = &pf->vf[vf_id]; hw = &pf->hw; ls = &pf->hw.port_info->phy.link_info; - if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { - dev_err(&pf->pdev->dev, "vf %d in reset. 
Try again.\n", vf_id); + if (ice_check_vf_init(pf, vf)) return -EBUSY; - } pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; @@ -3324,3 +3391,48 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) return 0; } + +/** + * ice_get_vf_stats - populate some stats for the VF + * @netdev: the netdev of the PF + * @vf_id: the host OS identifier (0-255) + * @vf_stats: pointer to the OS memory to be initialized + */ +int ice_get_vf_stats(struct net_device *netdev, int vf_id, + struct ifla_vf_stats *vf_stats) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_eth_stats *stats; + struct ice_vsi *vsi; + struct ice_vf *vf; + + if (ice_validate_vf_id(pf, vf_id)) + return -EINVAL; + + vf = &pf->vf[vf_id]; + + if (ice_check_vf_init(pf, vf)) + return -EBUSY; + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) + return -EINVAL; + + ice_update_eth_stats(vsi); + stats = &vsi->eth_stats; + + memset(vf_stats, 0, sizeof(*vf_stats)); + + vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast + + stats->rx_multicast; + vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast + + stats->tx_multicast; + vf_stats->rx_bytes = stats->rx_bytes; + vf_stats->tx_bytes = stats->tx_bytes; + vf_stats->broadcast = stats->rx_broadcast; + vf_stats->multicast = stats->rx_multicast; + vf_stats->rx_dropped = stats->rx_discards; + vf_stats->tx_dropped = stats->tx_discards; + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 2e867ad2e81d..88aa65d5cb31 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -122,6 +122,9 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); void ice_set_vf_state_qs_dis(struct ice_vf *vf); +int +ice_get_vf_stats(struct net_device *netdev, int vf_id, + struct ifla_vf_stats *vf_stats); #else /* CONFIG_PCI_IOV */ #define ice_process_vflr_event(pf) do {} while (0) #define ice_free_vfs(pf) do {} while (0) @@ -194,5 +197,13 @@ ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf, { return 0; } + +static inline int +ice_get_vf_stats(struct net_device __always_unused *netdev, + int __always_unused vf_id, + struct ifla_vf_stats __always_unused *vf_stats) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index fcffad0069d6..cf9b8b22d24f 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -331,7 +331,7 @@ static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem) struct device *dev; unsigned int i; - dev = &pf->pdev->dev; + dev = ice_pf_to_dev(pf); for (i = 0; i < umem->npgs; i++) { dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE, @@ -369,7 +369,7 @@ static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem) struct device *dev; unsigned int i; - dev = &pf->pdev->dev; + dev = ice_pf_to_dev(pf); for (i = 0; i < umem->npgs; i++) { dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 12e03b15f0ab..a06d109c9e80 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1846,7 
+1846,6 @@ static int mvneta_rx_refill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, gfp_t gfp_mask) { - enum dma_data_direction dma_dir; dma_addr_t phys_addr; struct page *page; @@ -1856,9 +1855,6 @@ static int mvneta_rx_refill(struct mvneta_port *pp, return -ENOMEM; phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; - dma_dir = page_pool_get_dma_dir(rxq->page_pool); - dma_sync_single_for_device(pp->dev->dev.parent, phys_addr, - MVNETA_MAX_RX_BUF_SIZE, dma_dir); mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq); return 0; @@ -2097,7 +2093,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, err = xdp_do_redirect(pp->dev, xdp, prog); if (err) { ret = MVNETA_XDP_DROPPED; - xdp_return_buff(xdp); + __page_pool_put_page(rxq->page_pool, + virt_to_head_page(xdp->data), + xdp->data_end - xdp->data_hard_start, + true); } else { ret = MVNETA_XDP_REDIR; } @@ -2106,7 +2105,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, case XDP_TX: ret = mvneta_xdp_xmit_back(pp, xdp); if (ret != MVNETA_XDP_TX) - xdp_return_buff(xdp); + __page_pool_put_page(rxq->page_pool, + virt_to_head_page(xdp->data), + xdp->data_end - xdp->data_hard_start, + true); break; default: bpf_warn_invalid_xdp_action(act); @@ -2115,8 +2117,10 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, trace_xdp_exception(pp->dev, prog, act); /* fall through */ case XDP_DROP: - page_pool_recycle_direct(rxq->page_pool, - virt_to_head_page(xdp->data)); + __page_pool_put_page(rxq->page_pool, + virt_to_head_page(xdp->data), + xdp->data_end - xdp->data_hard_start, + true); ret = MVNETA_XDP_DROPPED; break; } @@ -3065,11 +3069,13 @@ static int mvneta_create_page_pool(struct mvneta_port *pp, struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); struct page_pool_params pp_params = { .order = 0, - .flags = PP_FLAG_DMA_MAP, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .pool_size = size, .nid = cpu_to_node(0), .dev = pp->dev->dev.parent, .dma_dir = xdp_prog ? 
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, + .offset = pp->rx_offset_correction, + .max_len = MVNETA_MAX_RX_BUF_SIZE, }; int err; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 0bbb2eb1446e..11e5921c55b9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -608,6 +608,8 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, u16 pcifunc; int pf, lf; + *stat = 0; + if (!cgxd || !rvu) return -EINVAL; @@ -624,7 +626,6 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, return 0; block = &rvu->hw->block[blkaddr]; - *stat = 0; for (lf = 0; lf < block->lf.max; lf++) { /* Check if a lf is attached to this PF or one of its VFs */ if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc & diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index d8313e2ee600..a1202e53710c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1745,6 +1745,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, err = mlx4_en_get_flow(dev, cmd, cmd->fs.location); break; case ETHTOOL_GRXCLSRLALL: + cmd->data = MAX_NUM_OF_FS_RULES; while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) { err = mlx4_en_get_flow(dev, cmd, i); if (!err) @@ -1811,6 +1812,7 @@ static int mlx4_en_set_channels(struct net_device *dev, struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_port_profile new_prof; struct mlx4_en_priv *tmp; + int total_tx_count; int port_up = 0; int xdp_count; int err = 0; @@ -1825,13 +1827,12 @@ static int mlx4_en_set_channels(struct net_device *dev, mutex_lock(&mdev->state_lock); xdp_count = priv->tx_ring_num[TX_XDP] ? 
channel->rx_count : 0; - if (channel->tx_count * priv->prof->num_up + xdp_count > - priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) { + total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count; + if (total_tx_count > MAX_TX_RINGS) { err = -EINVAL; en_err(priv, "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n", - channel->tx_count * priv->prof->num_up + xdp_count, - MAX_TX_RINGS); + total_tx_count, MAX_TX_RINGS); goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 40ec5acf79c0..7af75b63245f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -91,6 +91,7 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc) struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_port_profile new_prof; struct mlx4_en_priv *tmp; + int total_count; int port_up = 0; int err = 0; @@ -104,6 +105,14 @@ int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc) MLX4_EN_NUM_UP_HIGH; new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up * new_prof.num_up; + total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP]; + if (total_count > MAX_TX_RINGS) { + err = -EINVAL; + en_err(priv, + "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n", + total_count, MAX_TX_RINGS); + goto out; + } err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true); if (err) goto out; @@ -2286,11 +2295,7 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, lockdep_is_held(&priv->mdev->state_lock)); if (xdp_prog && carry_xdp_prog) { - xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num); - if (IS_ERR(xdp_prog)) { - mlx4_en_free_resources(tmp); - return PTR_ERR(xdp_prog); - } + bpf_prog_add(xdp_prog, tmp->rx_ring_num); for (i = 0; i < tmp->rx_ring_num; i++) rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog, xdp_prog); @@ -2782,11 +2787,9 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) * program for a new one. 
*/ if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) { - if (prog) { - prog = bpf_prog_add(prog, priv->rx_ring_num - 1); - if (IS_ERR(prog)) - return PTR_ERR(prog); - } + if (prog) + bpf_prog_add(prog, priv->rx_ring_num - 1); + mutex_lock(&mdev->state_lock); for (i = 0; i < priv->rx_ring_num; i++) { old_prog = rcu_dereference_protected( @@ -2807,13 +2810,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) if (!tmp) return -ENOMEM; - if (prog) { - prog = bpf_prog_add(prog, priv->rx_ring_num - 1); - if (IS_ERR(prog)) { - err = PTR_ERR(prog); - goto out; - } - } + if (prog) + bpf_prog_add(prog, priv->rx_ring_num - 1); mutex_lock(&mdev->state_lock); memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); @@ -2862,7 +2860,6 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) unlock_out: mutex_unlock(&mdev->state_lock); -out: kfree(tmp); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 327c93a7bd55..95601269fa2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -708,9 +708,9 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev, static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings, u32 eth_proto_cap, - u8 connector_type) + u8 connector_type, bool ext) { - if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) { + if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) { if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) @@ -842,9 +842,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = { [MLX5E_PORT_OTHER] = PORT_OTHER, }; -static u8 get_connector_port(u32 eth_proto, u8 connector_type) +static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext) { - if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER) + if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER) return ptys2connector_type[connector_type]; if (eth_proto & @@ -945,9 +945,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; link_ksettings->base.port = get_connector_port(eth_proto_oper, - connector_type); + connector_type, ext); ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin, - connector_type); + connector_type, ext); get_lp_advertising(mdev, eth_proto_lp, link_ksettings); if (an_status == MLX5_AN_COMPLETE) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 06a592fb62bf..09ed7f5f688b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -409,12 +409,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->stats = &c->priv->channel_stats[c->ix].rq; INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work); - rq->xdp_prog = params->xdp_prog ? 
bpf_prog_inc(params->xdp_prog) : NULL; - if (IS_ERR(rq->xdp_prog)) { - err = PTR_ERR(rq->xdp_prog); - rq->xdp_prog = NULL; - goto err_rq_wq_destroy; - } + if (params->xdp_prog) + bpf_prog_inc(params->xdp_prog); + rq->xdp_prog = params->xdp_prog; rq_xdp_ix = rq->ix; if (xsk) @@ -4253,9 +4250,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, switch (proto) { case IPPROTO_GRE: + return features; case IPPROTO_IPIP: case IPPROTO_IPV6: - return features; + if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP)) + return features; + break; case IPPROTO_UDP: udph = udp_hdr(skb); port = be16_to_cpu(udph->dest); @@ -4407,16 +4407,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) /* no need for full reset when exchanging programs */ reset = (!priv->channels.params.xdp_prog || !prog); - if (was_opened && !reset) { + if (was_opened && !reset) /* num_channels is invariant here, so we can take the * batched reference right upfront. */ - prog = bpf_prog_add(prog, priv->channels.num); - if (IS_ERR(prog)) { - err = PTR_ERR(prog); - goto unlock; - } - } + bpf_prog_add(prog, priv->channels.num); if (was_opened && reset) { struct mlx5e_channels new_channels = {}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 82cffb3a9964..9e9960146e5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1386,6 +1386,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return 0; + if (rq->page_pool) + page_pool_nid_changed(rq->page_pool, numa_mem_id()); + if (rq->cqd.left) { work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget); if (rq->cqd.left || work_done >= budget) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3a707d788022..761fc35c4aab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3291,7 +3291,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; - if (netdev_port_same_parent_id(priv->netdev, out_dev)) { + if (encap) { + parse_attr->mirred_ifindex[attr->out_count] = + out_dev->ifindex; + parse_attr->tun_info[attr->out_count] = dup_tun_info(info); + if (!parse_attr->tun_info[attr->out_count]) + return -ENOMEM; + encap = false; + attr->dests[attr->out_count].flags |= + MLX5_ESW_DEST_ENCAP; + attr->out_count++; + /* attr->dests[].rep is resolved when we + * handle encap + */ + } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); struct net_device *uplink_upper; @@ -3333,19 +3346,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, attr->dests[attr->out_count].rep = rpriv->rep; attr->dests[attr->out_count].mdev = out_priv->mdev; attr->out_count++; - } else if (encap) { - parse_attr->mirred_ifindex[attr->out_count] = - out_dev->ifindex; - parse_attr->tun_info[attr->out_count] = dup_tun_info(info); - if (!parse_attr->tun_info[attr->out_count]) - return -ENOMEM; - encap = false; - attr->dests[attr->out_count].flags |= - MLX5_ESW_DEST_ENCAP; - attr->out_count++; - /* attr->dests[].rep is resolved when we - * handle encap - */ } else if (parse_attr->filter_dev != priv->netdev) { 
/* All mlx5 devices are called to configure * high level device filters. Therefore, the @@ -4035,9 +4035,8 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, struct tc_cls_matchall_offload *ma) { struct netlink_ext_ack *extack = ma->common.extack; - int prio = TC_H_MAJ(ma->common.prio) >> 16; - if (prio != 1) { + if (ma->common.prio != 1) { NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported"); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index f2e400a23a59..2c965ad0d744 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -2245,7 +2245,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, unlock: mutex_unlock(&esw->state_lock); - return 0; + return err; } int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 2c1dcb5084ab..d60577484567 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -587,7 +587,7 @@ static void del_sw_flow_group(struct fs_node *node) rhashtable_destroy(&fg->ftes_hash); ida_destroy(&fg->fte_allocator); - if (ft->autogroup.active) + if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size) ft->autogroup.num_groups--; err = rhltable_remove(&ft->fgs_hash, &fg->hash, @@ -1134,6 +1134,8 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, ft->autogroup.active = true; ft->autogroup.required_groups = max_num_groups; + /* We save place for flow groups in addition to max types */ + ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1); return ft; } @@ -1336,8 +1338,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft return ERR_PTR(-ENOENT); if (ft->autogroup.num_groups < ft->autogroup.required_groups) - /* We save place for flow groups in addition to max types */ - group_size = ft->max_fte / (ft->autogroup.required_groups + 1); + group_size = ft->autogroup.group_size; /* ft->max_fte == ft->autogroup.max_types */ if (group_size == 0) @@ -1364,7 +1365,8 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft if (IS_ERR(fg)) goto out; - ft->autogroup.num_groups++; + if (group_size == ft->autogroup.group_size) + ft->autogroup.num_groups++; out: return fg; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index f278298b0f6e..e8cd997f413e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -162,6 +162,7 @@ struct mlx5_flow_table { struct { bool active; unsigned int required_groups; + unsigned int group_size; unsigned int num_groups; } autogroup; /* Protect fwd_rules */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 31fbfd6e8bb9..584074bbf669 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1564,6 +1564,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */ { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ + { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ { 
PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 8560460d97fd..32e94d2ee5e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -595,6 +595,18 @@ static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule, } } +static u16 dr_get_bits_per_mask(u16 byte_mask) +{ + u16 bits = 0; + + while (byte_mask) { + byte_mask = byte_mask & (byte_mask - 1); + bits++; + } + + return bits; +} + static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl, struct mlx5dr_domain *dmn, struct mlx5dr_domain_rx_tx *nic_dmn) @@ -607,6 +619,9 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl, if (!ctrl->may_grow) return false; + if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size) + return false; + if (ctrl->num_of_collisions >= ctrl->increase_threshold && (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold) return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 5df8436b2ae3..51803eef13dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -700,6 +700,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, unsigned int irqn; void *cqc, *in; __be64 *pas; + int vector; u32 i; cq = kzalloc(sizeof(*cq), GFP_KERNEL); @@ -728,7 +729,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, if (!in) goto err_cqwq; - err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn); + vector = smp_processor_id() % mlx5_comp_vectors_count(mdev); + err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn); if (err) { kvfree(in); goto err_cqwq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 7e9d6cfc356f..dc7e41566b2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -568,18 +568,6 @@ bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste) return !refcount_read(&ste->refcount); } -static u16 get_bits_per_mask(u16 byte_mask) -{ - u16 bits = 0; - - while (byte_mask) { - byte_mask = byte_mask & (byte_mask - 1); - bits++; - } - - return bits; -} - /* Init one ste as a pattern for ste data array */ void mlx5dr_ste_set_formatted_ste(u16 gvmi, struct mlx5dr_domain_rx_tx *nic_dmn, @@ -628,20 +616,12 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher, struct mlx5dr_ste_htbl *next_htbl; if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) { - u32 bits_in_mask; u8 next_lu_type; u16 byte_mask; next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type); byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask); - /* Don't allocate table more than required, - * the size of the table defined via the byte_mask, so no need - * to allocate more than that. 
- */ - bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE; - log_table_size = min(log_table_size, bits_in_mask); - next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool, log_table_size, next_lu_type, @@ -679,7 +659,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl) htbl->ctrl.may_grow = true; - if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1) + if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask) htbl->ctrl.may_grow = false; /* Threshold is 50%, one is added to table of size 1 */ diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c index 67990406cba2..29e95d0a6ad1 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c @@ -66,6 +66,8 @@ retry: return err; if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) { + fsm_state_err = min_t(enum mlxfw_fsm_state_err, + fsm_state_err, MLXFW_FSM_STATE_ERR_MAX); pr_err("Firmware flash failed: %s\n", mlxfw_fsm_state_err_str[fsm_state_err]); NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 517cb8b14b1d..86e25824fcd8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -998,7 +998,7 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev) if (d) return l3mdev_fib_table(d) ? : RT_TABLE_MAIN; else - return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN; + return RT_TABLE_MAIN; } static struct mlxsw_sp_rif * @@ -1602,27 +1602,10 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_ipip_entry *ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); - enum mlxsw_sp_l3proto ul_proto; - union mlxsw_sp_l3addr saddr; - u32 ul_tb_id; if (!ipip_entry) return 0; - /* For flat configuration cases, moving overlay to a different VRF might - * cause local address conflict, and the conflicting tunnels need to be - * demoted. 
- */ - ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev); - ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto; - saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); - if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto, - saddr, ul_tb_id, - ipip_entry)) { - mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); - return 0; - } - return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, true, false, false, extack); } diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 90c46ba763d7..0e96ffab3b05 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -575,6 +575,32 @@ static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info) return 0; } +int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port, + struct sk_buff *skb) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + struct ocelot *ocelot = ocelot_port->ocelot; + + if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP && + ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { + struct ocelot_skb *oskb = + kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC); + + if (unlikely(!oskb)) + return -ENOMEM; + + shinfo->tx_flags |= SKBTX_IN_PROGRESS; + + oskb->skb = skb; + oskb->id = ocelot_port->ts_id % 4; + + list_add_tail(&oskb->head, &ocelot_port->skbs); + return 0; + } + return -ENODATA; +} +EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb); + static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) { struct ocelot_port_private *priv = netdev_priv(dev); @@ -637,31 +663,17 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP && - ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { - struct ocelot_skb *oskb = - kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC); - - if (unlikely(!oskb)) - goto out; - - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - - oskb->skb = skb; - oskb->id = ocelot_port->ts_id % 4; + if (!ocelot_port_add_txtstamp_skb(ocelot_port, skb)) { ocelot_port->ts_id++; - - list_add_tail(&oskb->head, &ocelot_port->skbs); - return NETDEV_TX_OK; } -out: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } -void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts) +static void ocelot_get_hwtimestamp(struct ocelot *ocelot, + struct timespec64 *ts) { unsigned long flags; u32 val; @@ -686,7 +698,64 @@ void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts) spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); } -EXPORT_SYMBOL(ocelot_get_hwtimestamp); + +void ocelot_get_txtstamp(struct ocelot *ocelot) +{ + int budget = OCELOT_PTP_QUEUE_SZ; + + while (budget--) { + struct skb_shared_hwtstamps shhwtstamps; + struct list_head *pos, *tmp; + struct sk_buff *skb = NULL; + struct ocelot_skb *entry; + struct ocelot_port *port; + struct timespec64 ts; + u32 val, id, txport; + + val = ocelot_read(ocelot, SYS_PTP_STATUS); + + /* Check if a timestamp can be retrieved */ + if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD)) + break; + + WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL); + + /* Retrieve the ts ID and Tx port */ + id = SYS_PTP_STATUS_PTP_MESS_ID_X(val); + txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); + + /* Retrieve its associated skb */ + port = ocelot->ports[txport]; + + list_for_each_safe(pos, tmp, &port->skbs) { + entry = list_entry(pos, struct ocelot_skb, head); + if (entry->id != id) + continue; + + skb = entry->skb; + + list_del(pos); + kfree(entry); + } + + /* 
Next ts */ + ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); + + if (unlikely(!skb)) + continue; + + /* Get the h/w timestamp */ + ocelot_get_hwtimestamp(ocelot, &ts); + + /* Set the timestamp into the skb */ + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb, &shhwtstamps); + + dev_kfree_skb_any(skb); + } +} +EXPORT_SYMBOL(ocelot_get_txtstamp); static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr) { @@ -1049,15 +1118,14 @@ static int ocelot_get_port_parent_id(struct net_device *dev, return 0; } -static int ocelot_hwstamp_get(struct ocelot *ocelot, int port, - struct ifreq *ifr) +int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) { return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config, sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0; } +EXPORT_SYMBOL(ocelot_hwstamp_get); -static int ocelot_hwstamp_set(struct ocelot *ocelot, int port, - struct ifreq *ifr) +int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) { struct ocelot_port *ocelot_port = ocelot->ports[port]; struct hwtstamp_config cfg; @@ -1120,6 +1188,7 @@ static int ocelot_hwstamp_set(struct ocelot *ocelot, int port, return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; } +EXPORT_SYMBOL(ocelot_hwstamp_set); static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 32fef4f495aa..c259114c48fd 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -74,12 +74,6 @@ struct ocelot_port_private { struct ocelot_port_tc tc; }; -struct ocelot_skb { - struct list_head head; - struct sk_buff *skb; - u8 id; -}; - u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 5541ec26f953..2da8eee27e98 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -190,60 +190,9 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg) { - int budget = OCELOT_PTP_QUEUE_SZ; struct ocelot *ocelot = arg; - while (budget--) { - struct skb_shared_hwtstamps shhwtstamps; - struct list_head *pos, *tmp; - struct sk_buff *skb = NULL; - struct ocelot_skb *entry; - struct ocelot_port *port; - struct timespec64 ts; - u32 val, id, txport; - - val = ocelot_read(ocelot, SYS_PTP_STATUS); - - /* Check if a timestamp can be retrieved */ - if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD)) - break; - - WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL); - - /* Retrieve the ts ID and Tx port */ - id = SYS_PTP_STATUS_PTP_MESS_ID_X(val); - txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); - - /* Retrieve its associated skb */ - port = ocelot->ports[txport]; - - list_for_each_safe(pos, tmp, &port->skbs) { - entry = list_entry(pos, struct ocelot_skb, head); - if (entry->id != id) - continue; - - skb = entry->skb; - - list_del(pos); - kfree(entry); - } - - /* Next ts */ - ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); - - if (unlikely(!skb)) - continue; - - /* Get the h/w timestamp */ - ocelot_get_hwtimestamp(ocelot, &ts); - - /* Set the timestamp into the skb */ - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); - skb_tstamp_tx(skb, 
&shhwtstamps); - - dev_kfree_skb_any(skb); - } + ocelot_get_txtstamp(ocelot); return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 88fab6a82acf..06927ba5a3ae 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -46,9 +46,7 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, /* Grab a single ref to the map for our record. The prog destroy ndo * happens after free_used_maps(). */ - map = bpf_map_inc(map, false); - if (IS_ERR(map)) - return PTR_ERR(map); + bpf_map_inc(map); record = kmalloc(sizeof(*record), GFP_KERNEL); if (!record) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a220cc7c947a..481b096e984d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2115,12 +2115,8 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) if (rc) goto out; - fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); - if (IS_ERR(fp->rxq->xdp_prog)) { - rc = PTR_ERR(fp->rxq->xdp_prog); - fp->rxq->xdp_prog = NULL; - goto out; - } + bpf_prog_add(edev->xdp_prog, 1); + fp->rxq->xdp_prog = edev->xdp_prog; } if (fp->type & QEDE_FASTPATH_TX) { diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 5ecf61df78bd..baac016f3ec0 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -363,7 +363,7 @@ qcaspi_receive(struct qcaspi *qca) netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n", available); - if (available > QCASPI_HW_BUF_LEN) { + if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) { /* This could only happen by interferences on the SPI line. * So retry later ... 
*/ @@ -496,7 +496,6 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event) u16 signature = 0; u16 spi_config; u16 wrbuf_space = 0; - static u16 reset_count; if (event == QCASPI_EVENT_CPUON) { /* Read signature twice, if not valid @@ -549,13 +548,13 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event) qca->sync = QCASPI_SYNC_RESET; qca->stats.trig_reset++; - reset_count = 0; + qca->reset_count = 0; break; case QCASPI_SYNC_RESET: - reset_count++; + qca->reset_count++; netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n", - reset_count); - if (reset_count >= QCASPI_RESET_TIMEOUT) { + qca->reset_count); + if (qca->reset_count >= QCASPI_RESET_TIMEOUT) { /* reset did not seem to take place, try again */ qca->sync = QCASPI_SYNC_UNKNOWN; qca->stats.reset_timeout++; diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index eb9af45fcc5e..d13a67e20d65 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -94,6 +94,7 @@ struct qcaspi { unsigned int intr_req; unsigned int intr_svc; + u16 reset_count; #ifdef CONFIG_DEBUG_FS struct dentry *device_root; diff --git a/drivers/net/ethernet/realtek/r8169_firmware.c b/drivers/net/ethernet/realtek/r8169_firmware.c index 8f54a2c832eb..355cc810e322 100644 --- a/drivers/net/ethernet/realtek/r8169_firmware.c +++ b/drivers/net/ethernet/realtek/r8169_firmware.c @@ -37,7 +37,7 @@ struct fw_info { u8 chksum; } __packed; -#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code)) +#define FW_OPCODE_SIZE FIELD_SIZEOF(struct rtl_fw_phy_action, code[0]) static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) { @@ -92,19 +92,24 @@ static bool rtl_fw_data_ok(struct rtl_fw *rtl_fw) for (index = 0; index < pa->size; index++) { u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; u32 regno = (action & 0x0fff0000) >> 16; switch (action >> 28) { case PHY_READ: case PHY_DATA_OR: case PHY_DATA_AND: - case PHY_MDIO_CHG: case PHY_CLEAR_READCOUNT: case PHY_WRITE: case PHY_WRITE_PREVIOUS: case PHY_DELAY_MS: break; + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + case PHY_BJMPN: if (regno > index) goto out; @@ -164,12 +169,12 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) index -= (regno + 1); break; case PHY_MDIO_CHG: - if (data == 0) { - fw_write = rtl_fw->phy_write; - fw_read = rtl_fw->phy_read; - } else if (data == 1) { + if (data) { fw_write = rtl_fw->mac_mcu_write; fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = rtl_fw->phy_read; } break; @@ -198,7 +203,7 @@ void rtl_fw_write_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) index += regno; break; case PHY_DELAY_MS: - mdelay(data); + msleep(data); break; } } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index d8fcdb9db8d1..d47a038cb8d0 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -6952,8 +6952,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; } - /* RTL8168e-vl has a HW issue with TSO */ - if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + /* RTL8168e-vl and one RTL8168c variant are known to have a + * HW issue with TSO. 
+ */ + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_22) { dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index ad68eb0cb8fd..4d9bbccc6f89 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -4202,11 +4202,15 @@ static int efx_ef10_filter_push(struct efx_nic *efx, { MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); + size_t outlen; int rc; efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing); - rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc && spec->priority != EFX_FILTER_PRI_HINT) + efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf), + outbuf, outlen, rc); if (rc == 0) *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); if (rc == -ENOSPC) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 0fa9972027db..992c773620ec 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -355,7 +355,7 @@ static int efx_poll(struct napi_struct *napi, int budget) #ifdef CONFIG_RFS_ACCEL /* Perhaps expire some ARFS filters */ - schedule_work(&channel->filter_work); + mod_delayed_work(system_wq, &channel->filter_work, 0); #endif /* There is no race here; although napi_disable() will @@ -487,7 +487,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) } #ifdef CONFIG_RFS_ACCEL - INIT_WORK(&channel->filter_work, efx_filter_rfs_expire); + INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); #endif rx_queue = &channel->rx_queue; @@ -533,7 +533,7 @@ efx_copy_channel(const struct efx_channel *old_channel) memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); #ifdef CONFIG_RFS_ACCEL - INIT_WORK(&channel->filter_work, efx_filter_rfs_expire); + INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); #endif return channel; @@ -1969,6 +1969,8 @@ static int efx_probe_filters(struct efx_nic *efx) ++i) channel->rps_flow_id[i] = RPS_FLOW_ID_INVALID; + channel->rfs_expire_index = 0; + channel->rfs_filter_count = 0; } if (!success) { @@ -1978,8 +1980,6 @@ static int efx_probe_filters(struct efx_nic *efx) rc = -ENOMEM; goto out_unlock; } - - efx->rps_expire_index = efx->rps_expire_channel = 0; } #endif out_unlock: @@ -1993,8 +1993,10 @@ static void efx_remove_filters(struct efx_nic *efx) #ifdef CONFIG_RFS_ACCEL struct efx_channel *channel; - efx_for_each_channel(channel, efx) + efx_for_each_channel(channel, efx) { + cancel_delayed_work_sync(&channel->filter_work); kfree(channel->rps_flow_id); + } #endif down_write(&efx->filter_sem); efx->type->filter_table_remove(efx); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 45c7ae4114ec..2dd8d5002315 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -166,15 +166,20 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, #ifdef CONFIG_RFS_ACCEL int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); -bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota); +bool __efx_filter_rfs_expire(struct 
efx_channel *channel, unsigned int quota); static inline void efx_filter_rfs_expire(struct work_struct *data) { - struct efx_channel *channel = container_of(data, struct efx_channel, - filter_work); - - if (channel->rfs_filters_added >= 60 && - __efx_filter_rfs_expire(channel->efx, 100)) - channel->rfs_filters_added -= 60; + struct delayed_work *dwork = to_delayed_work(data); + struct efx_channel *channel; + unsigned int time, quota; + + channel = container_of(dwork, struct efx_channel, filter_work); + time = jiffies - channel->rfs_last_expiry; + quota = channel->rfs_filter_count * time / (30 * HZ); + if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) + channel->rfs_last_expiry += time; + /* Ensure we do more work eventually even if NAPI poll is not happening */ + schedule_delayed_work(dwork, 30 * HZ); } #define efx_filter_rfs_enabled() 1 #else diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 8db593fb9699..6a9347cd67f3 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -56,6 +56,9 @@ static u64 efx_get_atomic_stat(void *field) #define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ EFX_ETHTOOL_STAT(field, channel, n_##field, \ unsigned int, efx_get_uint_stat) +#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \ + EFX_ETHTOOL_STAT(field, channel, field, \ + unsigned int, efx_get_uint_stat) #define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ @@ -87,6 +90,9 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect), + EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed), }; #define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 04e49eac7327..1f88212be085 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -439,6 +439,13 @@ enum efx_sync_events_state { * @event_test_cpu: Last CPU to handle interrupt or test event for this channel * @irq_count: Number of IRQs since last adaptive moderation decision * @irq_mod_score: IRQ moderation score + * @rfs_filter_count: number of accelerated RFS filters currently in place; + * equals the count of @rps_flow_id slots filled + * @rfs_last_expiry: value of jiffies last time some accelerated RFS filters + * were checked for expiry + * @rfs_expire_index: next accelerated RFS filter ID to check for expiry + * @n_rfs_succeeded: number of successful accelerated RFS filter insertions + * @n_rfs_failed; number of failed accelerated RFS filter insertions * @filter_work: Work item for efx_filter_rfs_expire() * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, * indexed by filter ID @@ -489,8 +496,12 @@ struct efx_channel { unsigned int irq_count; unsigned int irq_mod_score; #ifdef CONFIG_RFS_ACCEL - unsigned int rfs_filters_added; - struct work_struct filter_work; + unsigned int rfs_filter_count; + unsigned int rfs_last_expiry; + unsigned int rfs_expire_index; + unsigned int n_rfs_succeeded; + unsigned int n_rfs_failed; + struct delayed_work filter_work; #define RPS_FLOW_ID_INVALID 0xFFFFFFFF u32 *rps_flow_id; #endif @@ -923,9 +934,6 @@ struct efx_async_filter_insertion { * @filter_sem: Filter table rw_semaphore, protects 
existence of @filter_state * @filter_state: Architecture-dependent filter table state * @rps_mutex: Protects RPS state of all channels - * @rps_expire_channel: Next channel to check for expiry - * @rps_expire_index: Next index to check for expiry in - * @rps_expire_channel's @rps_flow_id * @rps_slot_map: bitmap of in-flight entries in @rps_slot * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and @@ -1096,8 +1104,6 @@ struct efx_nic { void *filter_state; #ifdef CONFIG_RFS_ACCEL struct mutex rps_mutex; - unsigned int rps_expire_channel; - unsigned int rps_expire_index; unsigned long rps_slot_map; struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; spinlock_t rps_hash_lock; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 02ed6d1b716c..af15a737c675 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1531,7 +1531,8 @@ void efx_ptp_remove(struct efx_nic *efx) (void)efx_ptp_disable(efx); cancel_work_sync(&efx->ptp_data->work); - cancel_work_sync(&efx->ptp_data->pps_work); + if (efx->ptp_data->pps_workwq) + cancel_work_sync(&efx->ptp_data->pps_work); skb_queue_purge(&efx->ptp_data->rxq); skb_queue_purge(&efx->ptp_data->txq); diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index bec261905530..ef52b24ad9e7 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -988,6 +988,7 @@ static void efx_filter_rfs_work(struct work_struct *data) rc = efx->type->filter_insert(efx, &req->spec, true); if (rc >= 0) + /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */ rc %= efx->type->max_rx_ip_filters; if (efx->rps_hash_table) { spin_lock_bh(&efx->rps_hash_lock); @@ -1012,8 +1013,9 @@ static void efx_filter_rfs_work(struct work_struct *data) * later. */ mutex_lock(&efx->rps_mutex); + if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) + channel->rfs_filter_count++; channel->rps_flow_id[rc] = req->flow_id; - ++channel->rfs_filters_added; mutex_unlock(&efx->rps_mutex); if (req->spec.ether_type == htons(ETH_P_IP)) @@ -1030,6 +1032,28 @@ static void efx_filter_rfs_work(struct work_struct *data) req->spec.rem_host, ntohs(req->spec.rem_port), req->spec.loc_host, ntohs(req->spec.loc_port), req->rxq_index, req->flow_id, rc, arfs_id); + channel->n_rfs_succeeded++; + } else { + if (req->spec.ether_type == htons(ETH_P_IP)) + netif_dbg(efx, rx_status, efx->net_dev, + "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + else + netif_dbg(efx, rx_status, efx->net_dev, + "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + channel->n_rfs_failed++; + /* We're overloading the NIC's filter tables, so let's do a + * chunk of extra expiry work. 
+ */ + __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, + 100u)); } /* Release references */ @@ -1139,38 +1163,44 @@ out_clear: return rc; } -bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) +bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota) { bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); - unsigned int channel_idx, index, size; + struct efx_nic *efx = channel->efx; + unsigned int index, size, start; u32 flow_id; if (!mutex_trylock(&efx->rps_mutex)) return false; expire_one = efx->type->filter_rfs_expire_one; - channel_idx = efx->rps_expire_channel; - index = efx->rps_expire_index; + index = channel->rfs_expire_index; + start = index; size = efx->type->max_rx_ip_filters; - while (quota--) { - struct efx_channel *channel = efx_get_channel(efx, channel_idx); + while (quota) { flow_id = channel->rps_flow_id[index]; - if (flow_id != RPS_FLOW_ID_INVALID && - expire_one(efx, flow_id, index)) { - netif_info(efx, rx_status, efx->net_dev, - "expired filter %d [queue %u flow %u]\n", - index, channel_idx, flow_id); - channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; + if (flow_id != RPS_FLOW_ID_INVALID) { + quota--; + if (expire_one(efx, flow_id, index)) { + netif_info(efx, rx_status, efx->net_dev, + "expired filter %d [channel %u flow %u]\n", + index, channel->channel, flow_id); + channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; + channel->rfs_filter_count--; + } } - if (++index == size) { - if (++channel_idx == efx->n_channels) - channel_idx = 0; + if (++index == size) index = 0; - } + /* If we were called with a quota that exceeds the total number + * of filters in the table (which shouldn't happen, but could + * if two callers race), ensure that we don't loop forever - + * stop when we've examined every row of the table. + */ + if (index == start) + break; } - efx->rps_expire_channel = channel_idx; - efx->rps_expire_index = index; + channel->rfs_expire_index = index; mutex_unlock(&efx->rps_mutex); return true; } diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 137632b09c72..9170572346b5 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -59,9 +59,24 @@ config TI_CPSW To compile this driver as a module, choose M here: the module will be called cpsw. +config TI_CPSW_SWITCHDEV + tristate "TI CPSW Switch Support with switchdev" + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST + select NET_SWITCHDEV + select TI_DAVINCI_MDIO + select MFD_SYSCON + select REGMAP + select NET_DEVLINK + imply PHY_TI_GMII_SEL + help + This driver supports TI's CPSW Ethernet Switch. + + To compile this driver as a module, choose M here: the module + will be called cpsw_new. 
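[Editor's aside, not part of the patch] The sfc ARFS rework above replaces the old fixed-quota filter expiry with a quota that scales with table occupancy and elapsed time: quota = rfs_filter_count * time / (30 * HZ), acted on only when it exceeds 20 and capped at rfs_filter_count. Below is a minimal standalone sketch of that arithmetic; the helper name and the harness are hypothetical, only the formula and the 30 second period come from the diff.

#include <stdio.h>

#define HZ 100                          /* assumed tick rate for this sketch */
#define ARFS_EXPIRY_PERIOD (30 * HZ)    /* aim to cover the table about every 30s */

/* Hypothetical helper mirroring the quota computed in efx_filter_rfs_expire() */
static unsigned int arfs_expiry_quota(unsigned int filter_count,
                                      unsigned int jiffies_since_last)
{
        /* Work grows with how many filters are installed and how long it has
         * been since the last pass, so a full table scan completes roughly
         * once per period regardless of table size.
         */
        return filter_count * jiffies_since_last / ARFS_EXPIRY_PERIOD;
}

int main(void)
{
        /* e.g. 240 installed filters, 5 seconds since the previous run */
        unsigned int quota = arfs_expiry_quota(240, 5 * HZ);

        /* 240 * 500 / 3000 = 40 entries examined this pass (>20, so work is done) */
        printf("quota = %u\n", quota);
        return 0;
}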
+ config TI_CPTS bool "TI Common Platform Time Sync (CPTS) Support" - depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST + depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST depends on COMMON_CLK depends on POSIX_TIMERS ---help--- @@ -73,7 +88,7 @@ config TI_CPTS config TI_CPTS_MOD tristate depends on TI_CPTS - default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y + default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y select NET_PTP_CLASSIFY imply PTP_1588_CLOCK default m diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index ed12e1e5df2f..d34df8e5cf94 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -15,6 +15,8 @@ obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o obj-$(CONFIG_TI_CPTS_MOD) += cpts.o obj-$(CONFIG_TI_CPSW) += ti_cpsw.o ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o +obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o +ti_cpsw_new-y := cpsw_switchdev.o cpsw_new.o davinci_cpdma.o cpsw_ale.o cpsw_sl.o cpsw_priv.o cpsw_ethtool.o obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o keystone_netcp-y := netcp_core.o cpsw_ale.o diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 329671e66fe4..6ae4a72e6f43 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -34,7 +34,6 @@ #include <net/page_pool.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> -#include <linux/filter.h> #include <linux/pinctrl/consumer.h> #include <net/pkt_cls.h> @@ -64,10 +63,6 @@ static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; module_param(descs_pool_size, int, 0444); MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool"); -/* The buf includes headroom compatible with both skb and xdpf */ -#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN) -#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long)) - #define for_each_slave(priv, func, arg...) \ do { \ struct cpsw_slave *slave; \ @@ -82,10 +77,16 @@ MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool"); (func)(slave++, ##arg); \ } while (0) -#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long)) +static int cpsw_slave_index_priv(struct cpsw_common *cpsw, + struct cpsw_priv *priv) +{ + return cpsw->data.dual_emac ? 
priv->emac_port : cpsw->data.active_slave; +} -#define CPSW_XDP_CONSUMED 1 -#define CPSW_XDP_PASS 0 +static int cpsw_get_slave_port(u32 slave_num) +{ + return slave_num + 1; +} static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid); @@ -332,218 +333,6 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) cpsw_del_mc_addr); } -void cpsw_intr_enable(struct cpsw_common *cpsw) -{ - writel_relaxed(0xFF, &cpsw->wr_regs->tx_en); - writel_relaxed(0xFF, &cpsw->wr_regs->rx_en); - - cpdma_ctlr_int_ctrl(cpsw->dma, true); - return; -} - -void cpsw_intr_disable(struct cpsw_common *cpsw) -{ - writel_relaxed(0, &cpsw->wr_regs->tx_en); - writel_relaxed(0, &cpsw->wr_regs->rx_en); - - cpdma_ctlr_int_ctrl(cpsw->dma, false); - return; -} - -static int cpsw_is_xdpf_handle(void *handle) -{ - return (unsigned long)handle & BIT(0); -} - -static void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf) -{ - return (void *)((unsigned long)xdpf | BIT(0)); -} - -static struct xdp_frame *cpsw_handle_to_xdpf(void *handle) -{ - return (struct xdp_frame *)((unsigned long)handle & ~BIT(0)); -} - -struct __aligned(sizeof(long)) cpsw_meta_xdp { - struct net_device *ndev; - int ch; -}; - -void cpsw_tx_handler(void *token, int len, int status) -{ - struct cpsw_meta_xdp *xmeta; - struct xdp_frame *xdpf; - struct net_device *ndev; - struct netdev_queue *txq; - struct sk_buff *skb; - int ch; - - if (cpsw_is_xdpf_handle(token)) { - xdpf = cpsw_handle_to_xdpf(token); - xmeta = (void *)xdpf + CPSW_XMETA_OFFSET; - ndev = xmeta->ndev; - ch = xmeta->ch; - xdp_return_frame(xdpf); - } else { - skb = token; - ndev = skb->dev; - ch = skb_get_queue_mapping(skb); - cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb); - dev_kfree_skb_any(skb); - } - - /* Check whether the queue is stopped due to stalled tx dma, if the - * queue is stopped then start the queue as we have free desc for tx - */ - txq = netdev_get_tx_queue(ndev, ch); - if (unlikely(netif_tx_queue_stopped(txq))) - netif_tx_wake_queue(txq); - - ndev->stats.tx_packets++; - ndev->stats.tx_bytes += len; -} - -static void cpsw_rx_vlan_encap(struct sk_buff *skb) -{ - struct cpsw_priv *priv = netdev_priv(skb->dev); - struct cpsw_common *cpsw = priv->cpsw; - u32 rx_vlan_encap_hdr = *((u32 *)skb->data); - u16 vtag, vid, prio, pkt_type; - - /* Remove VLAN header encapsulation word */ - skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE); - - pkt_type = (rx_vlan_encap_hdr >> - CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) & - CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK; - /* Ignore unknown & Priority-tagged packets*/ - if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV || - pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG) - return; - - vid = (rx_vlan_encap_hdr >> - CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) & - VLAN_VID_MASK; - /* Ignore vid 0 and pass packet as is */ - if (!vid) - return; - /* Ignore default vlans in dual mac mode */ - if (cpsw->data.dual_emac && - vid == cpsw->slaves[priv->emac_port].port_vlan) - return; - - prio = (rx_vlan_encap_hdr >> - CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) & - CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK; - - vtag = (prio << VLAN_PRIO_SHIFT) | vid; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); - - /* strip vlan tag for VLAN-tagged packet */ - if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) { - memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); - skb_pull(skb, VLAN_HLEN); - } -} - -static int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf, - struct page *page) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_meta_xdp *xmeta; - 
struct cpdma_chan *txch; - dma_addr_t dma; - int ret, port; - - xmeta = (void *)xdpf + CPSW_XMETA_OFFSET; - xmeta->ndev = priv->ndev; - xmeta->ch = 0; - txch = cpsw->txv[0].ch; - - port = priv->emac_port + cpsw->data.dual_emac; - if (page) { - dma = page_pool_get_dma_addr(page); - dma += xdpf->headroom + sizeof(struct xdp_frame); - ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf), - dma, xdpf->len, port); - } else { - if (sizeof(*xmeta) > xdpf->headroom) { - xdp_return_frame_rx_napi(xdpf); - return -EINVAL; - } - - ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf), - xdpf->data, xdpf->len, port); - } - - if (ret) { - priv->ndev->stats.tx_dropped++; - xdp_return_frame_rx_napi(xdpf); - } - - return ret; -} - -static int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, - struct page *page) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct net_device *ndev = priv->ndev; - int ret = CPSW_XDP_CONSUMED; - struct xdp_frame *xdpf; - struct bpf_prog *prog; - u32 act; - - rcu_read_lock(); - - prog = READ_ONCE(priv->xdp_prog); - if (!prog) { - ret = CPSW_XDP_PASS; - goto out; - } - - act = bpf_prog_run_xdp(prog, xdp); - switch (act) { - case XDP_PASS: - ret = CPSW_XDP_PASS; - break; - case XDP_TX: - xdpf = convert_to_xdp_frame(xdp); - if (unlikely(!xdpf)) - goto drop; - - cpsw_xdp_tx_frame(priv, xdpf, page); - break; - case XDP_REDIRECT: - if (xdp_do_redirect(ndev, xdp, prog)) - goto drop; - - /* Have to flush here, per packet, instead of doing it in bulk - * at the end of the napi handler. The RX devices on this - * particular hardware is sharing a common queue, so the - * incoming device might change per packet. - */ - xdp_do_flush_map(); - break; - default: - bpf_warn_invalid_xdp_action(act); - /* fall through */ - case XDP_ABORTED: - trace_xdp_exception(ndev, prog, act); - /* fall through -- handle aborts by dropping packet */ - case XDP_DROP: - goto drop; - } -out: - rcu_read_unlock(); - return ret; -drop: - rcu_read_unlock(); - page_pool_recycle_direct(cpsw->page_pool[ch], page); - return ret; -} - static unsigned int cpsw_rxbuf_total_len(unsigned int len) { len += CPSW_HEADROOM; @@ -552,123 +341,6 @@ static unsigned int cpsw_rxbuf_total_len(unsigned int len) return SKB_DATA_ALIGN(len); } -static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw, - int size) -{ - struct page_pool_params pp_params; - struct page_pool *pool; - - pp_params.order = 0; - pp_params.flags = PP_FLAG_DMA_MAP; - pp_params.pool_size = size; - pp_params.nid = NUMA_NO_NODE; - pp_params.dma_dir = DMA_BIDIRECTIONAL; - pp_params.dev = cpsw->dev; - - pool = page_pool_create(&pp_params); - if (IS_ERR(pool)) - dev_err(cpsw->dev, "cannot create rx page pool\n"); - - return pool; -} - -static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct xdp_rxq_info *rxq; - struct page_pool *pool; - int ret; - - pool = cpsw->page_pool[ch]; - rxq = &priv->xdp_rxq[ch]; - - ret = xdp_rxq_info_reg(rxq, priv->ndev, ch); - if (ret) - return ret; - - ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); - if (ret) - xdp_rxq_info_unreg(rxq); - - return ret; -} - -static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch) -{ - struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch]; - - if (!xdp_rxq_info_is_reg(rxq)) - return; - - xdp_rxq_info_unreg(rxq); -} - -static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch) -{ - struct page_pool *pool; - int ret = 0, pool_size; - - pool_size = 
cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); - pool = cpsw_create_page_pool(cpsw, pool_size); - if (IS_ERR(pool)) - ret = PTR_ERR(pool); - else - cpsw->page_pool[ch] = pool; - - return ret; -} - -void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw) -{ - struct net_device *ndev; - int i, ch; - - for (ch = 0; ch < cpsw->rx_ch_num; ch++) { - for (i = 0; i < cpsw->data.slaves; i++) { - ndev = cpsw->slaves[i].ndev; - if (!ndev) - continue; - - cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch); - } - - page_pool_destroy(cpsw->page_pool[ch]); - cpsw->page_pool[ch] = NULL; - } -} - -int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw) -{ - struct net_device *ndev; - int i, ch, ret; - - for (ch = 0; ch < cpsw->rx_ch_num; ch++) { - ret = cpsw_create_rx_pool(cpsw, ch); - if (ret) - goto err_cleanup; - - /* using same page pool is allowed as no running rx handlers - * simultaneously for both ndevs - */ - for (i = 0; i < cpsw->data.slaves; i++) { - ndev = cpsw->slaves[i].ndev; - if (!ndev) - continue; - - ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch); - if (ret) - goto err_cleanup; - } - } - - return 0; - -err_cleanup: - cpsw_destroy_xdp_rxqs(cpsw); - - return ret; -} - static void cpsw_rx_handler(void *token, int len, int status) { struct page *new_page, *page = token; @@ -735,7 +407,8 @@ static void cpsw_rx_handler(void *token, int len, int status) xdp.data_hard_start = pa; xdp.rxq = &priv->xdp_rxq[ch]; - ret = cpsw_run_xdp(priv, ch, &xdp, page); + port = priv->emac_port + cpsw->data.dual_emac; + ret = cpsw_run_xdp(priv, ch, &xdp, page, port); if (ret != CPSW_XDP_PASS) goto requeue; @@ -785,274 +458,6 @@ requeue: } } -void cpsw_split_res(struct cpsw_common *cpsw) -{ - u32 consumed_rate = 0, bigest_rate = 0; - struct cpsw_vector *txv = cpsw->txv; - int i, ch_weight, rlim_ch_num = 0; - int budget, bigest_rate_ch = 0; - u32 ch_rate, max_rate; - int ch_budget = 0; - - for (i = 0; i < cpsw->tx_ch_num; i++) { - ch_rate = cpdma_chan_get_rate(txv[i].ch); - if (!ch_rate) - continue; - - rlim_ch_num++; - consumed_rate += ch_rate; - } - - if (cpsw->tx_ch_num == rlim_ch_num) { - max_rate = consumed_rate; - } else if (!rlim_ch_num) { - ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num; - bigest_rate = 0; - max_rate = consumed_rate; - } else { - max_rate = cpsw->speed * 1000; - - /* if max_rate is less then expected due to reduced link speed, - * split proportionally according next potential max speed - */ - if (max_rate < consumed_rate) - max_rate *= 10; - - if (max_rate < consumed_rate) - max_rate *= 10; - - ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate; - ch_budget = (CPSW_POLL_WEIGHT - ch_budget) / - (cpsw->tx_ch_num - rlim_ch_num); - bigest_rate = (max_rate - consumed_rate) / - (cpsw->tx_ch_num - rlim_ch_num); - } - - /* split tx weight/budget */ - budget = CPSW_POLL_WEIGHT; - for (i = 0; i < cpsw->tx_ch_num; i++) { - ch_rate = cpdma_chan_get_rate(txv[i].ch); - if (ch_rate) { - txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate; - if (!txv[i].budget) - txv[i].budget++; - if (ch_rate > bigest_rate) { - bigest_rate_ch = i; - bigest_rate = ch_rate; - } - - ch_weight = (ch_rate * 100) / max_rate; - if (!ch_weight) - ch_weight++; - cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight); - } else { - txv[i].budget = ch_budget; - if (!bigest_rate_ch) - bigest_rate_ch = i; - cpdma_chan_set_weight(cpsw->txv[i].ch, 0); - } - - budget -= txv[i].budget; - } - - if (budget) - txv[bigest_rate_ch].budget += budget; - - /* split rx budget */ - budget = CPSW_POLL_WEIGHT; - ch_budget = budget / cpsw->rx_ch_num; - 
for (i = 0; i < cpsw->rx_ch_num; i++) { - cpsw->rxv[i].budget = ch_budget; - budget -= ch_budget; - } - - if (budget) - cpsw->rxv[0].budget += budget; -} - -static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) -{ - struct cpsw_common *cpsw = dev_id; - - writel(0, &cpsw->wr_regs->tx_en); - cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX); - - if (cpsw->quirk_irq) { - disable_irq_nosync(cpsw->irqs_table[1]); - cpsw->tx_irq_disabled = true; - } - - napi_schedule(&cpsw->napi_tx); - return IRQ_HANDLED; -} - -static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) -{ - struct cpsw_common *cpsw = dev_id; - - cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); - writel(0, &cpsw->wr_regs->rx_en); - - if (cpsw->quirk_irq) { - disable_irq_nosync(cpsw->irqs_table[0]); - cpsw->rx_irq_disabled = true; - } - - napi_schedule(&cpsw->napi_rx); - return IRQ_HANDLED; -} - -static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget) -{ - u32 ch_map; - int num_tx, cur_budget, ch; - struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); - struct cpsw_vector *txv; - - /* process every unprocessed channel */ - ch_map = cpdma_ctrl_txchs_state(cpsw->dma); - for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) { - if (!(ch_map & 0x80)) - continue; - - txv = &cpsw->txv[ch]; - if (unlikely(txv->budget > budget - num_tx)) - cur_budget = budget - num_tx; - else - cur_budget = txv->budget; - - num_tx += cpdma_chan_process(txv->ch, cur_budget); - if (num_tx >= budget) - break; - } - - if (num_tx < budget) { - napi_complete(napi_tx); - writel(0xff, &cpsw->wr_regs->tx_en); - } - - return num_tx; -} - -static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) -{ - struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); - int num_tx; - - num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget); - if (num_tx < budget) { - napi_complete(napi_tx); - writel(0xff, &cpsw->wr_regs->tx_en); - if (cpsw->tx_irq_disabled) { - cpsw->tx_irq_disabled = false; - enable_irq(cpsw->irqs_table[1]); - } - } - - return num_tx; -} - -static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget) -{ - u32 ch_map; - int num_rx, cur_budget, ch; - struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); - struct cpsw_vector *rxv; - - /* process every unprocessed channel */ - ch_map = cpdma_ctrl_rxchs_state(cpsw->dma); - for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) { - if (!(ch_map & 0x01)) - continue; - - rxv = &cpsw->rxv[ch]; - if (unlikely(rxv->budget > budget - num_rx)) - cur_budget = budget - num_rx; - else - cur_budget = rxv->budget; - - num_rx += cpdma_chan_process(rxv->ch, cur_budget); - if (num_rx >= budget) - break; - } - - if (num_rx < budget) { - napi_complete_done(napi_rx, num_rx); - writel(0xff, &cpsw->wr_regs->rx_en); - } - - return num_rx; -} - -static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) -{ - struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); - int num_rx; - - num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget); - if (num_rx < budget) { - napi_complete_done(napi_rx, num_rx); - writel(0xff, &cpsw->wr_regs->rx_en); - if (cpsw->rx_irq_disabled) { - cpsw->rx_irq_disabled = false; - enable_irq(cpsw->irqs_table[0]); - } - } - - return num_rx; -} - -static inline void soft_reset(const char *module, void __iomem *reg) -{ - unsigned long timeout = jiffies + HZ; - - writel_relaxed(1, reg); - do { - cpu_relax(); - } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies)); - - WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module); -} - -static void cpsw_set_slave_mac(struct cpsw_slave *slave, - struct 
cpsw_priv *priv) -{ - slave_write(slave, mac_hi(priv->mac_addr), SA_HI); - slave_write(slave, mac_lo(priv->mac_addr), SA_LO); -} - -static bool cpsw_shp_is_off(struct cpsw_priv *priv) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - u32 shift, mask, val; - - val = readl_relaxed(&cpsw->regs->ptype); - - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; - mask = 7 << shift; - val = val & mask; - - return !val; -} - -static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - u32 shift, mask, val; - - val = readl_relaxed(&cpsw->regs->ptype); - - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; - mask = (1 << --fifo) << shift; - val = on ? val | mask : val & ~mask; - - writel_relaxed(val, &cpsw->regs->ptype); -} - static void _cpsw_adjust_link(struct cpsw_slave *slave, struct cpsw_priv *priv, bool *link) { @@ -1118,44 +523,6 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, slave->mac_control = mac_control; } -static int cpsw_get_common_speed(struct cpsw_common *cpsw) -{ - int i, speed; - - for (i = 0, speed = 0; i < cpsw->data.slaves; i++) - if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link) - speed += cpsw->slaves[i].phy->speed; - - return speed; -} - -static int cpsw_need_resplit(struct cpsw_common *cpsw) -{ - int i, rlim_ch_num; - int speed, ch_rate; - - /* re-split resources only in case speed was changed */ - speed = cpsw_get_common_speed(cpsw); - if (speed == cpsw->speed || !speed) - return 0; - - cpsw->speed = speed; - - for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) { - ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch); - if (!ch_rate) - break; - - rlim_ch_num++; - } - - /* cases not dependent on speed */ - if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num) - return 0; - - return 1; -} - static void cpsw_adjust_link(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -1348,51 +715,6 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) } } -int cpsw_fill_rx_channels(struct cpsw_priv *priv) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_meta_xdp *xmeta; - struct page_pool *pool; - struct page *page; - int ch_buf_num; - int ch, i, ret; - dma_addr_t dma; - - for (ch = 0; ch < cpsw->rx_ch_num; ch++) { - pool = cpsw->page_pool[ch]; - ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); - for (i = 0; i < ch_buf_num; i++) { - page = page_pool_dev_alloc_pages(pool); - if (!page) { - cpsw_err(priv, ifup, "allocate rx page err\n"); - return -ENOMEM; - } - - xmeta = page_address(page) + CPSW_XMETA_OFFSET; - xmeta->ndev = priv->ndev; - xmeta->ch = ch; - - dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM; - ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch, - page, dma, - cpsw->rx_packet_max, - 0); - if (ret < 0) { - cpsw_err(priv, ifup, - "cannot submit page to channel %d rx, error %d\n", - ch, ret); - page_pool_recycle_direct(pool, page); - return ret; - } - } - - cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n", - ch, ch_buf_num); - } - - return 0; -} - static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw) { u32 slave_port; @@ -1410,221 +732,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw) cpsw_sl_ctl_reset(slave->mac_sl); } -static int cpsw_tc_to_fifo(int tc, int num_tc) -{ - if (tc == num_tc - 1) - return 0; - - return 
CPSW_FIFO_SHAPERS_NUM - tc; -} - -static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw) -{ - struct cpsw_common *cpsw = priv->cpsw; - u32 val = 0, send_pct, shift; - struct cpsw_slave *slave; - int pct = 0, i; - - if (bw > priv->shp_cfg_speed * 1000) - goto err; - - /* shaping has to stay enabled for highest fifos linearly - * and fifo bw no more then interface can allow - */ - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - send_pct = slave_read(slave, SEND_PERCENT); - for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) { - if (!bw) { - if (i >= fifo || !priv->fifo_bw[i]) - continue; - - dev_warn(priv->dev, "Prev FIFO%d is shaped", i); - continue; - } - - if (!priv->fifo_bw[i] && i > fifo) { - dev_err(priv->dev, "Upper FIFO%d is not shaped", i); - return -EINVAL; - } - - shift = (i - 1) * 8; - if (i == fifo) { - send_pct &= ~(CPSW_PCT_MASK << shift); - val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10); - if (!val) - val = 1; - - send_pct |= val << shift; - pct += val; - continue; - } - - if (priv->fifo_bw[i]) - pct += (send_pct >> shift) & CPSW_PCT_MASK; - } - - if (pct >= 100) - goto err; - - slave_write(slave, send_pct, SEND_PERCENT); - priv->fifo_bw[fifo] = bw; - - dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo, - DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100)); - - return 0; -err: - dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration"); - return -EINVAL; -} - -static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - u32 tx_in_ctl_rg, val; - int ret; - - ret = cpsw_set_fifo_bw(priv, fifo, bw); - if (ret) - return ret; - - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ? - CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL; - - if (!bw) - cpsw_fifo_shp_on(priv, fifo, bw); - - val = slave_read(slave, tx_in_ctl_rg); - if (cpsw_shp_is_off(priv)) { - /* disable FIFOs rate limited queues */ - val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT); - - /* set type of FIFO queues to normal priority mode */ - val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT); - - /* set type of FIFO queues to be rate limited */ - if (bw) - val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT; - else - priv->shp_cfg_speed = 0; - } - - /* toggle a FIFO rate limited queue */ - if (bw) - val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); - else - val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); - slave_write(slave, val, tx_in_ctl_rg); - - /* FIFO transmit shape enable */ - cpsw_fifo_shp_on(priv, fifo, bw); - return 0; -} - -/* Defaults: - * class A - prio 3 - * class B - prio 2 - * shaping for class A should be set first - */ -static int cpsw_set_cbs(struct net_device *ndev, - struct tc_cbs_qopt_offload *qopt) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - int prev_speed = 0; - int tc, ret, fifo; - u32 bw = 0; - - tc = netdev_txq_to_tc(priv->ndev, qopt->queue); - - /* enable channels in backward order, as highest FIFOs must be rate - * limited first and for compliance with CPDMA rate limited channels - * that also used in bacward order. FIFO0 cannot be rate limited. 
- */ - fifo = cpsw_tc_to_fifo(tc, ndev->num_tc); - if (!fifo) { - dev_err(priv->dev, "Last tc%d can't be rate limited", tc); - return -EINVAL; - } - - /* do nothing, it's disabled anyway */ - if (!qopt->enable && !priv->fifo_bw[fifo]) - return 0; - - /* shapers can be set if link speed is known */ - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - if (slave->phy && slave->phy->link) { - if (priv->shp_cfg_speed && - priv->shp_cfg_speed != slave->phy->speed) - prev_speed = priv->shp_cfg_speed; - - priv->shp_cfg_speed = slave->phy->speed; - } - - if (!priv->shp_cfg_speed) { - dev_err(priv->dev, "Link speed is not known"); - return -1; - } - - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); - return ret; - } - - bw = qopt->enable ? qopt->idleslope : 0; - ret = cpsw_set_fifo_rlimit(priv, fifo, bw); - if (ret) { - priv->shp_cfg_speed = prev_speed; - prev_speed = 0; - } - - if (bw && prev_speed) - dev_warn(priv->dev, - "Speed was changed, CBS shaper speeds are changed!"); - - pm_runtime_put_sync(cpsw->dev); - return ret; -} - -static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) -{ - int fifo, bw; - - for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) { - bw = priv->fifo_bw[fifo]; - if (!bw) - continue; - - cpsw_set_fifo_rlimit(priv, fifo, bw); - } -} - -static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) -{ - struct cpsw_common *cpsw = priv->cpsw; - u32 tx_prio_map = 0; - int i, tc, fifo; - u32 tx_prio_rg; - - if (!priv->mqprio_hw) - return; - - for (i = 0; i < 8; i++) { - tc = netdev_get_prio_tc_map(priv->ndev, i); - fifo = CPSW_FIFO_SHAPERS_NUM - tc; - tx_prio_map |= fifo << (4 * i); - } - - tx_prio_rg = cpsw->version == CPSW_VERSION_1 ? - CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; - - slave_write(slave, tx_prio_map, tx_prio_rg); -} - static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) { struct cpsw_priv *priv = arg; @@ -1853,207 +960,6 @@ fail: return NETDEV_TX_BUSY; } -#if IS_ENABLED(CONFIG_TI_CPTS) - -static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) -{ - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave]; - u32 ts_en, seq_id; - - if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) { - slave_write(slave, 0, CPSW1_TS_CTL); - return; - } - - seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; - ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; - - if (priv->tx_ts_enabled) - ts_en |= CPSW_V1_TS_TX_EN; - - if (priv->rx_ts_enabled) - ts_en |= CPSW_V1_TS_RX_EN; - - slave_write(slave, ts_en, CPSW1_TS_CTL); - slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE); -} - -static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) -{ - struct cpsw_slave *slave; - struct cpsw_common *cpsw = priv->cpsw; - u32 ctrl, mtype; - - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - - ctrl = slave_read(slave, CPSW2_CONTROL); - switch (cpsw->version) { - case CPSW_VERSION_2: - ctrl &= ~CTRL_V2_ALL_TS_MASK; - - if (priv->tx_ts_enabled) - ctrl |= CTRL_V2_TX_TS_BITS; - - if (priv->rx_ts_enabled) - ctrl |= CTRL_V2_RX_TS_BITS; - break; - case CPSW_VERSION_3: - default: - ctrl &= ~CTRL_V3_ALL_TS_MASK; - - if (priv->tx_ts_enabled) - ctrl |= CTRL_V3_TX_TS_BITS; - - if (priv->rx_ts_enabled) - ctrl |= CTRL_V3_RX_TS_BITS; - break; - } - - mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; - - slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); - slave_write(slave, ctrl, CPSW2_CONTROL); - writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype); - 
writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype); -} - -static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) -{ - struct cpsw_priv *priv = netdev_priv(dev); - struct hwtstamp_config cfg; - struct cpsw_common *cpsw = priv->cpsw; - - if (cpsw->version != CPSW_VERSION_1 && - cpsw->version != CPSW_VERSION_2 && - cpsw->version != CPSW_VERSION_3) - return -EOPNOTSUPP; - - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; - - /* reserved for future extensions */ - if (cfg.flags) - return -EINVAL; - - if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) - return -ERANGE; - - switch (cfg.rx_filter) { - case HWTSTAMP_FILTER_NONE: - priv->rx_ts_enabled = 0; - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_NTP_ALL: - return -ERANGE; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - break; - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - break; - default: - return -ERANGE; - } - - priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON; - - switch (cpsw->version) { - case CPSW_VERSION_1: - cpsw_hwtstamp_v1(priv); - break; - case CPSW_VERSION_2: - case CPSW_VERSION_3: - cpsw_hwtstamp_v2(priv); - break; - default: - WARN_ON(1); - } - - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; -} - -static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) -{ - struct cpsw_common *cpsw = ndev_to_cpsw(dev); - struct cpsw_priv *priv = netdev_priv(dev); - struct hwtstamp_config cfg; - - if (cpsw->version != CPSW_VERSION_1 && - cpsw->version != CPSW_VERSION_2 && - cpsw->version != CPSW_VERSION_3) - return -EOPNOTSUPP; - - cfg.flags = 0; - cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = priv->rx_ts_enabled; - - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; -} -#else -static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) -{ - return -EOPNOTSUPP; -} - -static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) -{ - return -EOPNOTSUPP; -} -#endif /*CONFIG_TI_CPTS*/ - -static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) -{ - struct cpsw_priv *priv = netdev_priv(dev); - struct cpsw_common *cpsw = priv->cpsw; - int slave_no = cpsw_slave_index(cpsw, priv); - - if (!netif_running(dev)) - return -EINVAL; - - switch (cmd) { - case SIOCSHWTSTAMP: - return cpsw_hwtstamp_set(dev, req); - case SIOCGHWTSTAMP: - return cpsw_hwtstamp_get(dev, req); - } - - if (!cpsw->slaves[slave_no].phy) - return -EOPNOTSUPP; - return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd); -} - -static void cpsw_ndo_tx_timeout(struct net_device *ndev) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int ch; - - cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); - ndev->stats.tx_errors++; - cpsw_intr_disable(cpsw); - for (ch = 0; ch < cpsw->tx_ch_num; ch++) { - cpdma_chan_stop(cpsw->txv[ch].ch); - cpdma_chan_start(cpsw->txv[ch].ch); - } - - cpsw_intr_enable(cpsw); - netif_trans_update(ndev); - netif_tx_wake_all_queues(ndev); -} - static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) { struct cpsw_priv *priv = netdev_priv(ndev); @@ -2215,168 +1121,13 @@ err: return ret; } -static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - struct cpsw_slave *slave; - u32 min_rate; - u32 ch_rate; - int i, ret; - - ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate; - if (ch_rate == rate) - return 0; - - ch_rate = rate * 1000; - min_rate = cpdma_chan_get_min_rate(cpsw->dma); - if ((ch_rate < min_rate && ch_rate)) { - dev_err(priv->dev, "The channel rate cannot be less than %dMbps", - min_rate); - return -EINVAL; - } - - if (rate > cpsw->speed) { - dev_err(priv->dev, "The channel rate cannot be more than 2Gbps"); - return -EINVAL; - } - - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); - return ret; - } - - ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate); - pm_runtime_put(cpsw->dev); - - if (ret) - return ret; - - /* update rates for slaves tx queues */ - for (i = 0; i < cpsw->data.slaves; i++) { - slave = &cpsw->slaves[i]; - if (!slave->ndev) - continue; - - netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate; - } - - cpsw_split_res(cpsw); - return ret; -} - -static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) -{ - struct tc_mqprio_qopt_offload *mqprio = type_data; - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int fifo, num_tc, count, offset; - struct cpsw_slave *slave; - u32 tx_prio_map = 0; - int i, tc, ret; - - num_tc = mqprio->qopt.num_tc; - if (num_tc > CPSW_TC_NUM) - return -EINVAL; - - if (mqprio->mode != TC_MQPRIO_MODE_DCB) - return -EINVAL; - - ret = pm_runtime_get_sync(cpsw->dev); - if (ret < 0) { - pm_runtime_put_noidle(cpsw->dev); - return ret; - } - - if (num_tc) { - for (i = 0; i < 8; i++) { - tc = mqprio->qopt.prio_tc_map[i]; - fifo = cpsw_tc_to_fifo(tc, num_tc); - tx_prio_map |= fifo << (4 * i); - } - - netdev_set_num_tc(ndev, num_tc); - for (i = 0; i < num_tc; i++) { - count = mqprio->qopt.count[i]; - offset = mqprio->qopt.offset[i]; - netdev_set_tc_queue(ndev, i, count, offset); - } - } - - if 
(!mqprio->qopt.hw) { - /* restore default configuration */ - netdev_reset_tc(ndev); - tx_prio_map = TX_PRIORITY_MAPPING; - } - - priv->mqprio_hw = mqprio->qopt.hw; - - offset = cpsw->version == CPSW_VERSION_1 ? - CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; - - slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; - slave_write(slave, tx_prio_map, offset); - - pm_runtime_put_sync(cpsw->dev); - - return 0; -} - -static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, - void *type_data) -{ - switch (type) { - case TC_SETUP_QDISC_CBS: - return cpsw_set_cbs(ndev, type_data); - - case TC_SETUP_QDISC_MQPRIO: - return cpsw_set_mqprio(ndev, type_data); - - default: - return -EOPNOTSUPP; - } -} - -static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf) -{ - struct bpf_prog *prog = bpf->prog; - - if (!priv->xdpi.prog && !prog) - return 0; - - if (!xdp_attachment_flags_ok(&priv->xdpi, bpf)) - return -EBUSY; - - WRITE_ONCE(priv->xdp_prog, prog); - - xdp_attachment_setup(&priv->xdpi, bpf); - - return 0; -} - -static int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) -{ - struct cpsw_priv *priv = netdev_priv(ndev); - - switch (bpf->command) { - case XDP_SETUP_PROG: - return cpsw_xdp_prog_setup(priv, bpf); - - case XDP_QUERY_PROG: - return xdp_attachment_query(&priv->xdpi, bpf); - - default: - return -EINVAL; - } -} - static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames, u32 flags) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; struct xdp_frame *xdpf; - int i, drops = 0; + int i, drops = 0, port; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; @@ -2389,7 +1140,8 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, continue; } - if (cpsw_xdp_tx_frame(priv, xdpf, NULL)) + port = priv->emac_port + cpsw->data.dual_emac; + if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port)) drops++; } @@ -2775,6 +1527,8 @@ static int cpsw_probe(struct platform_device *pdev) return -ENOMEM; platform_set_drvdata(pdev, cpsw); + cpsw_slave_index = cpsw_slave_index_priv; + cpsw->dev = dev; mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 84025dcc78d5..929f3d3354e3 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -5,6 +5,8 @@ * Copyright (C) 2012 Texas Instruments * */ +#include <linux/bitmap.h> +#include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -382,6 +384,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, int flags, u16 vid) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int mcast_members; int idx; idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? 
vid : 0); @@ -390,11 +393,15 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask, cpsw_ale_read(ale, idx, ale_entry); - if (port_mask) - cpsw_ale_set_port_mask(ale_entry, port_mask, + if (port_mask) { + mcast_members = cpsw_ale_get_port_mask(ale_entry, + ale->port_mask_bits); + mcast_members &= ~port_mask; + cpsw_ale_set_port_mask(ale_entry, mcast_members, ale->port_mask_bits); - else + } else { cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + } cpsw_ale_write(ale, idx, ale_entry); return 0; @@ -415,7 +422,18 @@ static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry, writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx)); } -int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, +static void cpsw_ale_set_vlan_untag(struct cpsw_ale *ale, u32 *ale_entry, + u16 vid, int untag_mask) +{ + cpsw_ale_set_vlan_untag_force(ale_entry, + untag_mask, ale->vlan_field_bits); + if (untag_mask & ALE_PORT_HOST) + bitmap_set(ale->p0_untag_vid_mask, vid, 1); + else + bitmap_clear(ale->p0_untag_vid_mask, vid, 1); +} + +int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag, int reg_mcast, int unreg_mcast) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; @@ -427,8 +445,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); cpsw_ale_set_vlan_id(ale_entry, vid); + cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag); - cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits); if (!ale->params.nu_switch_ale) { cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast, ale->vlan_field_bits); @@ -437,7 +455,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, } else { cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast); } - cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits); + cpsw_ale_set_vlan_member_list(ale_entry, port_mask, + ale->vlan_field_bits); if (idx < 0) idx = cpsw_ale_match_free(ale); @@ -450,6 +469,41 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, return 0; } +static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry, + u16 vid, int port_mask) +{ + int reg_mcast, unreg_mcast; + int members, untag; + + members = cpsw_ale_get_vlan_member_list(ale_entry, + ale->vlan_field_bits); + members &= ~port_mask; + + untag = cpsw_ale_get_vlan_untag_force(ale_entry, + ale->vlan_field_bits); + reg_mcast = cpsw_ale_get_vlan_reg_mcast(ale_entry, + ale->vlan_field_bits); + unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry, + ale->vlan_field_bits); + untag &= members; + reg_mcast &= members; + unreg_mcast &= members; + + cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag); + + if (!ale->params.nu_switch_ale) { + cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast, + ale->vlan_field_bits); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast, + ale->vlan_field_bits); + } else { + cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, + unreg_mcast); + } + cpsw_ale_set_vlan_member_list(ale_entry, members, + ale->vlan_field_bits); +} + int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; @@ -461,16 +515,83 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) cpsw_ale_read(ale, idx, ale_entry); - if (port_mask) - cpsw_ale_set_vlan_member_list(ale_entry, port_mask, - ale->vlan_field_bits); - else + if (port_mask) { + cpsw_ale_del_vlan_modify(ale, ale_entry, vid, port_mask); + 
} else { + cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0); cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + } cpsw_ale_write(ale, idx, ale_entry); + return 0; } +int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask, + int untag_mask, int reg_mask, int unreg_mask) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int reg_mcast_members, unreg_mcast_members; + int vlan_members, untag_members; + int idx, ret = 0; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx >= 0) + cpsw_ale_read(ale, idx, ale_entry); + + vlan_members = cpsw_ale_get_vlan_member_list(ale_entry, + ale->vlan_field_bits); + reg_mcast_members = cpsw_ale_get_vlan_reg_mcast(ale_entry, + ale->vlan_field_bits); + unreg_mcast_members = + cpsw_ale_get_vlan_unreg_mcast(ale_entry, + ale->vlan_field_bits); + untag_members = cpsw_ale_get_vlan_untag_force(ale_entry, + ale->vlan_field_bits); + + vlan_members |= port_mask; + untag_members = (untag_members & ~port_mask) | untag_mask; + reg_mcast_members = (reg_mcast_members & ~port_mask) | reg_mask; + unreg_mcast_members = (unreg_mcast_members & ~port_mask) | unreg_mask; + + ret = cpsw_ale_add_vlan(ale, vid, vlan_members, untag_members, + reg_mcast_members, unreg_mcast_members); + if (ret) { + dev_err(ale->params.dev, "Unable to add vlan\n"); + return ret; + } + dev_dbg(ale->params.dev, "port mask 0x%x untag 0x%x\n", vlan_members, + untag_mask); + + return ret; +} + +void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask, + bool add) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int unreg_members = 0; + int type, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_VLAN) + continue; + + unreg_members = + cpsw_ale_get_vlan_unreg_mcast(ale_entry, + ale->vlan_field_bits); + if (add) + unreg_members |= unreg_mcast_mask; + else + unreg_members &= ~unreg_mcast_mask; + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_members, + ale->vlan_field_bits); + cpsw_ale_write(ale, idx, ale_entry); + } +} + void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port) { u32 ale_entry[ALE_ENTRY_WORDS]; @@ -779,6 +900,7 @@ void cpsw_ale_start(struct cpsw_ale *ale) void cpsw_ale_stop(struct cpsw_ale *ale) { del_timer_sync(&ale->timer); + cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); } @@ -791,6 +913,13 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) if (!ale) return NULL; + ale->p0_untag_vid_mask = + devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID), + sizeof(unsigned long), + GFP_KERNEL); + if (!ale->p0_untag_vid_mask) + return ERR_PTR(-ENOMEM); + ale->params = *params; ale->ageout = ale->params.ale_ageout * HZ; @@ -862,6 +991,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS; } + cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); return ale; } diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 370df254eb12..70d0955c2652 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -35,6 +35,7 @@ struct cpsw_ale { u32 port_mask_bits; u32 port_num_bits; u32 vlan_field_bits; + unsigned long *p0_untag_vid_mask; }; enum cpsw_ale_control { @@ -115,4 +116,14 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control, int value); void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data); +static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid) 
+{ + return test_bit(vid, ale->p0_untag_vid_mask); +} + +int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask, + int untag_mask, int reg_mcast, int unreg_mcast); +void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask, + bool add); + #endif diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c new file mode 100644 index 000000000000..71215db7934b --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -0,0 +1,2048 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Texas Instruments Ethernet Switch Driver + * + * Copyright (C) 2019 Texas Instruments + */ + +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/timer.h> +#include <linux/module.h> +#include <linux/irqreturn.h> +#include <linux/interrupt.h> +#include <linux/if_ether.h> +#include <linux/etherdevice.h> +#include <linux/net_tstamp.h> +#include <linux/phy.h> +#include <linux/phy/phy.h> +#include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <linux/gpio/consumer.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_device.h> +#include <linux/if_vlan.h> +#include <linux/kmemleak.h> +#include <linux/sys_soc.h> + +#include <net/page_pool.h> +#include <net/pkt_cls.h> +#include <net/devlink.h> + +#include "cpsw.h" +#include "cpsw_ale.h" +#include "cpsw_priv.h" +#include "cpsw_sl.h" +#include "cpsw_switchdev.h" +#include "cpts.h" +#include "davinci_cpdma.h" + +#include <net/pkt_sched.h> + +static int debug_level; +static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT; +static int rx_packet_max = CPSW_MAX_PACKET_SIZE; +static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT; + +struct cpsw_devlink { + struct cpsw_common *cpsw; +}; + +enum cpsw_devlink_param_id { + CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + CPSW_DL_PARAM_SWITCH_MODE, + CPSW_DL_PARAM_ALE_BYPASS, +}; + +/* struct cpsw_common is not needed, kept here for compatibility + * reasons witrh the old driver + */ +static int cpsw_slave_index_priv(struct cpsw_common *cpsw, + struct cpsw_priv *priv) +{ + if (priv->emac_port == HOST_PORT_NUM) + return -1; + + return priv->emac_port - 1; +} + +static bool cpsw_is_switch_en(struct cpsw_common *cpsw) +{ + return !cpsw->data.dual_emac; +} + +static void cpsw_set_promiscious(struct net_device *ndev, bool enable) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + bool enable_uni = false; + int i; + + if (cpsw_is_switch_en(cpsw)) + return; + + /* Enabling promiscuous mode for one interface will be + * common for both the interface as the interface shares + * the same hardware resource. 
+ */ + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].ndev && + (cpsw->slaves[i].ndev->flags & IFF_PROMISC)) + enable_uni = true; + + if (!enable && enable_uni) { + enable = enable_uni; + dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n"); + } + + if (enable) { + /* Enable unknown unicast, reg/unreg mcast */ + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, + ALE_P0_UNI_FLOOD, 1); + + dev_dbg(cpsw->dev, "promiscuity enabled\n"); + } else { + /* Disable unknown unicast */ + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, + ALE_P0_UNI_FLOOD, 0); + dev_dbg(cpsw->dev, "promiscuity disabled\n"); + } +} + +/** + * cpsw_set_mc - adds multicast entry to the table if it's not added or deletes + * if it's not deleted + * @ndev: device to sync + * @addr: address to be added or deleted + * @vid: vlan id, if vid < 0 set/unset address for real device + * @add: add address if the flag is set or remove otherwise + */ +static int cpsw_set_mc(struct net_device *ndev, const u8 *addr, + int vid, int add) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int mask, flags, ret, slave_no; + + slave_no = cpsw_slave_index(cpsw, priv); + if (vid < 0) + vid = cpsw->slaves[slave_no].port_vlan; + + mask = ALE_PORT_HOST; + flags = vid ? ALE_VLAN : 0; + + if (add) + ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0); + else + ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid); + + return ret; +} + +static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx) +{ + struct addr_sync_ctx *sync_ctx = ctx; + struct netdev_hw_addr *ha; + int found = 0, ret = 0; + + if (!vdev || !(vdev->flags & IFF_UP)) + return 0; + + /* vlan address is relevant if its sync_cnt != 0 */ + netdev_for_each_mc_addr(ha, vdev) { + if (ether_addr_equal(ha->addr, sync_ctx->addr)) { + found = ha->sync_cnt; + break; + } + } + + if (found) + sync_ctx->consumed++; + + if (sync_ctx->flush) { + if (!found) + cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); + return 0; + } + + if (found) + ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1); + + return ret; +} + +static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx sync_ctx; + int ret; + + sync_ctx.consumed = 0; + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.flush = 0; + + ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); + if (sync_ctx.consumed < num && !ret) + ret = cpsw_set_mc(ndev, addr, -1, 1); + + return ret; +} + +static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx sync_ctx; + + sync_ctx.consumed = 0; + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.flush = 1; + + vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); + if (sync_ctx.consumed == num) + cpsw_set_mc(ndev, addr, -1, 0); + + return 0; +} + +static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx) +{ + struct addr_sync_ctx *sync_ctx = ctx; + struct netdev_hw_addr *ha; + int found = 0; + + if (!vdev || !(vdev->flags & IFF_UP)) + return 0; + + /* vlan address is relevant if its sync_cnt != 0 */ + netdev_for_each_mc_addr(ha, vdev) { + if (ether_addr_equal(ha->addr, sync_ctx->addr)) { + found = ha->sync_cnt; + break; + } + } + + if (!found) + return 0; + + sync_ctx->consumed++; + cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); + return 0; +} + +static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx 
sync_ctx; + + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.consumed = 0; + + vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx); + if (sync_ctx.consumed < num) + cpsw_set_mc(ndev, addr, -1, 0); + + return 0; +} + +static void cpsw_ndo_set_rx_mode(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + + if (ndev->flags & IFF_PROMISC) { + /* Enable promiscuous mode */ + cpsw_set_promiscious(ndev, true); + cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port); + return; + } + + /* Disable promiscuous mode */ + cpsw_set_promiscious(ndev, false); + + /* Restore allmulti on vlans if necessary */ + cpsw_ale_set_allmulti(cpsw->ale, + ndev->flags & IFF_ALLMULTI, priv->emac_port); + + /* add/remove mcast address either for real netdev or for vlan */ + __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, + cpsw_del_mc_addr); +} + +static unsigned int cpsw_rxbuf_total_len(unsigned int len) +{ + len += CPSW_HEADROOM; + len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + return SKB_DATA_ALIGN(len); +} + +static void cpsw_rx_handler(void *token, int len, int status) +{ + struct page *new_page, *page = token; + void *pa = page_address(page); + int headroom = CPSW_HEADROOM; + struct cpsw_meta_xdp *xmeta; + struct cpsw_common *cpsw; + struct net_device *ndev; + int port, ch, pkt_size; + struct cpsw_priv *priv; + struct page_pool *pool; + struct sk_buff *skb; + struct xdp_buff xdp; + int ret = 0; + dma_addr_t dma; + + xmeta = pa + CPSW_XMETA_OFFSET; + cpsw = ndev_to_cpsw(xmeta->ndev); + ndev = xmeta->ndev; + pkt_size = cpsw->rx_packet_max; + ch = xmeta->ch; + + if (status >= 0) { + port = CPDMA_RX_SOURCE_PORT(status); + if (port) + ndev = cpsw->slaves[--port].ndev; + } + + priv = netdev_priv(ndev); + pool = cpsw->page_pool[ch]; + + if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { + /* In dual emac mode check for all interfaces */ + if (cpsw->usage_count && status >= 0) { + /* The packet received is for the interface which + * is already down and the other interface is up + * and running, instead of freeing which results + * in reducing of the number of rx descriptor in + * DMA engine, requeue page back to cpdma. 
+ */ + new_page = page; + goto requeue; + } + + /* the interface is going down, pages are purged */ + page_pool_recycle_direct(pool, page); + return; + } + + new_page = page_pool_dev_alloc_pages(pool); + if (unlikely(!new_page)) { + new_page = page; + ndev->stats.rx_dropped++; + goto requeue; + } + + if (priv->xdp_prog) { + if (status & CPDMA_RX_VLAN_ENCAP) { + xdp.data = pa + CPSW_HEADROOM + + CPSW_RX_VLAN_ENCAP_HDR_SIZE; + xdp.data_end = xdp.data + len - + CPSW_RX_VLAN_ENCAP_HDR_SIZE; + } else { + xdp.data = pa + CPSW_HEADROOM; + xdp.data_end = xdp.data + len; + } + + xdp_set_data_meta_invalid(&xdp); + + xdp.data_hard_start = pa; + xdp.rxq = &priv->xdp_rxq[ch]; + + ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port); + if (ret != CPSW_XDP_PASS) + goto requeue; + + /* XDP prog might have changed packet data and boundaries */ + len = xdp.data_end - xdp.data; + headroom = xdp.data - xdp.data_hard_start; + + /* XDP prog can modify vlan tag, so can't use encap header */ + status &= ~CPDMA_RX_VLAN_ENCAP; + } + + /* pass skb to netstack if no XDP prog or returned XDP_PASS */ + skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size)); + if (!skb) { + ndev->stats.rx_dropped++; + page_pool_recycle_direct(pool, page); + goto requeue; + } + + skb->offload_fwd_mark = priv->offload_fwd_mark; + skb_reserve(skb, headroom); + skb_put(skb, len); + skb->dev = ndev; + if (status & CPDMA_RX_VLAN_ENCAP) + cpsw_rx_vlan_encap(skb); + if (priv->rx_ts_enabled) + cpts_rx_timestamp(cpsw->cpts, skb); + skb->protocol = eth_type_trans(skb, ndev); + + /* unmap page as no netstack skb page recycling */ + page_pool_release_page(pool, page); + netif_receive_skb(skb); + + ndev->stats.rx_bytes += len; + ndev->stats.rx_packets++; + +requeue: + xmeta = page_address(new_page) + CPSW_XMETA_OFFSET; + xmeta->ndev = ndev; + xmeta->ch = ch; + + dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM; + ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma, + pkt_size, 0); + if (ret < 0) { + WARN_ON(ret == -ENOMEM); + page_pool_recycle_direct(pool, new_page); + } +} + +static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, + unsigned short vid) +{ + struct cpsw_common *cpsw = priv->cpsw; + int unreg_mcast_mask = 0; + int mcast_mask; + u32 port_mask; + int ret; + + port_mask = (1 << priv->emac_port) | ALE_PORT_HOST; + + mcast_mask = ALE_PORT_HOST; + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = mcast_mask; + + ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask, + unreg_mcast_mask); + if (ret != 0) + return ret; + + ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); + if (ret != 0) + goto clean_vid; + + ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, + mcast_mask, ALE_VLAN, vid, 0); + if (ret != 0) + goto clean_vlan_ucast; + return 0; + +clean_vlan_ucast: + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); +clean_vid: + cpsw_ale_del_vlan(cpsw->ale, vid, 0); + return ret; +} + +static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret, i; + + if (cpsw_is_switch_en(cpsw)) { + dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n"); + return 0; + } + + if (vid == cpsw->data.default_vlan) + return 0; + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + /* In dual EMAC, reserved VLAN id should not be used for + * creating VLAN 
interfaces as this can break the dual + * EMAC port separation + */ + for (i = 0; i < cpsw->data.slaves; i++) { + if (cpsw->slaves[i].ndev && + vid == cpsw->slaves[i].port_vlan) { + ret = -EINVAL; + goto err; + } + } + + dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid); + ret = cpsw_add_vlan_ale_entry(priv, vid); +err: + pm_runtime_put(cpsw->dev); + return ret; +} + +static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) +{ + struct cpsw_priv *priv = arg; + + if (!vdev || !vid) + return 0; + + cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid); + return 0; +} + +/* restore resources after port reset */ +static void cpsw_restore(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + + /* restore vlan configurations */ + vlan_for_each(priv->ndev, cpsw_restore_vlans, priv); + + /* restore MQPRIO offload */ + cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv); + + /* restore CBS offload */ + cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv); +} + +static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw) +{ + char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0}; + + cpsw_ale_add_mcast(cpsw->ale, stpa, + ALE_PORT_HOST, ALE_SUPER, 0, + ALE_MCAST_BLOCK_LEARN_FWD); +} + +static void cpsw_init_host_port_switch(struct cpsw_common *cpsw) +{ + int vlan = cpsw->data.default_vlan; + + writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl); + + writel(vlan, &cpsw->host_port_regs->port_vlan); + + cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, + ALE_ALL_PORTS, ALE_ALL_PORTS, + ALE_PORT_1 | ALE_PORT_2); + + cpsw_init_stp_ale_entry(cpsw); + + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1); + dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n"); + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0); +} + +static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw) +{ + int vlan = cpsw->data.default_vlan; + + writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl); + + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0); + dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n"); + + writel(vlan, &cpsw->host_port_regs->port_vlan); + + cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); + /* learning make no sense in dual_mac mode */ + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1); +} + +static void cpsw_init_host_port(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 control_reg; + + /* soft reset the controller and initialize ale */ + soft_reset("cpsw", &cpsw->regs->soft_reset); + cpsw_ale_start(cpsw->ale); + + /* switch to vlan unaware mode */ + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, + CPSW_ALE_VLAN_AWARE); + control_reg = readl(&cpsw->regs->control); + control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP; + writel(control_reg, &cpsw->regs->control); + + /* setup host port priority mapping */ + writel_relaxed(CPDMA_TX_PRIORITY_MAP, + &cpsw->host_port_regs->cpdma_tx_pri_map); + writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map); + + /* disable priority elevation */ + writel_relaxed(0, &cpsw->regs->ptype); + + /* enable statistics collection only on all ports */ + writel_relaxed(0x7, &cpsw->regs->stat_port_en); + + /* Enable internal fifo flow control */ + writel(0x7, &cpsw->regs->flow_control); + + if (cpsw_is_switch_en(cpsw)) + cpsw_init_host_port_switch(cpsw); + else + cpsw_init_host_port_dual_mac(cpsw); + + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); +} 
+ +static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv, + struct cpsw_slave *slave) +{ + u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST; + struct cpsw_common *cpsw = priv->cpsw; + u32 reg; + + reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : + CPSW2_PORT_VLAN; + slave_write(slave, slave->port_vlan, reg); + + cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask, + port_mask, port_mask, 0); + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, + ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, + ALE_MCAST_FWD); + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN | + ALE_SECURE, slave->port_vlan); + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_DROP_UNKNOWN_VLAN, 1); + /* learning make no sense in dual_mac mode */ + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_NOLEARN, 1); +} + +static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv, + struct cpsw_slave *slave) +{ + u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST; + struct cpsw_common *cpsw = priv->cpsw; + u32 reg; + + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_DROP_UNKNOWN_VLAN, 0); + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_NOLEARN, 0); + /* disabling SA_UPDATE required to make stp work, without this setting + * Host MAC addresses will jump between ports. + * As per TRM MAC address can be defined as unicast supervisory (super) + * by setting both (ALE_BLOCKED | ALE_SECURE) which should prevent + * SA_UPDATE, but HW seems works incorrectly and setting ALE_SECURE + * causes STP packets to be dropped due to ingress filter + * if (source address found) and (secure) and + * (receive port number != port_number)) + * then discard the packet + */ + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_NO_SA_UPDATE, 1); + + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, + port_mask, ALE_VLAN, slave->port_vlan, + ALE_MCAST_FWD_2); + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, slave->port_vlan); + + reg = (cpsw->version == CPSW_VERSION_1) ? 
CPSW1_PORT_VLAN : + CPSW2_PORT_VLAN; + slave_write(slave, slave->port_vlan, reg); +} + +static void cpsw_adjust_link(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + struct phy_device *phy; + u32 mac_control = 0; + + slave = &cpsw->slaves[priv->emac_port - 1]; + phy = slave->phy; + + if (!phy) + return; + + if (phy->link) { + mac_control = CPSW_SL_CTL_GMII_EN; + + if (phy->speed == 1000) + mac_control |= CPSW_SL_CTL_GIG; + if (phy->duplex) + mac_control |= CPSW_SL_CTL_FULLDUPLEX; + + /* set speed_in input in case RMII mode is used in 100Mbps */ + if (phy->speed == 100) + mac_control |= CPSW_SL_CTL_IFCTL_A; + /* in band mode only works in 10Mbps RGMII mode */ + else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) + mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */ + + if (priv->rx_pause) + mac_control |= CPSW_SL_CTL_RX_FLOW_EN; + + if (priv->tx_pause) + mac_control |= CPSW_SL_CTL_TX_FLOW_EN; + + if (mac_control != slave->mac_control) + cpsw_sl_ctl_set(slave->mac_sl, mac_control); + + /* enable forwarding */ + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); + + netif_tx_wake_all_queues(ndev); + + if (priv->shp_cfg_speed && + priv->shp_cfg_speed != slave->phy->speed && + !cpsw_shp_is_off(priv)) + dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!"); + } else { + netif_tx_stop_all_queues(ndev); + + mac_control = 0; + /* disable forwarding */ + cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); + + cpsw_sl_wait_for_idle(slave->mac_sl, 100); + + cpsw_sl_ctl_reset(slave->mac_sl); + } + + if (mac_control != slave->mac_control) + phy_print_status(phy); + + slave->mac_control = mac_control; + + if (phy->link && cpsw_need_resplit(cpsw)) + cpsw_split_res(cpsw); +} + +static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct phy_device *phy; + + cpsw_sl_reset(slave->mac_sl, 100); + cpsw_sl_ctl_reset(slave->mac_sl); + + /* setup priority mapping */ + cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP, + RX_PRIORITY_MAPPING); + + switch (cpsw->version) { + case CPSW_VERSION_1: + slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); + /* Increase RX FIFO size to 5 for supporting fullduplex + * flow control mode + */ + slave_write(slave, + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | + CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); + break; + case CPSW_VERSION_2: + case CPSW_VERSION_3: + case CPSW_VERSION_4: + slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); + /* Increase RX FIFO size to 5 for supporting fullduplex + * flow control mode + */ + slave_write(slave, + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | + CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); + break; + } + + /* setup max packet size, and mac address */ + cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN, + cpsw->rx_packet_max); + cpsw_set_slave_mac(slave, priv); + + slave->mac_control = 0; /* no link yet */ + + if (cpsw_is_switch_en(cpsw)) + cpsw_port_add_switch_def_ale_entries(priv, slave); + else + cpsw_port_add_dual_emac_def_ale_entries(priv, slave); + + if (!slave->data->phy_node) + dev_err(priv->dev, "no phy found on slave %d\n", + slave->slave_num); + phy = of_phy_connect(priv->ndev, slave->data->phy_node, + &cpsw_adjust_link, 0, slave->data->phy_if); + if (!phy) { + dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n", + slave->data->phy_node, + 
slave->slave_num); + return; + } + slave->phy = phy; + + phy_attached_info(slave->phy); + + phy_start(slave->phy); + + /* Configure GMII_SEL register */ + phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET, + slave->data->phy_if); +} + +static int cpsw_ndo_stop(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + + cpsw_info(priv, ifdown, "shutting down ndev\n"); + slave = &cpsw->slaves[priv->emac_port - 1]; + if (slave->phy) + phy_stop(slave->phy); + + netif_tx_stop_all_queues(priv->ndev); + + if (slave->phy) { + phy_disconnect(slave->phy); + slave->phy = NULL; + } + + __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc); + + if (cpsw->usage_count <= 1) { + napi_disable(&cpsw->napi_rx); + napi_disable(&cpsw->napi_tx); + cpts_unregister(cpsw->cpts); + cpsw_intr_disable(cpsw); + cpdma_ctlr_stop(cpsw->dma); + cpsw_ale_stop(cpsw->ale); + cpsw_destroy_xdp_rxqs(cpsw); + } + + if (cpsw_need_resplit(cpsw)) + cpsw_split_res(cpsw); + + cpsw->usage_count--; + pm_runtime_put_sync(cpsw->dev); + return 0; +} + +static int cpsw_ndo_open(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret; + + dev_info(priv->dev, "starting ndev. mode: %s\n", + cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac"); + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + /* Notify the stack of the actual queue counts. */ + ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of tx queues\n"); + goto pm_cleanup; + } + + ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of rx queues\n"); + goto pm_cleanup; + } + + /* Initialize host and slave ports */ + if (!cpsw->usage_count) + cpsw_init_host_port(priv); + cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv); + + /* initialize shared resources for every ndev */ + if (!cpsw->usage_count) { + /* create rxqs for both infs in dual mac as they use same pool + * and must be destroyed together when no users. 
+ */ + ret = cpsw_create_xdp_rxqs(cpsw); + if (ret < 0) + goto err_cleanup; + + ret = cpsw_fill_rx_channels(priv); + if (ret < 0) + goto err_cleanup; + + if (cpts_register(cpsw->cpts)) + dev_err(priv->dev, "error registering cpts device\n"); + + napi_enable(&cpsw->napi_rx); + napi_enable(&cpsw->napi_tx); + + if (cpsw->tx_irq_disabled) { + cpsw->tx_irq_disabled = false; + enable_irq(cpsw->irqs_table[1]); + } + + if (cpsw->rx_irq_disabled) { + cpsw->rx_irq_disabled = false; + enable_irq(cpsw->irqs_table[0]); + } + } + + cpsw_restore(priv); + + /* Enable Interrupt pacing if configured */ + if (cpsw->coal_intvl != 0) { + struct ethtool_coalesce coal; + + coal.rx_coalesce_usecs = cpsw->coal_intvl; + cpsw_set_coalesce(ndev, &coal); + } + + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + cpsw->usage_count++; + + return 0; + +err_cleanup: + cpsw_ndo_stop(ndev); + +pm_cleanup: + pm_runtime_put_sync(cpsw->dev); + return ret; +} + +static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpts *cpts = cpsw->cpts; + struct netdev_queue *txq; + struct cpdma_chan *txch; + int ret, q_idx; + + if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { + cpsw_err(priv, tx_err, "packet pad failed\n"); + ndev->stats.tx_dropped++; + return NET_XMIT_DROP; + } + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb)) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + q_idx = skb_get_queue_mapping(skb); + if (q_idx >= cpsw->tx_ch_num) + q_idx = q_idx % cpsw->tx_ch_num; + + txch = cpsw->txv[q_idx].ch; + txq = netdev_get_tx_queue(ndev, q_idx); + skb_tx_timestamp(skb); + ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, + priv->emac_port); + if (unlikely(ret != 0)) { + cpsw_err(priv, tx_err, "desc submit failed\n"); + goto fail; + } + + /* If there is no more tx desc left free then we need to + * tell the kernel to stop sending us tx frames. 
+ */ + if (unlikely(!cpdma_check_free_tx_desc(txch))) { + netif_tx_stop_queue(txq); + + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (cpdma_check_free_tx_desc(txch)) + netif_tx_wake_queue(txq); + } + + return NETDEV_TX_OK; +fail: + ndev->stats.tx_dropped++; + netif_tx_stop_queue(txq); + + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (cpdma_check_free_tx_desc(txch)) + netif_tx_wake_queue(txq); + + return NETDEV_TX_BUSY; +} + +static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) +{ + struct sockaddr *addr = (struct sockaddr *)p; + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret, slave_no; + int flags = 0; + u16 vid = 0; + + slave_no = cpsw_slave_index(cpsw, priv); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + vid = cpsw->slaves[slave_no].port_vlan; + flags = ALE_VLAN | ALE_SECURE; + + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, + flags, vid); + cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM, + flags, vid); + + ether_addr_copy(priv->mac_addr, addr->sa_data); + ether_addr_copy(ndev->dev_addr, priv->mac_addr); + cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv); + + pm_runtime_put(cpsw->dev); + + return 0; +} + +static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ret; + int i; + + if (cpsw_is_switch_en(cpsw)) { + dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n"); + return 0; + } + + if (vid == cpsw->data.default_vlan) + return 0; + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + for (i = 0; i < cpsw->data.slaves; i++) { + if (cpsw->slaves[i].ndev && + vid == cpsw->slaves[i].port_vlan) + goto err; + } + + dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid); + cpsw_ale_del_vlan(cpsw->ale, vid, 0); + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); + cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); + cpsw_ale_flush_multicast(cpsw->ale, 0, vid); +err: + pm_runtime_put(cpsw->dev); + return ret; +} + +static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name, + size_t len) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int err; + + err = snprintf(name, len, "p%d", priv->emac_port); + + if (err >= len) + return -EINVAL; + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cpsw_ndo_poll_controller(struct net_device *ndev) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + cpsw_intr_disable(cpsw); + cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw); + cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw); + cpsw_intr_enable(cpsw); +} +#endif + +static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct xdp_frame *xdpf; + int i, drops = 0; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + for (i = 0; i < n; i++) { + xdpf = frames[i]; + if (xdpf->len < CPSW_MIN_PACKET_SIZE) { + xdp_return_frame_rx_napi(xdpf); + drops++; + continue; + } + + if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port)) + drops++; + } + + return n - drops; +} + +static int 
cpsw_get_port_parent_id(struct net_device *ndev, + struct netdev_phys_item_id *ppid) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + ppid->id_len = sizeof(cpsw->base_mac); + memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len); + + return 0; +} + +static const struct net_device_ops cpsw_netdev_ops = { + .ndo_open = cpsw_ndo_open, + .ndo_stop = cpsw_ndo_stop, + .ndo_start_xmit = cpsw_ndo_start_xmit, + .ndo_set_mac_address = cpsw_ndo_set_mac_address, + .ndo_do_ioctl = cpsw_ndo_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = cpsw_ndo_tx_timeout, + .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, + .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cpsw_ndo_poll_controller, +#endif + .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, + .ndo_setup_tc = cpsw_ndo_setup_tc, + .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name, + .ndo_bpf = cpsw_ndo_bpf, + .ndo_xdp_xmit = cpsw_ndo_xdp_xmit, + .ndo_get_port_parent_id = cpsw_get_port_parent_id, +}; + +static void cpsw_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct platform_device *pdev; + + pdev = to_platform_device(cpsw->dev); + strlcpy(info->driver, "cpsw-switch", sizeof(info->driver)); + strlcpy(info->version, "2.0", sizeof(info->version)); + strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info)); +} + +static int cpsw_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no; + + slave_no = cpsw_slave_index(cpsw, priv); + if (!cpsw->slaves[slave_no].phy) + return -EINVAL; + + if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause)) + return -EINVAL; + + priv->rx_pause = pause->rx_pause ? true : false; + priv->tx_pause = pause->tx_pause ? 
true : false; + + phy_set_asym_pause(cpsw->slaves[slave_no].phy, + priv->rx_pause, priv->tx_pause); + + return 0; +} + +static int cpsw_set_channels(struct net_device *ndev, + struct ethtool_channels *chs) +{ + return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler); +} + +static const struct ethtool_ops cpsw_ethtool_ops = { + .get_drvinfo = cpsw_get_drvinfo, + .get_msglevel = cpsw_get_msglevel, + .set_msglevel = cpsw_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ts_info = cpsw_get_ts_info, + .get_coalesce = cpsw_get_coalesce, + .set_coalesce = cpsw_set_coalesce, + .get_sset_count = cpsw_get_sset_count, + .get_strings = cpsw_get_strings, + .get_ethtool_stats = cpsw_get_ethtool_stats, + .get_pauseparam = cpsw_get_pauseparam, + .set_pauseparam = cpsw_set_pauseparam, + .get_wol = cpsw_get_wol, + .set_wol = cpsw_set_wol, + .get_regs_len = cpsw_get_regs_len, + .get_regs = cpsw_get_regs, + .begin = cpsw_ethtool_op_begin, + .complete = cpsw_ethtool_op_complete, + .get_channels = cpsw_get_channels, + .set_channels = cpsw_set_channels, + .get_link_ksettings = cpsw_get_link_ksettings, + .set_link_ksettings = cpsw_set_link_ksettings, + .get_eee = cpsw_get_eee, + .set_eee = cpsw_set_eee, + .nway_reset = cpsw_nway_reset, + .get_ringparam = cpsw_get_ringparam, + .set_ringparam = cpsw_set_ringparam, +}; + +static int cpsw_probe_dt(struct cpsw_common *cpsw) +{ + struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np; + struct cpsw_platform_data *data = &cpsw->data; + struct device *dev = cpsw->dev; + int ret; + u32 prop; + + if (!node) + return -EINVAL; + + tmp_node = of_get_child_by_name(node, "ethernet-ports"); + if (!tmp_node) + return -ENOENT; + data->slaves = of_get_child_count(tmp_node); + if (data->slaves != CPSW_SLAVE_PORTS_NUM) { + of_node_put(tmp_node); + return -ENOENT; + } + + data->active_slave = 0; + data->channels = CPSW_MAX_QUEUES; + data->ale_entries = CPSW_ALE_NUM_ENTRIES; + data->dual_emac = 1; + data->bd_ram_size = CPSW_BD_RAM_SIZE; + data->mac_control = 0; + + data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM, + sizeof(struct cpsw_slave_data), + GFP_KERNEL); + if (!data->slave_data) + return -ENOMEM; + + /* Populate all the child nodes here... 
+ */ + ret = devm_of_platform_populate(dev); + /* We do not want to force this, as in some cases may not have child */ + if (ret) + dev_warn(dev, "Doesn't have any child node\n"); + + for_each_child_of_node(tmp_node, port_np) { + struct cpsw_slave_data *slave_data; + const void *mac_addr; + u32 port_id; + + ret = of_property_read_u32(port_np, "reg", &port_id); + if (ret < 0) { + dev_err(dev, "%pOF error reading port_id %d\n", + port_np, ret); + goto err_node_put; + } + + if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) { + dev_err(dev, "%pOF has invalid port_id %u\n", + port_np, port_id); + ret = -EINVAL; + goto err_node_put; + } + + slave_data = &data->slave_data[port_id - 1]; + + slave_data->disabled = !of_device_is_available(port_np); + if (slave_data->disabled) + continue; + + slave_data->slave_node = port_np; + slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL); + if (IS_ERR(slave_data->ifphy)) { + ret = PTR_ERR(slave_data->ifphy); + dev_err(dev, "%pOF: Error retrieving port phy: %d\n", + port_np, ret); + goto err_node_put; + } + + if (of_phy_is_fixed_link(port_np)) { + ret = of_phy_register_fixed_link(port_np); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(dev, "%pOF failed to register fixed-link phy: %d\n", + port_np, ret); + goto err_node_put; + } + slave_data->phy_node = of_node_get(port_np); + } else { + slave_data->phy_node = + of_parse_phandle(port_np, "phy-handle", 0); + } + + if (!slave_data->phy_node) { + dev_err(dev, "%pOF no phy found\n", port_np); + ret = -ENODEV; + goto err_node_put; + } + + ret = of_get_phy_mode(port_np, &slave_data->phy_if); + if (ret) { + dev_err(dev, "%pOF read phy-mode err %d\n", + port_np, ret); + goto err_node_put; + } + + mac_addr = of_get_mac_address(port_np); + if (!IS_ERR(mac_addr)) { + ether_addr_copy(slave_data->mac_addr, mac_addr); + } else { + ret = ti_cm_get_macid(dev, port_id - 1, + slave_data->mac_addr); + if (ret) + goto err_node_put; + } + + if (of_property_read_u32(port_np, "ti,dual-emac-pvid", + &prop)) { + dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n", + port_np); + slave_data->dual_emac_res_vlan = port_id; + dev_err(dev, "%pOF Using %d as Reserved VLAN\n", + port_np, slave_data->dual_emac_res_vlan); + } else { + slave_data->dual_emac_res_vlan = prop; + } + } + + of_node_put(tmp_node); + return 0; + +err_node_put: + of_node_put(port_np); + return ret; +} + +static void cpsw_remove_dt(struct cpsw_common *cpsw) +{ + struct cpsw_platform_data *data = &cpsw->data; + int i = 0; + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave_data *slave_data = &data->slave_data[i]; + struct device_node *port_np = slave_data->phy_node; + + if (port_np) { + if (of_phy_is_fixed_link(port_np)) + of_phy_deregister_fixed_link(port_np); + + of_node_put(port_np); + } + } +} + +static int cpsw_create_ports(struct cpsw_common *cpsw) +{ + struct cpsw_platform_data *data = &cpsw->data; + struct net_device *ndev, *napi_ndev = NULL; + struct device *dev = cpsw->dev; + struct cpsw_priv *priv; + int ret = 0, i = 0; + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave_data *slave_data = &data->slave_data[i]; + + if (slave_data->disabled) + continue; + + ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv), + CPSW_MAX_QUEUES, + CPSW_MAX_QUEUES); + if (!ndev) { + dev_err(dev, "error allocating net_device\n"); + return -ENOMEM; + } + + priv = netdev_priv(ndev); + priv->cpsw = cpsw; + priv->ndev = ndev; + priv->dev = dev; + priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); + priv->emac_port = i + 1; + + if 
(is_valid_ether_addr(slave_data->mac_addr)) { + ether_addr_copy(priv->mac_addr, slave_data->mac_addr); + dev_info(cpsw->dev, "Detected MACID = %pM\n", + priv->mac_addr); + } else { + eth_random_addr(slave_data->mac_addr); + dev_info(cpsw->dev, "Random MACID = %pM\n", + priv->mac_addr); + } + ether_addr_copy(ndev->dev_addr, slave_data->mac_addr); + ether_addr_copy(priv->mac_addr, slave_data->mac_addr); + + cpsw->slaves[i].ndev = ndev; + + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL; + + ndev->netdev_ops = &cpsw_netdev_ops; + ndev->ethtool_ops = &cpsw_ethtool_ops; + SET_NETDEV_DEV(ndev, dev); + + if (!napi_ndev) { + /* CPSW Host port CPDMA interface is shared between + * ports and there is only one TX and one RX IRQs + * available for all possible TX and RX channels + * accordingly. + */ + netif_napi_add(ndev, &cpsw->napi_rx, + cpsw->quirk_irq ? + cpsw_rx_poll : cpsw_rx_mq_poll, + CPSW_POLL_WEIGHT); + netif_tx_napi_add(ndev, &cpsw->napi_tx, + cpsw->quirk_irq ? + cpsw_tx_poll : cpsw_tx_mq_poll, + CPSW_POLL_WEIGHT); + } + + napi_ndev = ndev; + } + + return ret; +} + +static void cpsw_unregister_ports(struct cpsw_common *cpsw) +{ + int i = 0; + + for (i = 0; i < cpsw->data.slaves; i++) { + if (!cpsw->slaves[i].ndev) + continue; + + unregister_netdev(cpsw->slaves[i].ndev); + } +} + +static int cpsw_register_ports(struct cpsw_common *cpsw) +{ + int ret = 0, i = 0; + + for (i = 0; i < cpsw->data.slaves; i++) { + if (!cpsw->slaves[i].ndev) + continue; + + /* register the network device */ + ret = register_netdev(cpsw->slaves[i].ndev); + if (ret) { + dev_err(cpsw->dev, + "cpsw: err registering net device%d\n", i); + cpsw->slaves[i].ndev = NULL; + break; + } + } + + if (ret) + cpsw_unregister_ports(cpsw); + return ret; +} + +bool cpsw_port_dev_check(const struct net_device *ndev) +{ + if (ndev->netdev_ops == &cpsw_netdev_ops) { + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + return !cpsw->data.dual_emac; + } + + return false; +} + +static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw) +{ + int set_val = 0; + int i; + + if (!cpsw->ale_bypass && + (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2))) + set_val = 1; + + dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val); + + for (i = 0; i < cpsw->data.slaves; i++) { + struct net_device *sl_ndev = cpsw->slaves[i].ndev; + struct cpsw_priv *priv = netdev_priv(sl_ndev); + + priv->offload_fwd_mark = set_val; + } +} + +static int cpsw_netdevice_port_link(struct net_device *ndev, + struct net_device *br_ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + + if (!cpsw->br_members) { + cpsw->hw_bridge_dev = br_ndev; + } else { + /* This is adding the port to a second bridge, this is + * unsupported + */ + if (cpsw->hw_bridge_dev != br_ndev) + return -EOPNOTSUPP; + } + + cpsw->br_members |= BIT(priv->emac_port); + + cpsw_port_offload_fwd_mark_update(cpsw); + + return NOTIFY_DONE; +} + +static void cpsw_netdevice_port_unlink(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + + cpsw->br_members &= ~BIT(priv->emac_port); + + cpsw_port_offload_fwd_mark_update(cpsw); + + if (!cpsw->br_members) + cpsw->hw_bridge_dev = NULL; +} + +/* netdev notifier */ +static int cpsw_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + int ret = NOTIFY_DONE; + 
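+	/* only CPSW ports running in switch mode are of interest here */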
+ if (!cpsw_port_dev_check(ndev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGEUPPER: + info = ptr; + + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) + ret = cpsw_netdevice_port_link(ndev, + info->upper_dev); + else + cpsw_netdevice_port_unlink(ndev); + } + break; + default: + return NOTIFY_DONE; + } + + return notifier_from_errno(ret); +} + +static struct notifier_block cpsw_netdevice_nb __read_mostly = { + .notifier_call = cpsw_netdevice_event, +}; + +static int cpsw_register_notifiers(struct cpsw_common *cpsw) +{ + int ret = 0; + + ret = register_netdevice_notifier(&cpsw_netdevice_nb); + if (ret) { + dev_err(cpsw->dev, "can't register netdevice notifier\n"); + return ret; + } + + ret = cpsw_switchdev_register_notifiers(cpsw); + if (ret) + unregister_netdevice_notifier(&cpsw_netdevice_nb); + + return ret; +} + +static void cpsw_unregister_notifiers(struct cpsw_common *cpsw) +{ + cpsw_switchdev_unregister_notifiers(cpsw); + unregister_netdevice_notifier(&cpsw_netdevice_nb); +} + +static const struct devlink_ops cpsw_devlink_ops = { +}; + +static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct cpsw_devlink *dl_priv = devlink_priv(dl); + struct cpsw_common *cpsw = dl_priv->cpsw; + + dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); + + if (id != CPSW_DL_PARAM_SWITCH_MODE) + return -EOPNOTSUPP; + + ctx->val.vbool = !cpsw->data.dual_emac; + + return 0; +} + +static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct cpsw_devlink *dl_priv = devlink_priv(dl); + struct cpsw_common *cpsw = dl_priv->cpsw; + int vlan = cpsw->data.default_vlan; + bool switch_en = ctx->val.vbool; + bool if_running = false; + int i; + + dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); + + if (id != CPSW_DL_PARAM_SWITCH_MODE) + return -EOPNOTSUPP; + + if (switch_en == !cpsw->data.dual_emac) + return 0; + + if (!switch_en && cpsw->br_members) { + dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n"); + return -EINVAL; + } + + rtnl_lock(); + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + struct net_device *sl_ndev = slave->ndev; + + if (!sl_ndev || !netif_running(sl_ndev)) + continue; + + if_running = true; + } + + if (!if_running) { + /* all ndevs are down */ + cpsw->data.dual_emac = !switch_en; + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + struct net_device *sl_ndev = slave->ndev; + struct cpsw_priv *priv; + + if (!sl_ndev) + continue; + + priv = netdev_priv(sl_ndev); + if (switch_en) + vlan = cpsw->data.default_vlan; + else + vlan = slave->data->dual_emac_res_vlan; + slave->port_vlan = vlan; + } + goto exit; + } + + if (switch_en) { + dev_info(cpsw->dev, "Enable switch mode\n"); + + /* enable bypass - no forwarding; all traffic goes to Host */ + cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1); + + /* clean up ALE table */ + cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1); + cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT); + + cpsw_init_host_port_switch(cpsw); + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + struct net_device *sl_ndev = slave->ndev; + struct cpsw_priv *priv; + + if (!sl_ndev) + continue; + + priv = netdev_priv(sl_ndev); + slave->port_vlan = vlan; + if (netif_running(sl_ndev)) + cpsw_port_add_switch_def_ale_entries(priv, + slave); + } + + cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0); + 
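/* bypass cleared: the ALE resumes forwarding between external ports */
+		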
cpsw->data.dual_emac = false; + } else { + dev_info(cpsw->dev, "Disable switch mode\n"); + + /* enable bypass - no forwarding; all traffic goes to Host */ + cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1); + + cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1); + cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT); + + cpsw_init_host_port_dual_mac(cpsw); + + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + struct net_device *sl_ndev = slave->ndev; + struct cpsw_priv *priv; + + if (!sl_ndev) + continue; + + priv = netdev_priv(slave->ndev); + slave->port_vlan = slave->data->dual_emac_res_vlan; + cpsw_port_add_dual_emac_def_ale_entries(priv, slave); + } + + cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0); + cpsw->data.dual_emac = true; + } +exit: + rtnl_unlock(); + + return 0; +} + +static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct cpsw_devlink *dl_priv = devlink_priv(dl); + struct cpsw_common *cpsw = dl_priv->cpsw; + + dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); + + switch (id) { + case CPSW_DL_PARAM_ALE_BYPASS: + ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct cpsw_devlink *dl_priv = devlink_priv(dl); + struct cpsw_common *cpsw = dl_priv->cpsw; + int ret = -EOPNOTSUPP; + + dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id); + + switch (id) { + case CPSW_DL_PARAM_ALE_BYPASS: + ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, + ctx->val.vbool); + if (!ret) { + cpsw->ale_bypass = ctx->val.vbool; + cpsw_port_offload_fwd_mark_update(cpsw); + } + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct devlink_param cpsw_devlink_params[] = { + DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE, + "switch_mode", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set, + NULL), + DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS, + "ale_bypass", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL), +}; + +static int cpsw_register_devlink(struct cpsw_common *cpsw) +{ + struct device *dev = cpsw->dev; + struct cpsw_devlink *dl_priv; + int ret = 0; + + cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv)); + if (!cpsw->devlink) + return -ENOMEM; + + dl_priv = devlink_priv(cpsw->devlink); + dl_priv->cpsw = cpsw; + + ret = devlink_register(cpsw->devlink, dev); + if (ret) { + dev_err(dev, "DL reg fail ret:%d\n", ret); + goto dl_free; + } + + ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params, + ARRAY_SIZE(cpsw_devlink_params)); + if (ret) { + dev_err(dev, "DL params reg fail ret:%d\n", ret); + goto dl_unreg; + } + + devlink_params_publish(cpsw->devlink); + return ret; + +dl_unreg: + devlink_unregister(cpsw->devlink); +dl_free: + devlink_free(cpsw->devlink); + return ret; +} + +static void cpsw_unregister_devlink(struct cpsw_common *cpsw) +{ + devlink_params_unpublish(cpsw->devlink); + devlink_params_unregister(cpsw->devlink, cpsw_devlink_params, + ARRAY_SIZE(cpsw_devlink_params)); + devlink_unregister(cpsw->devlink); + devlink_free(cpsw->devlink); +} + +static const struct of_device_id cpsw_of_mtable[] = { + { .compatible = "ti,cpsw-switch"}, + { .compatible = "ti,am335x-cpsw-switch"}, + { .compatible = "ti,am4372-cpsw-switch"}, + { .compatible = 
"ti,dra7-cpsw-switch"}, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, cpsw_of_mtable); + +static const struct soc_device_attribute cpsw_soc_devices[] = { + { .family = "AM33xx", .revision = "ES1.0"}, + { /* sentinel */ } +}; + +static int cpsw_probe(struct platform_device *pdev) +{ + const struct soc_device_attribute *soc; + struct device *dev = &pdev->dev; + struct cpsw_common *cpsw; + struct resource *ss_res; + struct gpio_descs *mode; + void __iomem *ss_regs; + int ret = 0, ch; + struct clk *clk; + int irq; + + cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL); + if (!cpsw) + return -ENOMEM; + + cpsw_slave_index = cpsw_slave_index_priv; + + cpsw->dev = dev; + + cpsw->slaves = devm_kcalloc(dev, + CPSW_SLAVE_PORTS_NUM, + sizeof(struct cpsw_slave), + GFP_KERNEL); + if (!cpsw->slaves) + return -ENOMEM; + + mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); + if (IS_ERR(mode)) { + ret = PTR_ERR(mode); + dev_err(dev, "gpio request failed, ret %d\n", ret); + return ret; + } + + clk = devm_clk_get(dev, "fck"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_err(dev, "fck is not found %d\n", ret); + return ret; + } + cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; + + ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ss_regs = devm_ioremap_resource(dev, ss_res); + if (IS_ERR(ss_regs)) { + ret = PTR_ERR(ss_regs); + return ret; + } + cpsw->regs = ss_regs; + + irq = platform_get_irq_byname(pdev, "rx"); + if (irq < 0) + return irq; + cpsw->irqs_table[0] = irq; + + irq = platform_get_irq_byname(pdev, "tx"); + if (irq < 0) + return irq; + cpsw->irqs_table[1] = irq; + + platform_set_drvdata(pdev, cpsw); + /* This may be required here for child devices. */ + pm_runtime_enable(dev); + + /* Need to enable clocks with runtime PM api to access module + * registers + */ + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + return ret; + } + + ret = cpsw_probe_dt(cpsw); + if (ret) + goto clean_dt_ret; + + soc = soc_device_match(cpsw_soc_devices); + if (soc) + cpsw->quirk_irq = 1; + + cpsw->rx_packet_max = rx_packet_max; + cpsw->descs_pool_size = descs_pool_size; + eth_random_addr(cpsw->base_mac); + + ret = cpsw_init_common(cpsw, ss_regs, ale_ageout, + (u32 __force)ss_res->start + CPSW2_BD_OFFSET, + descs_pool_size); + if (ret) + goto clean_dt_ret; + + cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ? + ss_regs + CPSW1_WR_OFFSET : + ss_regs + CPSW2_WR_OFFSET; + + ch = cpsw->quirk_irq ? 0 : 7; + cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0); + if (IS_ERR(cpsw->txv[0].ch)) { + dev_err(dev, "error initializing tx dma channel\n"); + ret = PTR_ERR(cpsw->txv[0].ch); + goto clean_cpts; + } + + cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1); + if (IS_ERR(cpsw->rxv[0].ch)) { + dev_err(dev, "error initializing rx dma channel\n"); + ret = PTR_ERR(cpsw->rxv[0].ch); + goto clean_cpts; + } + cpsw_split_res(cpsw); + + /* setup netdevs */ + ret = cpsw_create_ports(cpsw); + if (ret) + goto clean_unregister_netdev; + + /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and + * MISC IRQs which are always kept disabled with this driver so + * we will not request them. + * + * If anyone wants to implement support for those, make sure to + * first request and append them to irqs_table array. 
+ */ + + ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt, + 0, dev_name(dev), cpsw); + if (ret < 0) { + dev_err(dev, "error attaching irq (%d)\n", ret); + goto clean_unregister_netdev; + } + + ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt, + 0, dev_name(dev), cpsw); + if (ret < 0) { + dev_err(dev, "error attaching irq (%d)\n", ret); + goto clean_unregister_netdev; + } + + ret = cpsw_register_notifiers(cpsw); + if (ret) + goto clean_unregister_netdev; + + ret = cpsw_register_devlink(cpsw); + if (ret) + goto clean_unregister_notifiers; + + ret = cpsw_register_ports(cpsw); + if (ret) + goto clean_unregister_notifiers; + + dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n", + &ss_res->start, descs_pool_size, + cpsw->version, CPSW_MAJOR_VERSION(cpsw->version), + CPSW_MINOR_VERSION(cpsw->version), + CPSW_RTL_VERSION(cpsw->version)); + + pm_runtime_put(dev); + + return 0; + +clean_unregister_notifiers: + cpsw_unregister_notifiers(cpsw); +clean_unregister_netdev: + cpsw_unregister_ports(cpsw); +clean_cpts: + cpts_release(cpsw->cpts); + cpdma_ctlr_destroy(cpsw->dma); +clean_dt_ret: + cpsw_remove_dt(cpsw); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + return ret; +} + +static int cpsw_remove(struct platform_device *pdev) +{ + struct cpsw_common *cpsw = platform_get_drvdata(pdev); + int ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + return ret; + } + + cpsw_unregister_notifiers(cpsw); + cpsw_unregister_devlink(cpsw); + cpsw_unregister_ports(cpsw); + + cpts_release(cpsw->cpts); + cpdma_ctlr_destroy(cpsw->dma); + cpsw_remove_dt(cpsw); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return 0; +} + +static struct platform_driver cpsw_driver = { + .driver = { + .name = "cpsw-switch", + .of_match_table = cpsw_of_mtable, + }, + .probe = cpsw_probe, + .remove = cpsw_remove, +}; + +module_platform_driver(cpsw_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver"); diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 476d050a022c..b833cc1d188c 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -5,20 +5,415 @@ * Copyright (C) 2019 Texas Instruments */ +#include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> +#include <linux/kmemleak.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/of.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/skbuff.h> +#include <net/page_pool.h> +#include <net/pkt_cls.h> +#include "cpsw.h" #include "cpts.h" #include "cpsw_ale.h" #include "cpsw_priv.h" #include "cpsw_sl.h" #include "davinci_cpdma.h" +int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv); + +void cpsw_intr_enable(struct cpsw_common *cpsw) +{ + writel_relaxed(0xFF, &cpsw->wr_regs->tx_en); + writel_relaxed(0xFF, &cpsw->wr_regs->rx_en); + + cpdma_ctlr_int_ctrl(cpsw->dma, true); +} + +void cpsw_intr_disable(struct cpsw_common *cpsw) +{ + writel_relaxed(0, &cpsw->wr_regs->tx_en); + writel_relaxed(0, &cpsw->wr_regs->rx_en); + + cpdma_ctlr_int_ctrl(cpsw->dma, false); +} + +void cpsw_tx_handler(void *token, int len, int status) +{ + struct cpsw_meta_xdp *xmeta; + struct xdp_frame *xdpf; + struct net_device *ndev; + struct netdev_queue *txq; + struct sk_buff *skb; + int 
ch; + + if (cpsw_is_xdpf_handle(token)) { + xdpf = cpsw_handle_to_xdpf(token); + xmeta = (void *)xdpf + CPSW_XMETA_OFFSET; + ndev = xmeta->ndev; + ch = xmeta->ch; + xdp_return_frame(xdpf); + } else { + skb = token; + ndev = skb->dev; + ch = skb_get_queue_mapping(skb); + cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb); + dev_kfree_skb_any(skb); + } + + /* Check whether the queue is stopped due to stalled tx dma, if the + * queue is stopped then start the queue as we have free desc for tx + */ + txq = netdev_get_tx_queue(ndev, ch); + if (unlikely(netif_tx_queue_stopped(txq))) + netif_tx_wake_queue(txq); + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += len; +} + +irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) +{ + struct cpsw_common *cpsw = dev_id; + + writel(0, &cpsw->wr_regs->tx_en); + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX); + + if (cpsw->quirk_irq) { + disable_irq_nosync(cpsw->irqs_table[1]); + cpsw->tx_irq_disabled = true; + } + + napi_schedule(&cpsw->napi_tx); + return IRQ_HANDLED; +} + +irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) +{ + struct cpsw_common *cpsw = dev_id; + + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); + writel(0, &cpsw->wr_regs->rx_en); + + if (cpsw->quirk_irq) { + disable_irq_nosync(cpsw->irqs_table[0]); + cpsw->rx_irq_disabled = true; + } + + napi_schedule(&cpsw->napi_rx); + return IRQ_HANDLED; +} + +int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); + int num_tx, cur_budget, ch; + u32 ch_map; + struct cpsw_vector *txv; + + /* process every unprocessed channel */ + ch_map = cpdma_ctrl_txchs_state(cpsw->dma); + for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) { + if (!(ch_map & 0x80)) + continue; + + txv = &cpsw->txv[ch]; + if (unlikely(txv->budget > budget - num_tx)) + cur_budget = budget - num_tx; + else + cur_budget = txv->budget; + + num_tx += cpdma_chan_process(txv->ch, cur_budget); + if (num_tx >= budget) + break; + } + + if (num_tx < budget) { + napi_complete(napi_tx); + writel(0xff, &cpsw->wr_regs->tx_en); + } + + return num_tx; +} + +int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); + int num_tx; + + num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget); + if (num_tx < budget) { + napi_complete(napi_tx); + writel(0xff, &cpsw->wr_regs->tx_en); + if (cpsw->tx_irq_disabled) { + cpsw->tx_irq_disabled = false; + enable_irq(cpsw->irqs_table[1]); + } + } + + return num_tx; +} + +int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); + int num_rx, cur_budget, ch; + u32 ch_map; + struct cpsw_vector *rxv; + + /* process every unprocessed channel */ + ch_map = cpdma_ctrl_rxchs_state(cpsw->dma); + for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) { + if (!(ch_map & 0x01)) + continue; + + rxv = &cpsw->rxv[ch]; + if (unlikely(rxv->budget > budget - num_rx)) + cur_budget = budget - num_rx; + else + cur_budget = rxv->budget; + + num_rx += cpdma_chan_process(rxv->ch, cur_budget); + if (num_rx >= budget) + break; + } + + if (num_rx < budget) { + napi_complete_done(napi_rx, num_rx); + writel(0xff, &cpsw->wr_regs->rx_en); + } + + return num_rx; +} + +int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); + int num_rx; + + num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget); + if (num_rx < budget) { + napi_complete_done(napi_rx, num_rx); + writel(0xff, &cpsw->wr_regs->rx_en); + if (cpsw->rx_irq_disabled) { 
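+			/* re-enable the RX IRQ that cpsw_rx_interrupt() masked (quirk_irq mode) */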
+ cpsw->rx_irq_disabled = false; + enable_irq(cpsw->irqs_table[0]); + } + } + + return num_rx; +} + +void cpsw_rx_vlan_encap(struct sk_buff *skb) +{ + struct cpsw_priv *priv = netdev_priv(skb->dev); + u32 rx_vlan_encap_hdr = *((u32 *)skb->data); + struct cpsw_common *cpsw = priv->cpsw; + u16 vtag, vid, prio, pkt_type; + + /* Remove VLAN header encapsulation word */ + skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE); + + pkt_type = (rx_vlan_encap_hdr >> + CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) & + CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK; + /* Ignore unknown & Priority-tagged packets*/ + if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV || + pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG) + return; + + vid = (rx_vlan_encap_hdr >> + CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) & + VLAN_VID_MASK; + /* Ignore vid 0 and pass packet as is */ + if (!vid) + return; + + /* Untag P0 packets if set for vlan */ + if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) { + prio = (rx_vlan_encap_hdr >> + CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) & + CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK; + + vtag = (prio << VLAN_PRIO_SHIFT) | vid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); + } + + /* strip vlan tag for VLAN-tagged packet */ + if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) { + memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); + skb_pull(skb, VLAN_HLEN); + } +} + +void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + slave_write(slave, mac_hi(priv->mac_addr), SA_HI); + slave_write(slave, mac_lo(priv->mac_addr), SA_LO); +} + +void soft_reset(const char *module, void __iomem *reg) +{ + unsigned long timeout = jiffies + HZ; + + writel_relaxed(1, reg); + do { + cpu_relax(); + } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies)); + + WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module); +} + +void cpsw_ndo_tx_timeout(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ch; + + cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); + ndev->stats.tx_errors++; + cpsw_intr_disable(cpsw); + for (ch = 0; ch < cpsw->tx_ch_num; ch++) { + cpdma_chan_stop(cpsw->txv[ch].ch); + cpdma_chan_start(cpsw->txv[ch].ch); + } + + cpsw_intr_enable(cpsw); + netif_trans_update(ndev); + netif_tx_wake_all_queues(ndev); +} + +static int cpsw_get_common_speed(struct cpsw_common *cpsw) +{ + int i, speed; + + for (i = 0, speed = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link) + speed += cpsw->slaves[i].phy->speed; + + return speed; +} + +int cpsw_need_resplit(struct cpsw_common *cpsw) +{ + int i, rlim_ch_num; + int speed, ch_rate; + + /* re-split resources only in case speed was changed */ + speed = cpsw_get_common_speed(cpsw); + if (speed == cpsw->speed || !speed) + return 0; + + cpsw->speed = speed; + + for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) { + ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch); + if (!ch_rate) + break; + + rlim_ch_num++; + } + + /* cases not dependent on speed */ + if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num) + return 0; + + return 1; +} + +void cpsw_split_res(struct cpsw_common *cpsw) +{ + u32 consumed_rate = 0, bigest_rate = 0; + struct cpsw_vector *txv = cpsw->txv; + int i, ch_weight, rlim_ch_num = 0; + int budget, bigest_rate_ch = 0; + u32 ch_rate, max_rate; + int ch_budget = 0; + + for (i = 0; i < cpsw->tx_ch_num; i++) { + ch_rate = cpdma_chan_get_rate(txv[i].ch); + if (!ch_rate) + continue; + + rlim_ch_num++; + consumed_rate += ch_rate; + } + + 
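+	/* Three cases follow: every tx channel is rate limited, none is,
+	 * or a mix; in the mixed case the poll budget left over from the
+	 * rate limited channels is shared equally among the rest.
+	 */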
if (cpsw->tx_ch_num == rlim_ch_num) { + max_rate = consumed_rate; + } else if (!rlim_ch_num) { + ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num; + bigest_rate = 0; + max_rate = consumed_rate; + } else { + max_rate = cpsw->speed * 1000; + + /* if max_rate is less then expected due to reduced link speed, + * split proportionally according next potential max speed + */ + if (max_rate < consumed_rate) + max_rate *= 10; + + if (max_rate < consumed_rate) + max_rate *= 10; + + ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate; + ch_budget = (CPSW_POLL_WEIGHT - ch_budget) / + (cpsw->tx_ch_num - rlim_ch_num); + bigest_rate = (max_rate - consumed_rate) / + (cpsw->tx_ch_num - rlim_ch_num); + } + + /* split tx weight/budget */ + budget = CPSW_POLL_WEIGHT; + for (i = 0; i < cpsw->tx_ch_num; i++) { + ch_rate = cpdma_chan_get_rate(txv[i].ch); + if (ch_rate) { + txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate; + if (!txv[i].budget) + txv[i].budget++; + if (ch_rate > bigest_rate) { + bigest_rate_ch = i; + bigest_rate = ch_rate; + } + + ch_weight = (ch_rate * 100) / max_rate; + if (!ch_weight) + ch_weight++; + cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight); + } else { + txv[i].budget = ch_budget; + if (!bigest_rate_ch) + bigest_rate_ch = i; + cpdma_chan_set_weight(cpsw->txv[i].ch, 0); + } + + budget -= txv[i].budget; + } + + if (budget) + txv[bigest_rate_ch].budget += budget; + + /* split rx budget */ + budget = CPSW_POLL_WEIGHT; + ch_budget = budget / cpsw->rx_ch_num; + for (i = 0; i < cpsw->rx_ch_num; i++) { + cpsw->rxv[i].budget = ch_budget; + budget -= ch_budget; + } + + if (budget) + cpsw->rxv[0].budget += budget; +} + int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, int ale_ageout, phys_addr_t desc_mem_phys, int descs_pool_size) @@ -28,6 +423,7 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, struct cpsw_platform_data *data; struct cpdma_params dma_params; struct device *dev = cpsw->dev; + struct device_node *cpts_node; void __iomem *cpts_regs; int ret = 0, i; @@ -122,11 +518,859 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, return -ENOMEM; } - cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node); + cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts"); + if (!cpts_node) + cpts_node = cpsw->dev->of_node; + + cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node); if (IS_ERR(cpsw->cpts)) { ret = PTR_ERR(cpsw->cpts); cpdma_ctlr_destroy(cpsw->dma); } + of_node_put(cpts_node); + + return ret; +} + +#if IS_ENABLED(CONFIG_TI_CPTS) + +static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + u32 ts_en, seq_id; + + if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) { + slave_write(slave, 0, CPSW1_TS_CTL); + return; + } + + seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; + ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; + + if (priv->tx_ts_enabled) + ts_en |= CPSW_V1_TS_TX_EN; + + if (priv->rx_ts_enabled) + ts_en |= CPSW_V1_TS_RX_EN; + + slave_write(slave, ts_en, CPSW1_TS_CTL); + slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE); +} + +static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 ctrl, mtype; + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + + ctrl = slave_read(slave, CPSW2_CONTROL); + switch (cpsw->version) { + case CPSW_VERSION_2: + ctrl &= ~CTRL_V2_ALL_TS_MASK; + + if 
(priv->tx_ts_enabled) + ctrl |= CTRL_V2_TX_TS_BITS; + + if (priv->rx_ts_enabled) + ctrl |= CTRL_V2_RX_TS_BITS; + break; + case CPSW_VERSION_3: + default: + ctrl &= ~CTRL_V3_ALL_TS_MASK; + + if (priv->tx_ts_enabled) + ctrl |= CTRL_V3_TX_TS_BITS; + + if (priv->rx_ts_enabled) + ctrl |= CTRL_V3_RX_TS_BITS; + break; + } + + mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; + + slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); + slave_write(slave, ctrl, CPSW2_CONTROL); + writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype); + writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype); +} + +static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct cpsw_priv *priv = netdev_priv(dev); + struct cpsw_common *cpsw = priv->cpsw; + struct hwtstamp_config cfg; + + if (cpsw->version != CPSW_VERSION_1 && + cpsw->version != CPSW_VERSION_2 && + cpsw->version != CPSW_VERSION_3) + return -EOPNOTSUPP; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + /* reserved for future extensions */ + if (cfg.flags) + return -EINVAL; + + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + priv->rx_ts_enabled = 0; + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_NTP_ALL: + return -ERANGE; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + default: + return -ERANGE; + } + + priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON; + + switch (cpsw->version) { + case CPSW_VERSION_1: + cpsw_hwtstamp_v1(priv); + break; + case CPSW_VERSION_2: + case CPSW_VERSION_3: + cpsw_hwtstamp_v2(priv); + break; + default: + WARN_ON(1); + } + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(dev); + struct cpsw_priv *priv = netdev_priv(dev); + struct hwtstamp_config cfg; + + if (cpsw->version != CPSW_VERSION_1 && + cpsw->version != CPSW_VERSION_2 && + cpsw->version != CPSW_VERSION_3) + return -EOPNOTSUPP; + + cfg.flags = 0; + cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg.rx_filter = priv->rx_ts_enabled; + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; +} +#else +static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} + +static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} +#endif /*CONFIG_TI_CPTS*/ + +int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct cpsw_priv *priv = netdev_priv(dev); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCSHWTSTAMP: + return cpsw_hwtstamp_set(dev, req); + case SIOCGHWTSTAMP: + return cpsw_hwtstamp_get(dev, req); + } + + if (!cpsw->slaves[slave_no].phy) + return -EOPNOTSUPP; + return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd); +} + +int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 min_rate; + u32 ch_rate; + int i, ret; + + ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate; + if (ch_rate == rate) + return 0; + + ch_rate = rate * 1000; + min_rate = cpdma_chan_get_min_rate(cpsw->dma); + if ((ch_rate < min_rate && ch_rate)) { + dev_err(priv->dev, "The channel rate cannot be less than %dMbps", + min_rate); + return -EINVAL; + } + + if (rate > cpsw->speed) { + dev_err(priv->dev, "The channel rate cannot be more than 2Gbps"); + return -EINVAL; + } + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate); + pm_runtime_put(cpsw->dev); + + if (ret) + return ret; + + /* update rates for slaves tx queues */ + for (i = 0; i < cpsw->data.slaves; i++) { + slave = &cpsw->slaves[i]; + if (!slave->ndev) + continue; + + netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate; + } + + cpsw_split_res(cpsw); + return ret; +} + +static int cpsw_tc_to_fifo(int tc, int num_tc) +{ + if (tc == num_tc - 1) + return 0; + + return CPSW_FIFO_SHAPERS_NUM - tc; +} + +bool cpsw_shp_is_off(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 shift, mask, val; + + val = readl_relaxed(&cpsw->regs->ptype); + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; + mask = 7 << shift; + val = val & mask; + + return !val; +} + +static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 shift, mask, val; + + val = readl_relaxed(&cpsw->regs->ptype); + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num; + mask = (1 << --fifo) << shift; + val = on ? 
val | mask : val & ~mask; + + writel_relaxed(val, &cpsw->regs->ptype); +} + +static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 val = 0, send_pct, shift; + struct cpsw_slave *slave; + int pct = 0, i; + + if (bw > priv->shp_cfg_speed * 1000) + goto err; + + /* shaping has to stay enabled for highest fifos linearly + * and fifo bw no more then interface can allow + */ + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + send_pct = slave_read(slave, SEND_PERCENT); + for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) { + if (!bw) { + if (i >= fifo || !priv->fifo_bw[i]) + continue; + + dev_warn(priv->dev, "Prev FIFO%d is shaped", i); + continue; + } + + if (!priv->fifo_bw[i] && i > fifo) { + dev_err(priv->dev, "Upper FIFO%d is not shaped", i); + return -EINVAL; + } + + shift = (i - 1) * 8; + if (i == fifo) { + send_pct &= ~(CPSW_PCT_MASK << shift); + val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10); + if (!val) + val = 1; + + send_pct |= val << shift; + pct += val; + continue; + } + + if (priv->fifo_bw[i]) + pct += (send_pct >> shift) & CPSW_PCT_MASK; + } + + if (pct >= 100) + goto err; + + slave_write(slave, send_pct, SEND_PERCENT); + priv->fifo_bw[fifo] = bw; + + dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo, + DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100)); + + return 0; +err: + dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration"); + return -EINVAL; +} + +static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + u32 tx_in_ctl_rg, val; + int ret; + + ret = cpsw_set_fifo_bw(priv, fifo, bw); + if (ret) + return ret; + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ? + CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL; + + if (!bw) + cpsw_fifo_shp_on(priv, fifo, bw); + + val = slave_read(slave, tx_in_ctl_rg); + if (cpsw_shp_is_off(priv)) { + /* disable FIFOs rate limited queues */ + val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT); + + /* set type of FIFO queues to normal priority mode */ + val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT); + + /* set type of FIFO queues to be rate limited */ + if (bw) + val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT; + else + priv->shp_cfg_speed = 0; + } + + /* toggle a FIFO rate limited queue */ + if (bw) + val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); + else + val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT); + slave_write(slave, val, tx_in_ctl_rg); + + /* FIFO transmit shape enable */ + cpsw_fifo_shp_on(priv, fifo, bw); + return 0; +} + +/* Defaults: + * class A - prio 3 + * class B - prio 2 + * shaping for class A should be set first + */ +static int cpsw_set_cbs(struct net_device *ndev, + struct tc_cbs_qopt_offload *qopt) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int prev_speed = 0; + int tc, ret, fifo; + u32 bw = 0; + + tc = netdev_txq_to_tc(priv->ndev, qopt->queue); + + /* enable channels in backward order, as highest FIFOs must be rate + * limited first and for compliance with CPDMA rate limited channels + * that also used in bacward order. FIFO0 cannot be rate limited. 
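+	 * cpsw_tc_to_fifo() maps the last tc to FIFO0, which is why that tc
+	 * is rejected just below.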
+ */ + fifo = cpsw_tc_to_fifo(tc, ndev->num_tc); + if (!fifo) { + dev_err(priv->dev, "Last tc%d can't be rate limited", tc); + return -EINVAL; + } + + /* do nothing, it's disabled anyway */ + if (!qopt->enable && !priv->fifo_bw[fifo]) + return 0; + + /* shapers can be set if link speed is known */ + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + if (slave->phy && slave->phy->link) { + if (priv->shp_cfg_speed && + priv->shp_cfg_speed != slave->phy->speed) + prev_speed = priv->shp_cfg_speed; + + priv->shp_cfg_speed = slave->phy->speed; + } + + if (!priv->shp_cfg_speed) { + dev_err(priv->dev, "Link speed is not known"); + return -1; + } + + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + bw = qopt->enable ? qopt->idleslope : 0; + ret = cpsw_set_fifo_rlimit(priv, fifo, bw); + if (ret) { + priv->shp_cfg_speed = prev_speed; + prev_speed = 0; + } + + if (bw && prev_speed) + dev_warn(priv->dev, + "Speed was changed, CBS shaper speeds are changed!"); + + pm_runtime_put_sync(cpsw->dev); + return ret; +} + +static int cpsw_set_mqprio(struct net_device *ndev, void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio = type_data; + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int fifo, num_tc, count, offset; + struct cpsw_slave *slave; + u32 tx_prio_map = 0; + int i, tc, ret; + + num_tc = mqprio->qopt.num_tc; + if (num_tc > CPSW_TC_NUM) + return -EINVAL; + + if (mqprio->mode != TC_MQPRIO_MODE_DCB) + return -EINVAL; + ret = pm_runtime_get_sync(cpsw->dev); + if (ret < 0) { + pm_runtime_put_noidle(cpsw->dev); + return ret; + } + + if (num_tc) { + for (i = 0; i < 8; i++) { + tc = mqprio->qopt.prio_tc_map[i]; + fifo = cpsw_tc_to_fifo(tc, num_tc); + tx_prio_map |= fifo << (4 * i); + } + + netdev_set_num_tc(ndev, num_tc); + for (i = 0; i < num_tc; i++) { + count = mqprio->qopt.count[i]; + offset = mqprio->qopt.offset[i]; + netdev_set_tc_queue(ndev, i, count, offset); + } + } + + if (!mqprio->qopt.hw) { + /* restore default configuration */ + netdev_reset_tc(ndev); + tx_prio_map = TX_PRIORITY_MAPPING; + } + + priv->mqprio_hw = mqprio->qopt.hw; + + offset = cpsw->version == CPSW_VERSION_1 ? + CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; + + slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)]; + slave_write(slave, tx_prio_map, offset); + + pm_runtime_put_sync(cpsw->dev); + + return 0; +} + +int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_CBS: + return cpsw_set_cbs(ndev, type_data); + + case TC_SETUP_QDISC_MQPRIO: + return cpsw_set_mqprio(ndev, type_data); + + default: + return -EOPNOTSUPP; + } +} + +void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + int fifo, bw; + + for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) { + bw = priv->fifo_bw[fifo]; + if (!bw) + continue; + + cpsw_set_fifo_rlimit(priv, fifo, bw); + } +} + +void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 tx_prio_map = 0; + int i, tc, fifo; + u32 tx_prio_rg; + + if (!priv->mqprio_hw) + return; + + for (i = 0; i < 8; i++) { + tc = netdev_get_prio_tc_map(priv->ndev, i); + fifo = CPSW_FIFO_SHAPERS_NUM - tc; + tx_prio_map |= fifo << (4 * i); + } + + tx_prio_rg = cpsw->version == CPSW_VERSION_1 ? 
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP; + + slave_write(slave, tx_prio_map, tx_prio_rg); +} + +int cpsw_fill_rx_channels(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_meta_xdp *xmeta; + struct page_pool *pool; + struct page *page; + int ch_buf_num; + int ch, i, ret; + dma_addr_t dma; + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + pool = cpsw->page_pool[ch]; + ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); + for (i = 0; i < ch_buf_num; i++) { + page = page_pool_dev_alloc_pages(pool); + if (!page) { + cpsw_err(priv, ifup, "allocate rx page err\n"); + return -ENOMEM; + } + + xmeta = page_address(page) + CPSW_XMETA_OFFSET; + xmeta->ndev = priv->ndev; + xmeta->ch = ch; + + dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM; + ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch, + page, dma, + cpsw->rx_packet_max, + 0); + if (ret < 0) { + cpsw_err(priv, ifup, + "cannot submit page to channel %d rx, error %d\n", + ch, ret); + page_pool_recycle_direct(pool, page); + return ret; + } + } + + cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n", + ch, ch_buf_num); + } + + return 0; +} + +static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw, + int size) +{ + struct page_pool_params pp_params; + struct page_pool *pool; + + pp_params.order = 0; + pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.pool_size = size; + pp_params.nid = NUMA_NO_NODE; + pp_params.dma_dir = DMA_BIDIRECTIONAL; + pp_params.dev = cpsw->dev; + + pool = page_pool_create(&pp_params); + if (IS_ERR(pool)) + dev_err(cpsw->dev, "cannot create rx page pool\n"); + + return pool; +} + +static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch) +{ + struct page_pool *pool; + int ret = 0, pool_size; + + pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch); + pool = cpsw_create_page_pool(cpsw, pool_size); + if (IS_ERR(pool)) + ret = PTR_ERR(pool); + else + cpsw->page_pool[ch] = pool; + + return ret; +} + +static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct xdp_rxq_info *rxq; + struct page_pool *pool; + int ret; + + pool = cpsw->page_pool[ch]; + rxq = &priv->xdp_rxq[ch]; + + ret = xdp_rxq_info_reg(rxq, priv->ndev, ch); + if (ret) + return ret; + + ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); + if (ret) + xdp_rxq_info_unreg(rxq); + + return ret; +} + +static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch) +{ + struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch]; + + if (!xdp_rxq_info_is_reg(rxq)) + return; + + xdp_rxq_info_unreg(rxq); +} + +void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw) +{ + struct net_device *ndev; + int i, ch; + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + for (i = 0; i < cpsw->data.slaves; i++) { + ndev = cpsw->slaves[i].ndev; + if (!ndev) + continue; + + cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch); + } + + page_pool_destroy(cpsw->page_pool[ch]); + cpsw->page_pool[ch] = NULL; + } +} + +int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw) +{ + struct net_device *ndev; + int i, ch, ret; + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + ret = cpsw_create_rx_pool(cpsw, ch); + if (ret) + goto err_cleanup; + + /* using same page pool is allowed as no running rx handlers + * simultaneously for both ndevs + */ + for (i = 0; i < cpsw->data.slaves; i++) { + ndev = cpsw->slaves[i].ndev; + if (!ndev) + continue; + + ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch); + if (ret) + goto err_cleanup; + } + } + + return 0; + +err_cleanup: + 
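/* unwind the page pools and rxq registrations set up so far */
+	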
cpsw_destroy_xdp_rxqs(cpsw); + + return ret; +} + +static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf) +{ + struct bpf_prog *prog = bpf->prog; + + if (!priv->xdpi.prog && !prog) + return 0; + + if (!xdp_attachment_flags_ok(&priv->xdpi, bpf)) + return -EBUSY; + + WRITE_ONCE(priv->xdp_prog, prog); + + xdp_attachment_setup(&priv->xdpi, bpf); + + return 0; +} + +int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return cpsw_xdp_prog_setup(priv, bpf); + + case XDP_QUERY_PROG: + return xdp_attachment_query(&priv->xdpi, bpf); + + default: + return -EINVAL; + } +} + +int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf, + struct page *page, int port) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_meta_xdp *xmeta; + struct cpdma_chan *txch; + dma_addr_t dma; + int ret; + + xmeta = (void *)xdpf + CPSW_XMETA_OFFSET; + xmeta->ndev = priv->ndev; + xmeta->ch = 0; + txch = cpsw->txv[0].ch; + + if (page) { + dma = page_pool_get_dma_addr(page); + dma += xdpf->headroom + sizeof(struct xdp_frame); + ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf), + dma, xdpf->len, port); + } else { + if (sizeof(*xmeta) > xdpf->headroom) { + xdp_return_frame_rx_napi(xdpf); + return -EINVAL; + } + + ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf), + xdpf->data, xdpf->len, port); + } + + if (ret) { + priv->ndev->stats.tx_dropped++; + xdp_return_frame_rx_napi(xdpf); + } + + return ret; +} + +int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, + struct page *page, int port) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct net_device *ndev = priv->ndev; + int ret = CPSW_XDP_CONSUMED; + struct xdp_frame *xdpf; + struct bpf_prog *prog; + u32 act; + + rcu_read_lock(); + + prog = READ_ONCE(priv->xdp_prog); + if (!prog) { + ret = CPSW_XDP_PASS; + goto out; + } + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: + ret = CPSW_XDP_PASS; + break; + case XDP_TX: + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) + goto drop; + + cpsw_xdp_tx_frame(priv, xdpf, page, port); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(ndev, xdp, prog)) + goto drop; + + /* Have to flush here, per packet, instead of doing it in bulk + * at the end of the napi handler. The RX devices on this + * particular hardware is sharing a common queue, so the + * incoming device might change per packet. 
+ */ + xdp_do_flush_map(); + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(ndev, prog, act); + /* fall through -- handle aborts by dropping packet */ + case XDP_DROP: + goto drop; + } +out: + rcu_read_unlock(); + return ret; +drop: + rcu_read_unlock(); + page_pool_recycle_direct(cpsw->page_pool[ch], page); return ret; } diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 8bfa761fa552..bc726356a72c 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -54,6 +54,7 @@ do { \ #define HOST_PORT_NUM 0 #define CPSW_ALE_PORTS_NUM 3 +#define CPSW_SLAVE_PORTS_NUM 2 #define SLIVER_SIZE 0x40 #define CPSW1_HOST_PORT_OFFSET 0x028 @@ -65,6 +66,7 @@ do { \ #define CPSW1_CPTS_OFFSET 0x500 #define CPSW1_ALE_OFFSET 0x600 #define CPSW1_SLIVER_OFFSET 0x700 +#define CPSW1_WR_OFFSET 0x900 #define CPSW2_HOST_PORT_OFFSET 0x108 #define CPSW2_SLAVE_OFFSET 0x200 @@ -76,6 +78,7 @@ do { \ #define CPSW2_ALE_OFFSET 0xd00 #define CPSW2_SLIVER_OFFSET 0xd80 #define CPSW2_BD_OFFSET 0x2000 +#define CPSW2_WR_OFFSET 0x1200 #define CPDMA_RXTHRESH 0x0c0 #define CPDMA_RXFREE 0x0e0 @@ -113,12 +116,15 @@ do { \ #define IRQ_NUM 2 #define CPSW_MAX_QUEUES 8 #define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256 +#define CPSW_ALE_AGEOUT_DEFAULT 10 /* sec */ +#define CPSW_ALE_NUM_ENTRIES 1024 #define CPSW_FIFO_QUEUE_TYPE_SHIFT 16 #define CPSW_FIFO_SHAPE_EN_SHIFT 16 #define CPSW_FIFO_RATE_EN_SHIFT 20 #define CPSW_TC_NUM 4 #define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1) #define CPSW_PCT_MASK 0x7f +#define CPSW_BD_RAM_SIZE 0x2000 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29 #define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0) @@ -279,6 +285,7 @@ struct cpsw_slave_data { u8 mac_addr[ETH_ALEN]; u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ struct phy *ifphy; + bool disabled; }; struct cpsw_platform_data { @@ -286,9 +293,9 @@ struct cpsw_platform_data { u32 ss_reg_ofs; /* Subsystem control register offset */ u32 channels; /* number of cpdma channels (symmetric) */ u32 slaves; /* number of slave cpgmac ports */ - u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ + u32 active_slave;/* time stamping, ethtool and SIOCGMIIPHY slave */ u32 ale_entries; /* ale table size */ - u32 bd_ram_size; /*buffer descriptor ram size */ + u32 bd_ram_size; /*buffer descriptor ram size */ u32 mac_control; /* Mac control register */ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ bool dual_emac; /* Enable Dual EMAC mode */ @@ -344,10 +351,15 @@ struct cpsw_common { bool tx_irq_disabled; u32 irqs_table[IRQ_NUM]; struct cpts *cpts; + struct devlink *devlink; int rx_ch_num, tx_ch_num; int speed; int usage_count; struct page_pool *page_pool[CPSW_MAX_QUEUES]; + u8 br_members; + struct net_device *hw_bridge_dev; + bool ale_bypass; + u8 base_mac[ETH_ALEN]; }; struct cpsw_priv { @@ -368,19 +380,14 @@ struct cpsw_priv { u32 emac_port; struct cpsw_common *cpsw; + int offload_fwd_mark; }; #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw) #define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi) -#define cpsw_slave_index(cpsw, priv) \ - ((cpsw->data.dual_emac) ? 
priv->emac_port : \ - cpsw->data.active_slave) - -static inline int cpsw_get_slave_port(u32 slave_num) -{ - return slave_num + 1; -} +extern int (*cpsw_slave_index)(struct cpsw_common *cpsw, + struct cpsw_priv *priv); struct addr_sync_ctx { struct net_device *ndev; @@ -389,6 +396,35 @@ struct addr_sync_ctx { int flush; /* flush flag */ }; +#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long)) + +#define CPSW_XDP_CONSUMED 1 +#define CPSW_XDP_PASS 0 + +struct __aligned(sizeof(long)) cpsw_meta_xdp { + struct net_device *ndev; + int ch; +}; + +/* The buf includes headroom compatible with both skb and xdpf */ +#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN) +#define CPSW_HEADROOM ALIGN(CPSW_HEADROOM_NA, sizeof(long)) + +static inline int cpsw_is_xdpf_handle(void *handle) +{ + return (unsigned long)handle & BIT(0); +} + +static inline void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf) +{ + return (void *)((unsigned long)xdpf | BIT(0)); +} + +static inline struct xdp_frame *cpsw_handle_to_xdpf(void *handle) +{ + return (struct xdp_frame *)((unsigned long)handle & ~BIT(0)); +} + int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs, int ale_ageout, phys_addr_t desc_mem_phys, int descs_pool_size); @@ -399,6 +435,29 @@ void cpsw_intr_disable(struct cpsw_common *cpsw); void cpsw_tx_handler(void *token, int len, int status); int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw); void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw); +int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf); +int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf, + struct page *page, int port); +int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp, + struct page *page, int port); +irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id); +irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id); +int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget); +int cpsw_tx_poll(struct napi_struct *napi_tx, int budget); +int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget); +int cpsw_rx_poll(struct napi_struct *napi_rx, int budget); +void cpsw_rx_vlan_encap(struct sk_buff *skb); +void soft_reset(const char *module, void __iomem *reg); +void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv); +void cpsw_ndo_tx_timeout(struct net_device *ndev); +int cpsw_need_resplit(struct cpsw_common *cpsw); +int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd); +int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate); +int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data); +bool cpsw_shp_is_off(struct cpsw_priv *priv); +void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); +void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); /* ethtool */ u32 cpsw_get_msglevel(struct net_device *ndev); diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c new file mode 100644 index 000000000000..985a929bb957 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_switchdev.c @@ -0,0 +1,589 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Texas Instruments switchdev Driver + * + * Copyright (C) 2019 Texas Instruments + * + */ + +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <linux/netdevice.h> +#include <linux/workqueue.h> +#include <net/switchdev.h> + +#include "cpsw.h" +#include "cpsw_ale.h" +#include "cpsw_priv.h" +#include "cpsw_switchdev.h" + +struct 
cpsw_switchdev_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct cpsw_priv *priv; + unsigned long event; +}; + +static int cpsw_port_stp_state_set(struct cpsw_priv *priv, + struct switchdev_trans *trans, u8 state) +{ + struct cpsw_common *cpsw = priv->cpsw; + u8 cpsw_state; + int ret = 0; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + switch (state) { + case BR_STATE_FORWARDING: + cpsw_state = ALE_PORT_STATE_FORWARD; + break; + case BR_STATE_LEARNING: + cpsw_state = ALE_PORT_STATE_LEARN; + break; + case BR_STATE_DISABLED: + cpsw_state = ALE_PORT_STATE_DISABLE; + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + cpsw_state = ALE_PORT_STATE_BLOCK; + break; + default: + return -EOPNOTSUPP; + } + + ret = cpsw_ale_control_set(cpsw->ale, priv->emac_port, + ALE_PORT_STATE, cpsw_state); + dev_dbg(priv->dev, "ale state: %u\n", cpsw_state); + + return ret; +} + +static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv, + struct switchdev_trans *trans, + struct net_device *orig_dev, + unsigned long brport_flags) +{ + struct cpsw_common *cpsw = priv->cpsw; + bool unreg_mcast_add = false; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (brport_flags & BR_MCAST_FLOOD) + unreg_mcast_add = true; + dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n", + unreg_mcast_add, priv->emac_port); + + cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port), + unreg_mcast_add); + + return 0; +} + +static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev, + struct switchdev_trans *trans, + unsigned long flags) +{ + if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static int cpsw_port_attr_set(struct net_device *ndev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + dev_dbg(priv->dev, "attr: id %u port: %u\n", attr->id, priv->emac_port); + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + ret = cpsw_port_attr_br_flags_pre_set(ndev, trans, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + ret = cpsw_port_stp_state_set(priv, trans, attr->u.stp_state); + dev_dbg(priv->dev, "stp state: %u\n", attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + ret = cpsw_port_attr_br_flags_set(priv, trans, attr->orig_dev, + attr->u.brport_flags); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static u16 cpsw_get_pvid(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + u32 __iomem *port_vlan_reg; + u32 pvid; + + if (priv->emac_port) { + int reg = CPSW2_PORT_VLAN; + + if (cpsw->version == CPSW_VERSION_1) + reg = CPSW1_PORT_VLAN; + pvid = slave_read(cpsw->slaves + (priv->emac_port - 1), reg); + } else { + port_vlan_reg = &cpsw->host_port_regs->port_vlan; + pvid = readl(port_vlan_reg); + } + + pvid = pvid & 0xfff; + + return pvid; +} + +static void cpsw_set_pvid(struct cpsw_priv *priv, u16 vid, bool cfi, u32 cos) +{ + struct cpsw_common *cpsw = priv->cpsw; + void __iomem *port_vlan_reg; + u32 pvid; + + pvid = vid; + pvid |= cfi ? 
BIT(12) : 0; + pvid |= (cos & 0x7) << 13; + + if (priv->emac_port) { + int reg = CPSW2_PORT_VLAN; + + if (cpsw->version == CPSW_VERSION_1) + reg = CPSW1_PORT_VLAN; + /* no barrier */ + slave_write(cpsw->slaves + (priv->emac_port - 1), pvid, reg); + } else { + /* CPU port */ + port_vlan_reg = &cpsw->host_port_regs->port_vlan; + writel(pvid, port_vlan_reg); + } +} + +static int cpsw_port_vlan_add(struct cpsw_priv *priv, bool untag, bool pvid, + u16 vid, struct net_device *orig_dev) +{ + bool cpu_port = netif_is_bridge_master(orig_dev); + struct cpsw_common *cpsw = priv->cpsw; + int unreg_mcast_mask = 0; + int reg_mcast_mask = 0; + int untag_mask = 0; + int port_mask; + int ret = 0; + u32 flags; + + if (cpu_port) { + port_mask = BIT(HOST_PORT_NUM); + flags = orig_dev->flags; + unreg_mcast_mask = port_mask; + } else { + port_mask = BIT(priv->emac_port); + flags = priv->ndev->flags; + } + + if (flags & IFF_MULTICAST) + reg_mcast_mask = port_mask; + + if (untag) + untag_mask = port_mask; + + ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask, + reg_mcast_mask, unreg_mcast_mask); + if (ret) { + dev_err(priv->dev, "Unable to add vlan\n"); + return ret; + } + + if (cpu_port) + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); + if (!pvid) + return ret; + + cpsw_set_pvid(priv, vid, 0, 0); + + dev_dbg(priv->dev, "VID add: %s: vid:%u ports:%X\n", + priv->ndev->name, vid, port_mask); + return ret; +} + +static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid, + struct net_device *orig_dev) +{ + bool cpu_port = netif_is_bridge_master(orig_dev); + struct cpsw_common *cpsw = priv->cpsw; + int port_mask; + int ret = 0; + + if (cpu_port) + port_mask = BIT(HOST_PORT_NUM); + else + port_mask = BIT(priv->emac_port); + + ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask); + if (ret != 0) + return ret; + + /* We don't care for the return value here, error is returned only if + * the unicast entry is not present + */ + if (cpu_port) + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN, vid); + + if (vid == cpsw_get_pvid(priv)) + cpsw_set_pvid(priv, 0, 0, 0); + + /* We don't care for the return value here, error is returned only if + * the multicast entry is not present + */ + cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, + port_mask, ALE_VLAN, vid); + dev_dbg(priv->dev, "VID del: %s: vid:%u ports:%X\n", + priv->ndev->name, vid, port_mask); + + return ret; +} + +static int cpsw_port_vlans_add(struct cpsw_priv *priv, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + struct net_device *orig_dev = vlan->obj.orig_dev; + bool cpu_port = netif_is_bridge_master(orig_dev); + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + u16 vid; + + dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n", + priv->ndev->name, vlan->vid_begin, vlan->flags); + + if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY)) + return 0; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + int err; + + err = cpsw_port_vlan_add(priv, untag, pvid, vid, orig_dev); + if (err) + return err; + } + + return 0; +} + +static int cpsw_port_vlans_del(struct cpsw_priv *priv, + const struct switchdev_obj_port_vlan *vlan) + +{ + struct net_device *orig_dev = vlan->obj.orig_dev; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + int err; + + err = cpsw_port_vlan_del(priv, vid, orig_dev); + if (err) + 
return err; + } + + return 0; +} + +static int cpsw_port_mdb_add(struct cpsw_priv *priv, + struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) + +{ + struct net_device *orig_dev = mdb->obj.orig_dev; + bool cpu_port = netif_is_bridge_master(orig_dev); + struct cpsw_common *cpsw = priv->cpsw; + int port_mask; + int err; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (cpu_port) + port_mask = BIT(HOST_PORT_NUM); + else + port_mask = BIT(priv->emac_port); + + err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask, + ALE_VLAN, mdb->vid, 0); + dev_dbg(priv->dev, "MDB add: %s: vid %u:%pM ports: %X\n", + priv->ndev->name, mdb->vid, mdb->addr, port_mask); + + return err; +} + +static int cpsw_port_mdb_del(struct cpsw_priv *priv, + struct switchdev_obj_port_mdb *mdb) + +{ + struct net_device *orig_dev = mdb->obj.orig_dev; + bool cpu_port = netif_is_bridge_master(orig_dev); + struct cpsw_common *cpsw = priv->cpsw; + int del_mask; + int err; + + if (cpu_port) + del_mask = BIT(HOST_PORT_NUM); + else + del_mask = BIT(priv->emac_port); + + err = cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask, + ALE_VLAN, mdb->vid); + dev_dbg(priv->dev, "MDB del: %s: vid %u:%pM ports: %X\n", + priv->ndev->name, mdb->vid, mdb->addr, del_mask); + + return err; +} + +static int cpsw_port_obj_add(struct net_device *ndev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans, + struct netlink_ext_ack *extack) +{ + struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + struct cpsw_priv *priv = netdev_priv(ndev); + int err = 0; + + dev_dbg(priv->dev, "obj_add: id %u port: %u\n", + obj->id, priv->emac_port); + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = cpsw_port_vlans_add(priv, vlan, trans); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = cpsw_port_mdb_add(priv, mdb, trans); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int cpsw_port_obj_del(struct net_device *ndev, + const struct switchdev_obj *obj) +{ + struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + struct cpsw_priv *priv = netdev_priv(ndev); + int err = 0; + + dev_dbg(priv->dev, "obj_del: id %u port: %u\n", + obj->id, priv->emac_port); + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = cpsw_port_vlans_del(priv, vlan); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = cpsw_port_mdb_del(priv, mdb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static void cpsw_fdb_offload_notify(struct net_device *ndev, + struct switchdev_notifier_fdb_info *rcv) +{ + struct switchdev_notifier_fdb_info info; + + info.addr = rcv->addr; + info.vid = rcv->vid; + info.offloaded = true; + call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, + ndev, &info.info, NULL); +} + +static void cpsw_switchdev_event_work(struct work_struct *work) +{ + struct cpsw_switchdev_event_work *switchdev_work = + container_of(work, struct cpsw_switchdev_event_work, work); + struct cpsw_priv *priv = switchdev_work->priv; + struct switchdev_notifier_fdb_info *fdb; + struct cpsw_common *cpsw = priv->cpsw; + int port = priv->emac_port; + + rtnl_lock(); + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + fdb = &switchdev_work->fdb_info; + + dev_dbg(cpsw->dev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port 
%d\n", + fdb->addr, fdb->vid, fdb->added_by_user, + fdb->offloaded, port); + + if (!fdb->added_by_user) + break; + if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) + port = HOST_PORT_NUM; + + cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port, + fdb->vid ? ALE_VLAN : 0, fdb->vid); + cpsw_fdb_offload_notify(priv->ndev, fdb); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb = &switchdev_work->fdb_info; + + dev_dbg(cpsw->dev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n", + fdb->addr, fdb->vid, fdb->added_by_user, + fdb->offloaded, port); + + if (!fdb->added_by_user) + break; + if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0) + port = HOST_PORT_NUM; + + cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port, + fdb->vid ? ALE_VLAN : 0, fdb->vid); + break; + default: + break; + } + rtnl_unlock(); + + kfree(switchdev_work->fdb_info.addr); + kfree(switchdev_work); + dev_put(priv->ndev); +} + +/* called under rcu_read_lock() */ +static int cpsw_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *ndev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_fdb_info *fdb_info = ptr; + struct cpsw_switchdev_event_work *switchdev_work; + struct cpsw_priv *priv = netdev_priv(ndev); + int err; + + if (event == SWITCHDEV_PORT_ATTR_SET) { + err = switchdev_handle_port_attr_set(ndev, ptr, + cpsw_port_dev_check, + cpsw_port_attr_set); + return notifier_from_errno(err); + } + + if (!cpsw_port_dev_check(ndev)) + return NOTIFY_DONE; + + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (WARN_ON(!switchdev_work)) + return NOTIFY_BAD; + + INIT_WORK(&switchdev_work->work, cpsw_switchdev_event_work); + switchdev_work->priv = priv; + switchdev_work->event = event; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + memcpy(&switchdev_work->fdb_info, ptr, + sizeof(switchdev_work->fdb_info)); + switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!switchdev_work->fdb_info.addr) + goto err_addr_alloc; + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, + fdb_info->addr); + dev_hold(ndev); + break; + default: + kfree(switchdev_work); + return NOTIFY_DONE; + } + + queue_work(system_long_wq, &switchdev_work->work); + + return NOTIFY_DONE; + +err_addr_alloc: + kfree(switchdev_work); + return NOTIFY_BAD; +} + +static struct notifier_block cpsw_switchdev_notifier = { + .notifier_call = cpsw_switchdev_event, +}; + +static int cpsw_switchdev_blocking_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + cpsw_port_dev_check, + cpsw_port_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + cpsw_port_dev_check, + cpsw_port_obj_del); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + cpsw_port_dev_check, + cpsw_port_attr_set); + return notifier_from_errno(err); + default: + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block cpsw_switchdev_bl_notifier = { + .notifier_call = cpsw_switchdev_blocking_event, +}; + +int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw) +{ + int ret = 0; + + ret = register_switchdev_notifier(&cpsw_switchdev_notifier); + if (ret) { + dev_err(cpsw->dev, "register 
switchdev notifier fail ret:%d\n", + ret); + return ret; + } + + ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier); + if (ret) { + dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n", + ret); + unregister_switchdev_notifier(&cpsw_switchdev_notifier); + } + + return ret; +} + +void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw) +{ + unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier); + unregister_switchdev_notifier(&cpsw_switchdev_notifier); +} diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.h b/drivers/net/ethernet/ti/cpsw_switchdev.h new file mode 100644 index 000000000000..04a045dba7d4 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_switchdev.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Texas Instruments Ethernet Switch Driver + */ + +#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_ +#define DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_ + +#include <net/switchdev.h> + +bool cpsw_port_dev_check(const struct net_device *dev); +int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw); +void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw); + +#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_ */ diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 4209d1cf57f6..034dbab7aa80 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -609,7 +609,8 @@ struct nvsp_5_send_indirect_table { /* The number of entries in the send indirection table */ u32 count; - /* The offset of the send indirection table from top of this struct. + /* The offset of the send indirection table from the beginning of + * struct nvsp_message. * The send indirection table tells which channel to put the send * traffic on. Each entry is a channel number. */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index d22a36fc7a7c..eab83e71567a 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1178,20 +1178,39 @@ static int netvsc_receive(struct net_device *ndev, } static void netvsc_send_table(struct net_device *ndev, - const struct nvsp_message *nvmsg) + struct netvsc_device *nvscdev, + const struct nvsp_message *nvmsg, + u32 msglen) { struct net_device_context *net_device_ctx = netdev_priv(ndev); - u32 count, *tab; + u32 count, offset, *tab; int i; count = nvmsg->msg.v5_msg.send_table.count; + offset = nvmsg->msg.v5_msg.send_table.offset; + if (count != VRSS_SEND_TAB_SIZE) { netdev_err(ndev, "Received wrong send-table size:%u\n", count); return; } - tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table + - nvmsg->msg.v5_msg.send_table.offset); + /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be + * wrong due to a host bug. So fix the offset here. + */ + if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 && + msglen >= sizeof(struct nvsp_message_header) + + sizeof(union nvsp_6_message_uber) + count * sizeof(u32)) + offset = sizeof(struct nvsp_message_header) + + sizeof(union nvsp_6_message_uber); + + /* Boundary check for all versions */ + if (offset > msglen - count * sizeof(u32)) { + netdev_err(ndev, "Received send-table offset too big:%u\n", + offset); + return; + } + + tab = (void *)nvmsg + offset; for (i = 0; i < count; i++) net_device_ctx->tx_table[i] = tab[i]; @@ -1209,12 +1228,14 @@ static void netvsc_send_vf(struct net_device *ndev, net_device_ctx->vf_alloc ? 
"added" : "removed"); } -static void netvsc_receive_inband(struct net_device *ndev, - const struct nvsp_message *nvmsg) +static void netvsc_receive_inband(struct net_device *ndev, + struct netvsc_device *nvscdev, + const struct nvsp_message *nvmsg, + u32 msglen) { switch (nvmsg->hdr.msg_type) { case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: - netvsc_send_table(ndev, nvmsg); + netvsc_send_table(ndev, nvscdev, nvmsg, msglen); break; case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: @@ -1232,6 +1253,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device, { struct vmbus_channel *channel = nvchan->channel; const struct nvsp_message *nvmsg = hv_pkt_data(desc); + u32 msglen = hv_pkt_datalen(desc); trace_nvsp_recv(ndev, channel, nvmsg); @@ -1247,7 +1269,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device, break; case VM_PKT_DATA_INBAND: - netvsc_receive_inband(ndev, nvmsg); + netvsc_receive_inband(ndev, net_device, nvmsg, msglen); break; default: diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index 8af5b7e9f4ed..c92a62dbf398 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -74,9 +74,9 @@ config IEEE802154_ATUSB The module will be called 'atusb'. config IEEE802154_ADF7242 - tristate "ADF7242 transceiver driver" - depends on IEEE802154_DRIVERS && MAC802154 - depends on SPI + tristate "ADF7242 transceiver driver" + depends on IEEE802154_DRIVERS && MAC802154 + depends on SPI ---help--- Say Y here to enable the ADF7242 SPI 802.15.4 wireless controller. @@ -107,9 +107,9 @@ config IEEE802154_CA8210_DEBUGFS management entities. config IEEE802154_MCR20A - tristate "MCR20A transceiver driver" - depends on IEEE802154_DRIVERS && MAC802154 - depends on SPI + tristate "MCR20A transceiver driver" + depends on IEEE802154_DRIVERS && MAC802154 + depends on SPI ---help--- Say Y here to enable the MCR20A SPI 802.15.4 wireless controller. 
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index fcd418716c10..1c7a7c57dec3 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -239,7 +239,7 @@ static int dp83869_configure_rgmii(struct phy_device *phydev, dp83869->io_impedance & DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL); - return ret; + return 0; } static int dp83869_configure_mode(struct phy_device *phydev, diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 3b99882692e3..1bf13017d288 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -26,6 +26,7 @@ #include <linux/hwmon.h> #include <linux/marvell_phy.h> #include <linux/phy.h> +#include <linux/sfp.h> #define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe #define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa) @@ -206,6 +207,28 @@ static int mv3310_hwmon_probe(struct phy_device *phydev) } #endif +static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) +{ + struct phy_device *phydev = upstream; + __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, }; + phy_interface_t iface; + + sfp_parse_support(phydev->sfp_bus, id, support); + iface = sfp_select_interface(phydev->sfp_bus, id, support); + + if (iface != PHY_INTERFACE_MODE_10GKR) { + dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n"); + return -EINVAL; + } + return 0; +} + +static const struct sfp_upstream_ops mv3310_sfp_ops = { + .attach = phy_sfp_attach, + .detach = phy_sfp_detach, + .module_insert = mv3310_sfp_insert, +}; + static int mv3310_probe(struct phy_device *phydev) { struct mv3310_priv *priv; @@ -236,7 +259,7 @@ static int mv3310_probe(struct phy_device *phydev) if (ret) return ret; - return 0; + return phy_sfp_probe(phydev, &mv3310_sfp_ops); } static int mv3310_suspend(struct phy_device *phydev) diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 58d6504495e0..f798de3276dc 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -145,8 +145,11 @@ err_out_free_mdiobus: static int sun4i_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); + struct sun4i_mdio_data *data = bus->priv; mdiobus_unregister(bus); + if (data->regulator) + regulator_disable(data->regulator); mdiobus_free(bus); return 0; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 35876562e32a..dbacb0031877 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -65,7 +65,7 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev) reset = devm_reset_control_get_exclusive(&mdiodev->dev, "phy"); if (IS_ERR(reset)) { - if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOSYS) + if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP) reset = NULL; else return PTR_ERR(reset); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 105d389b58e7..80be4d691e5b 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -23,6 +23,7 @@ #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/phy_led_triggers.h> +#include <linux/sfp.h> #include <linux/workqueue.h> #include <linux/mdio.h> #include <linux/io.h> @@ -252,66 +253,6 @@ static void phy_sanitize_settings(struct phy_device *phydev) } } -/** - * phy_ethtool_sset - generic ethtool sset function, handles all the details - * @phydev: target phy_device struct - * @cmd: ethtool_cmd - * - * A few notes about parameter checking: - * - * - We don't set port or transceiver, so we don't care what they - * were set to. 
- * - phy_start_aneg() will make sure forced settings are sane, and - * choose the next best ones from the ones selected, so we don't - * care if ethtool tries to give us bad values. - */ -int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) -{ - __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); - u32 speed = ethtool_cmd_speed(cmd); - - if (cmd->phy_address != phydev->mdio.addr) - return -EINVAL; - - /* We make sure that we don't pass unsupported values in to the PHY */ - ethtool_convert_legacy_u32_to_link_mode(advertising, cmd->advertising); - linkmode_and(advertising, advertising, phydev->supported); - - /* Verify the settings we care about. */ - if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) - return -EINVAL; - - if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) - return -EINVAL; - - if (cmd->autoneg == AUTONEG_DISABLE && - ((speed != SPEED_1000 && - speed != SPEED_100 && - speed != SPEED_10) || - (cmd->duplex != DUPLEX_HALF && - cmd->duplex != DUPLEX_FULL))) - return -EINVAL; - - phydev->autoneg = cmd->autoneg; - - phydev->speed = speed; - - linkmode_copy(phydev->advertising, advertising); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - phydev->advertising, AUTONEG_ENABLE == cmd->autoneg); - - phydev->duplex = cmd->duplex; - - phydev->mdix_ctrl = cmd->eth_tp_mdix_ctrl; - - /* Restart the PHY */ - phy_start_aneg(phydev); - - return 0; -} -EXPORT_SYMBOL(phy_ethtool_sset); - int phy_ethtool_ksettings_set(struct phy_device *phydev, const struct ethtool_link_ksettings *cmd) { @@ -841,6 +782,9 @@ void phy_stop(struct phy_device *phydev) mutex_lock(&phydev->lock); + if (phydev->sfp_bus) + sfp_upstream_stop(phydev->sfp_bus); + phydev->state = PHY_HALTED; mutex_unlock(&phydev->lock); @@ -875,6 +819,9 @@ void phy_start(struct phy_device *phydev) goto out; } + if (phydev->sfp_bus) + sfp_upstream_start(phydev->sfp_bus); + /* if phy was suspended, bring the physical link up again */ __phy_resume(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index fa71998fea51..0887ed2bb050 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -27,6 +27,7 @@ #include <linux/bitmap.h> #include <linux/phy.h> #include <linux/phy_led_triggers.h> +#include <linux/sfp.h> #include <linux/mdio.h> #include <linux/io.h> #include <linux/uaccess.h> @@ -488,7 +489,7 @@ static int phy_bus_match(struct device *dev, struct device_driver *drv) if (phydev->is_c45) { for (i = 1; i < num_ids; i++) { - if (!(phydev->c45_ids.devices_in_package & (1 << i))) + if (phydev->c45_ids.device_ids[i] == 0xffffffff) continue; if ((phydrv->phy_id & phydrv->phy_id_mask) == @@ -596,8 +597,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, mdiodev->device_free = phy_mdio_device_free; mdiodev->device_remove = phy_mdio_device_remove; - dev->speed = 0; - dev->duplex = -1; + dev->speed = SPEED_UNKNOWN; + dev->duplex = DUPLEX_UNKNOWN; dev->pause = 0; dev->asym_pause = 0; dev->link = 0; @@ -632,7 +633,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, int i; for (i = 1; i < num_ids; i++) { - if (!(c45_ids->devices_in_package & (1 << i))) + if (c45_ids->device_ids[i] == 0xffffffff) continue; ret = phy_request_driver_module(dev, @@ -812,10 +813,13 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, */ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) { - struct phy_c45_device_ids c45_ids = {0}; + struct phy_c45_device_ids c45_ids; 
u32 phy_id = 0; int r; + c45_ids.devices_in_package = 0; + memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids)); + r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids); if (r) return ERR_PTR(r); @@ -1175,6 +1179,65 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(phy_standalone); /** + * phy_sfp_attach - attach the SFP bus to the PHY upstream network device + * @upstream: pointer to the phy device + * @bus: sfp bus representing cage being attached + * + * This is used to fill in the sfp_upstream_ops .attach member. + */ +void phy_sfp_attach(void *upstream, struct sfp_bus *bus) +{ + struct phy_device *phydev = upstream; + + if (phydev->attached_dev) + phydev->attached_dev->sfp_bus = bus; + phydev->sfp_bus_attached = true; +} +EXPORT_SYMBOL(phy_sfp_attach); + +/** + * phy_sfp_detach - detach the SFP bus from the PHY upstream network device + * @upstream: pointer to the phy device + * @bus: sfp bus representing cage being attached + * + * This is used to fill in the sfp_upstream_ops .detach member. + */ +void phy_sfp_detach(void *upstream, struct sfp_bus *bus) +{ + struct phy_device *phydev = upstream; + + if (phydev->attached_dev) + phydev->attached_dev->sfp_bus = NULL; + phydev->sfp_bus_attached = false; +} +EXPORT_SYMBOL(phy_sfp_detach); + +/** + * phy_sfp_probe - probe for a SFP cage attached to this PHY device + * @phydev: Pointer to phy_device + * @ops: SFP's upstream operations + */ +int phy_sfp_probe(struct phy_device *phydev, + const struct sfp_upstream_ops *ops) +{ + struct sfp_bus *bus; + int ret; + + if (phydev->mdio.dev.fwnode) { + bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode); + if (IS_ERR(bus)) + return PTR_ERR(bus); + + phydev->sfp_bus = bus; + + ret = sfp_bus_add_upstream(bus, phydev, ops); + sfp_bus_put(bus); + } + return 0; +} +EXPORT_SYMBOL(phy_sfp_probe); + +/** * phy_attach_direct - attach a network device to a given PHY device pointer * @dev: network device to attach * @phydev: Pointer to phy_device to attach @@ -1249,6 +1312,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, if (dev) { phydev->attached_dev = dev; dev->phydev = phydev; + + if (phydev->sfp_bus_attached) + dev->sfp_bus = phydev->sfp_bus; } /* Some Ethernet drivers try to connect to a PHY device before @@ -2418,6 +2484,9 @@ static int phy_remove(struct device *dev) phydev->state = PHY_DOWN; mutex_unlock(&phydev->lock); + sfp_bus_del_upstream(phydev->sfp_bus); + phydev->sfp_bus = NULL; + if (phydev->drv && phydev->drv->remove) { phydev->drv->remove(phydev); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 6ce2e6c63e75..8e2a12885789 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -593,6 +593,8 @@ static int phylink_register_sfp(struct phylink *pl, * Create a new phylink instance, and parse the link parameters found in @np. * This will parse in-band modes, fixed-link or SFP configuration. * + * Note: the rtnl lock must not be held when calling this function. + * * Returns a pointer to a &struct phylink, or an error-pointer value. Users * must use IS_ERR() to check for errors from this function. */ @@ -670,6 +672,8 @@ EXPORT_SYMBOL_GPL(phylink_create); * * Destroy a phylink instance. Any PHY that has been attached must have been * cleaned up via phylink_disconnect_phy() prior to calling this function. + * + * Note: the rtnl lock must not be held when calling this function. 
*/ void phylink_destroy(struct phylink *pl) { @@ -1242,7 +1246,13 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, pl->link_config.duplex = our_kset.base.duplex; pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; - if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { + /* If we have a PHY, phylib will call our link state function if the + * mode has changed, which will trigger a resolve and update the MAC + * configuration. For a fixed link, this isn't able to change any + * parameters, which just leaves inband mode. + */ + if (pl->link_an_mode == MLO_AN_INBAND && + !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { phylink_mac_config(pl, &pl->link_config); phylink_mac_an_restart(pl); } @@ -1322,15 +1332,16 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, if (pause->tx_pause) config->pause |= MLO_PAUSE_TX; - if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { + /* If we have a PHY, phylib will call our link state function if the + * mode has changed, which will trigger a resolve and update the MAC + * configuration. + */ + if (pl->phydev) { + phy_set_asym_pause(pl->phydev, pause->rx_pause, + pause->tx_pause); + } else if (!test_bit(PHYLINK_DISABLE_STOPPED, + &pl->phylink_disable_state)) { switch (pl->link_an_mode) { - case MLO_AN_PHY: - /* Silently mark the carrier down, and then trigger a resolve */ - if (pl->netdev) - netif_carrier_off(pl->netdev); - phylink_run_resolve(pl); - break; - case MLO_AN_FIXED: /* Should we allow fixed links to change against the config? */ phylink_resolve_flow(pl, config); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index c5398a023440..5a72093ab6e7 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -10,6 +10,12 @@ #include "sfp.h" +struct sfp_quirk { + const char *vendor; + const char *part; + void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes); +}; + /** * struct sfp_bus - internal representation of a sfp bus */ @@ -22,6 +28,7 @@ struct sfp_bus { const struct sfp_socket_ops *socket_ops; struct device *sfp_dev; struct sfp *sfp; + const struct sfp_quirk *sfp_quirk; const struct sfp_upstream_ops *upstream_ops; void *upstream; @@ -31,6 +38,71 @@ struct sfp_bus { bool started; }; +static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id, + unsigned long *modes) +{ + phylink_set(modes, 2500baseX_Full); +} + +static const struct sfp_quirk sfp_quirks[] = { + { + // Alcatel Lucent G-010S-P can operate at 2500base-X, but + // incorrectly report 2500MBd NRZ in their EEPROM + .vendor = "ALCATELLUCENT", + .part = "G010SP", + .modes = sfp_quirk_2500basex, + }, { + // Alcatel Lucent G-010S-A can operate at 2500base-X, but + // report 3.2GBd NRZ in their EEPROM + .vendor = "ALCATELLUCENT", + .part = "3FE46541AA", + .modes = sfp_quirk_2500basex, + }, { + // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd + // NRZ in their EEPROM + .vendor = "HUAWEI", + .part = "MA5671A", + .modes = sfp_quirk_2500basex, + }, +}; + +static size_t sfp_strlen(const char *str, size_t maxlen) +{ + size_t size, i; + + /* Trailing characters should be filled with space chars */ + for (i = 0, size = 0; i < maxlen; i++) + if (str[i] != ' ') + size = i + 1; + + return size; +} + +static bool sfp_match(const char *qs, const char *str, size_t len) +{ + if (!qs) + return true; + if (strlen(qs) != len) + return false; + return !strncmp(qs, str, len); +} + +static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id) +{ + 
const struct sfp_quirk *q; + unsigned int i; + size_t vs, ps; + + vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name)); + ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn)); + + for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++) + if (sfp_match(q->vendor, id->base.vendor_name, vs) && + sfp_match(q->part, id->base.vendor_pn, ps)) + return q; + + return NULL; +} /** * sfp_parse_port() - Parse the EEPROM base ID, setting the port type * @bus: a pointer to the &struct sfp_bus structure for the sfp module @@ -234,6 +306,9 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, phylink_set(modes, 1000baseX_Full); } + if (bus->sfp_quirk) + bus->sfp_quirk->modes(id, modes); + bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS); phylink_set(support, Autoneg); @@ -610,6 +685,8 @@ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id) const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); int ret = 0; + bus->sfp_quirk = sfp_lookup_quirk(id); + if (ops && ops->module_insert) ret = ops->module_insert(bus->upstream, id); @@ -623,6 +700,8 @@ void sfp_module_remove(struct sfp_bus *bus) if (ops && ops->module_remove) ops->module_remove(bus->upstream); + + bus->sfp_quirk = NULL; } EXPORT_SYMBOL_GPL(sfp_module_remove); diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index b0f88c2c0153..bdbbb76f8fd3 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -201,7 +201,10 @@ struct sfp { struct gpio_desc *gpio[GPIO_MAX]; int gpio_irq[GPIO_MAX]; + bool need_poll; + struct mutex st_mutex; /* Protects state */ + unsigned int state_soft_mask; unsigned int state; struct delayed_work poll; struct delayed_work timeout; @@ -395,24 +398,90 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) } /* Interface */ -static unsigned int sfp_get_state(struct sfp *sfp) +static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len) { - return sfp->get_state(sfp); + return sfp->read(sfp, a2, addr, buf, len); } -static void sfp_set_state(struct sfp *sfp, unsigned int state) +static int sfp_write(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len) { - sfp->set_state(sfp, state); + return sfp->write(sfp, a2, addr, buf, len); } -static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len) +static unsigned int sfp_soft_get_state(struct sfp *sfp) { - return sfp->read(sfp, a2, addr, buf, len); + unsigned int state = 0; + u8 status; + + if (sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status)) == + sizeof(status)) { + if (status & SFP_STATUS_RX_LOS) + state |= SFP_F_LOS; + if (status & SFP_STATUS_TX_FAULT) + state |= SFP_F_TX_FAULT; + } + + return state & sfp->state_soft_mask; } -static int sfp_write(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len) +static void sfp_soft_set_state(struct sfp *sfp, unsigned int state) { - return sfp->write(sfp, a2, addr, buf, len); + u8 status; + + if (sfp_read(sfp, true, SFP_STATUS, &status, sizeof(status)) == + sizeof(status)) { + if (state & SFP_F_TX_DISABLE) + status |= SFP_STATUS_TX_DISABLE_FORCE; + else + status &= ~SFP_STATUS_TX_DISABLE_FORCE; + + sfp_write(sfp, true, SFP_STATUS, &status, sizeof(status)); + } +} + +static void sfp_soft_start_poll(struct sfp *sfp) +{ + const struct sfp_eeprom_id *id = &sfp->id; + + sfp->state_soft_mask = 0; + if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE && + !sfp->gpio[GPIO_TX_DISABLE]) + sfp->state_soft_mask |= SFP_F_TX_DISABLE; + if (id->ext.enhopts & 
SFP_ENHOPTS_SOFT_TX_FAULT && + !sfp->gpio[GPIO_TX_FAULT]) + sfp->state_soft_mask |= SFP_F_TX_FAULT; + if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS && + !sfp->gpio[GPIO_LOS]) + sfp->state_soft_mask |= SFP_F_LOS; + + if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) && + !sfp->need_poll) + mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); +} + +static void sfp_soft_stop_poll(struct sfp *sfp) +{ + sfp->state_soft_mask = 0; +} + +static unsigned int sfp_get_state(struct sfp *sfp) +{ + unsigned int state = sfp->get_state(sfp); + + if (state & SFP_F_PRESENT && + sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT)) + state |= sfp_soft_get_state(sfp); + + return state; +} + +static void sfp_set_state(struct sfp *sfp, unsigned int state) +{ + sfp->set_state(sfp, state); + + if (state & SFP_F_PRESENT && + sfp->state_soft_mask & SFP_F_TX_DISABLE) + sfp_soft_set_state(sfp, state); } static unsigned int sfp_check(void *buf, size_t len) @@ -1407,11 +1476,6 @@ static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn) } } -static void sfp_sm_mod_init(struct sfp *sfp) -{ - sfp_module_tx_enable(sfp); -} - static void sfp_sm_probe_for_phy(struct sfp *sfp) { /* Setting the serdes link mode is guesswork: there's no @@ -1574,7 +1638,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) (int)sizeof(id.ext.datecode), id.ext.datecode); /* Check whether we support this module */ - if (!sfp->type->module_supported(&sfp->id)) { + if (!sfp->type->module_supported(&id)) { dev_err(sfp->dev, "module is not supported - phys id 0x%02x 0x%02x\n", sfp->id.base.phys_id, sfp->id.base.phys_ext_id); @@ -1764,6 +1828,7 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event) if (sfp->mod_phy) sfp_sm_phy_detach(sfp); sfp_module_tx_disable(sfp); + sfp_soft_stop_poll(sfp); sfp_sm_next(sfp, SFP_S_DOWN, 0); return; } @@ -1775,7 +1840,10 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event) sfp->sm_dev_state != SFP_DEV_UP) break; - sfp_sm_mod_init(sfp); + if (!(sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE)) + sfp_soft_start_poll(sfp); + + sfp_module_tx_enable(sfp); /* Initialise the fault clearance retries */ sfp->sm_retries = 5; @@ -2031,7 +2099,10 @@ static void sfp_poll(struct work_struct *work) struct sfp *sfp = container_of(work, struct sfp, poll.work); sfp_check_state(sfp); - mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); + + if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) || + sfp->need_poll) + mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); } static struct sfp *sfp_alloc(struct device *dev) @@ -2076,7 +2147,6 @@ static int sfp_probe(struct platform_device *pdev) const struct sff_data *sff; struct i2c_adapter *i2c; struct sfp *sfp; - bool poll = false; int err, i; sfp = sfp_alloc(&pdev->dev); @@ -2183,7 +2253,7 @@ static int sfp_probe(struct platform_device *pdev) sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]); if (!sfp->gpio_irq[i]) { - poll = true; + sfp->need_poll = true; continue; } @@ -2195,11 +2265,11 @@ static int sfp_probe(struct platform_device *pdev) dev_name(sfp->dev), sfp); if (err) { sfp->gpio_irq[i] = 0; - poll = true; + sfp->need_poll = true; } } - if (poll) + if (sfp->need_poll) mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); /* We could have an issue in cases no Tx disable pin is available or diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ac079395c8d4..c5ebf35d2488 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -5249,10 +5249,10 @@ static int rtl8152_close(struct net_device *netdev) 
unregister_pm_notifier(&tp->pm_notifier); #endif tasklet_disable(&tp->tx_tl); - napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); + napi_disable(&tp->napi); netif_stop_queue(netdev); res = usb_autopm_get_interface(tp->intf); @@ -5518,10 +5518,10 @@ static int rtl8152_pre_reset(struct usb_interface *intf) netif_stop_queue(netdev); tasklet_disable(&tp->tx_tl); - napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); + napi_disable(&tp->napi); if (netif_carrier_ok(netdev)) { mutex_lock(&tp->control); tp->rtl_ops.disable(tp); @@ -5639,7 +5639,7 @@ static int rtl8152_system_resume(struct r8152 *tp) netif_device_attach(netdev); - if (netif_running(netdev) && netdev->flags & IFF_UP) { + if (netif_running(netdev) && (netdev->flags & IFF_UP)) { tp->rtl_ops.up(tp); netif_carrier_off(netdev); set_bit(WORK_ENABLE, &tp->flags); @@ -6213,9 +6213,15 @@ static int rtl8152_set_tunable(struct net_device *netdev, } if (tp->rx_copybreak != val) { - napi_disable(&tp->napi); - tp->rx_copybreak = val; - napi_enable(&tp->napi); + if (netdev->flags & IFF_UP) { + mutex_lock(&tp->control); + napi_disable(&tp->napi); + tp->rx_copybreak = val; + napi_enable(&tp->napi); + mutex_unlock(&tp->control); + } else { + tp->rx_copybreak = val; + } } break; default: @@ -6243,9 +6249,15 @@ static int rtl8152_set_ringparam(struct net_device *netdev, return -EINVAL; if (tp->rx_pending != ring->rx_pending) { - napi_disable(&tp->napi); - tp->rx_pending = ring->rx_pending; - napi_enable(&tp->napi); + if (netdev->flags & IFF_UP) { + mutex_lock(&tp->control); + napi_disable(&tp->napi); + tp->rx_pending = ring->rx_pending; + napi_enable(&tp->napi); + mutex_unlock(&tp->control); + } else { + tp->rx_pending = ring->rx_pending; + } } return 0; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5a635f028bdc..4d7d5434cc5d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2445,11 +2445,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, if (!prog && !old_prog) return 0; - if (prog) { - prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); - if (IS_ERR(prog)) - return PTR_ERR(prog); - } + if (prog) + bpf_prog_add(prog, vi->max_queue_pairs - 1); /* Make sure NAPI is not using any XDP TX queues for RX. */ if (netif_running(dev)) { diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig index 56616d988c96..7b90b8546162 100644 --- a/drivers/net/wireless/ath/Kconfig +++ b/drivers/net/wireless/ath/Kconfig @@ -30,12 +30,12 @@ config ATH_DEBUG Right now only ath9k makes use of this. config ATH_TRACEPOINTS - bool "Atheros wireless tracing" - depends on ATH_DEBUG - depends on EVENT_TRACING - ---help--- - This option enables tracepoints for atheros wireless drivers. - Currently, ath9k makes use of this facility. + bool "Atheros wireless tracing" + depends on ATH_DEBUG + depends on EVENT_TRACING + ---help--- + This option enables tracepoints for atheros wireless drivers. + Currently, ath9k makes use of this facility. 
config ATH_REG_DYNAMIC_USER_REG_HINTS bool "Atheros dynamic user regulatory hints" diff --git a/drivers/net/wireless/ath/ar5523/Kconfig b/drivers/net/wireless/ath/ar5523/Kconfig index 65b39c7d035d..e82df5f1ea67 100644 --- a/drivers/net/wireless/ath/ar5523/Kconfig +++ b/drivers/net/wireless/ath/ar5523/Kconfig @@ -1,9 +1,9 @@ # SPDX-License-Identifier: ISC config AR5523 - tristate "Atheros AR5523 wireless driver support" - depends on MAC80211 && USB - select ATH_COMMON - select FW_LOADER - ---help--- - This module add support for AR5523 based USB dongles such as D-Link - DWL-G132, Netgear WPN111 and many more. + tristate "Atheros AR5523 wireless driver support" + depends on MAC80211 && USB + select ATH_COMMON + select FW_LOADER + ---help--- + This module add support for AR5523 based USB dongles such as D-Link + DWL-G132, Netgear WPN111 and many more. diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index c99f42284465..78620c6b64a2 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -144,13 +144,13 @@ config ATH9K_RFKILL a platform that can toggle the RF-Kill GPIO. config ATH9K_CHANNEL_CONTEXT - bool "Channel Context support" - depends on ATH9K - default n - ---help--- - This option enables channel context support in ath9k, which is needed - for multi-channel concurrency. Enable this if P2P PowerSave support - is required. + bool "Channel Context support" + depends on ATH9K + default n + ---help--- + This option enables channel context support in ath9k, which is needed + for multi-channel concurrency. Enable this if P2P PowerSave support + is required. config ATH9K_PCOEM bool "Atheros ath9k support for PC OEM cards" if EXPERT @@ -162,32 +162,32 @@ config ATH9K_PCI_NO_EEPROM depends on ATH9K_PCI default n help - This separate driver provides a loader in order to support the - AR500X to AR92XX-generation of ath9k PCI(e) WiFi chips, which have - their initialization data (which contains the real PCI Device ID - that ath9k will need) stored together with the calibration data out - of reach for the ath9k chip. + This separate driver provides a loader in order to support the + AR500X to AR92XX-generation of ath9k PCI(e) WiFi chips, which have + their initialization data (which contains the real PCI Device ID + that ath9k will need) stored together with the calibration data out + of reach for the ath9k chip. - These devices are usually various network appliances, routers or - access Points and such. + These devices are usually various network appliances, routers or + access Points and such. - If unsure say N. + If unsure say N. config ATH9K_HTC - tristate "Atheros HTC based wireless cards support" - depends on USB && MAC80211 - select ATH9K_HW - select MAC80211_LEDS - select LEDS_CLASS - select NEW_LEDS - select ATH9K_COMMON - ---help--- - Support for Atheros HTC based cards. - Chipsets supported: AR9271 - - For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc - - The built module will be ath9k_htc. + tristate "Atheros HTC based wireless cards support" + depends on USB && MAC80211 + select ATH9K_HW + select MAC80211_LEDS + select LEDS_CLASS + select NEW_LEDS + select ATH9K_COMMON + ---help--- + Support for Atheros HTC based cards. + Chipsets supported: AR9271 + + For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc + + The built module will be ath9k_htc. 
config ATH9K_HTC_DEBUGFS bool "Atheros ath9k_htc debugging" diff --git a/drivers/net/wireless/atmel/Kconfig b/drivers/net/wireless/atmel/Kconfig index 4c0556b3a5ba..c2142c70f25d 100644 --- a/drivers/net/wireless/atmel/Kconfig +++ b/drivers/net/wireless/atmel/Kconfig @@ -13,29 +13,29 @@ config WLAN_VENDOR_ATMEL if WLAN_VENDOR_ATMEL config ATMEL - tristate "Atmel at76c50x chipset 802.11b support" - depends on CFG80211 && (PCI || PCMCIA) - select WIRELESS_EXT - select WEXT_PRIV - select FW_LOADER - select CRC32 - ---help--- - A driver 802.11b wireless cards based on the Atmel fast-vnet - chips. This driver supports standard Linux wireless extensions. - - Many cards based on this chipset do not have flash memory - and need their firmware loaded at start-up. If yours is - one of these, you will need to provide a firmware image - to be loaded into the card by the driver. The Atmel - firmware package can be downloaded from - <http://www.thekelleys.org.uk/atmel> + tristate "Atmel at76c50x chipset 802.11b support" + depends on CFG80211 && (PCI || PCMCIA) + select WIRELESS_EXT + select WEXT_PRIV + select FW_LOADER + select CRC32 + ---help--- + A driver 802.11b wireless cards based on the Atmel fast-vnet + chips. This driver supports standard Linux wireless extensions. + + Many cards based on this chipset do not have flash memory + and need their firmware loaded at start-up. If yours is + one of these, you will need to provide a firmware image + to be loaded into the card by the driver. The Atmel + firmware package can be downloaded from + <http://www.thekelleys.org.uk/atmel> config PCI_ATMEL - tristate "Atmel at76c506 PCI cards" - depends on ATMEL && PCI - ---help--- - Enable support for PCI and mini-PCI cards containing the - Atmel at76c506 chip. + tristate "Atmel at76c506 PCI cards" + depends on ATMEL && PCI + ---help--- + Enable support for PCI and mini-PCI cards containing the + Atmel at76c506 chip. config PCMCIA_ATMEL tristate "Atmel at76c502/at76c504 PCMCIA cards" diff --git a/drivers/net/wireless/ralink/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig index f8a9244ce012..d4969d617822 100644 --- a/drivers/net/wireless/ralink/rt2x00/Kconfig +++ b/drivers/net/wireless/ralink/rt2x00/Kconfig @@ -95,20 +95,20 @@ config RT2800PCI_RT35XX config RT2800PCI_RT53XX - bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)" - default y - ---help--- - This adds support for rt53xx wireless chipset family to the - rt2800pci driver. - Supported chips: RT5390 + bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)" + default y + ---help--- + This adds support for rt53xx wireless chipset family to the + rt2800pci driver. + Supported chips: RT5390 config RT2800PCI_RT3290 - bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)" - default y - ---help--- - This adds support for rt3290 wireless chipset family to the - rt2800pci driver. - Supported chips: RT3290 + bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)" + default y + ---help--- + This adds support for rt3290 wireless chipset family to the + rt2800pci driver. + Supported chips: RT3290 endif config RT2500USB @@ -174,18 +174,18 @@ config RT2800USB_RT3573 in the rt2800usb driver. config RT2800USB_RT53XX - bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)" - ---help--- - This adds support for rt53xx wireless chipset family to the - rt2800usb driver. 
- Supported chips: RT5370 + bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)" + ---help--- + This adds support for rt53xx wireless chipset family to the + rt2800usb driver. + Supported chips: RT5370 config RT2800USB_RT55XX - bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)" - ---help--- - This adds support for rt55xx wireless chipset family to the - rt2800usb driver. - Supported chips: RT5572 + bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)" + ---help--- + This adds support for rt55xx wireless chipset family to the + rt2800usb driver. + Supported chips: RT5572 config RT2800USB_UNKNOWN bool "rt2800usb - Include support for unknown (USB) devices" diff --git a/drivers/net/wireless/ti/wl12xx/Kconfig b/drivers/net/wireless/ti/wl12xx/Kconfig index e409042ee9a0..9c4511604b67 100644 --- a/drivers/net/wireless/ti/wl12xx/Kconfig +++ b/drivers/net/wireless/ti/wl12xx/Kconfig @@ -1,10 +1,10 @@ # SPDX-License-Identifier: GPL-2.0-only config WL12XX - tristate "TI wl12xx support" + tristate "TI wl12xx support" depends on MAC80211 - select WLCORE - ---help--- + select WLCORE + ---help--- This module adds support for wireless adapters based on TI wl1271, wl1273, wl1281 and wl1283 chipsets. This module does *not* include support for wl1251. For wl1251 support, use the separate homonymous - driver instead. + driver instead. diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index 7997cc6de334..01305ba2d3aa 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c @@ -450,7 +450,6 @@ static void virt_wifi_net_device_destructor(struct net_device *dev) */ kfree(dev->ieee80211_ptr); dev->ieee80211_ptr = NULL; - free_netdev(dev); } /* No lock interaction. */ @@ -458,7 +457,7 @@ static void virt_wifi_setup(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &virt_wifi_ops; - dev->priv_destructor = virt_wifi_net_device_destructor; + dev->needs_free_netdev = true; } /* Called in a RCU read critical section from netif_receive_skb */ @@ -544,6 +543,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev, goto unregister_netdev; } + dev->priv_destructor = virt_wifi_net_device_destructor; priv->being_deleted = false; priv->is_connected = false; priv->is_up = false; diff --git a/drivers/nfc/nfcmrvl/Kconfig b/drivers/nfc/nfcmrvl/Kconfig index 06f34fb4e0b0..ded0d03c0015 100644 --- a/drivers/nfc/nfcmrvl/Kconfig +++ b/drivers/nfc/nfcmrvl/Kconfig @@ -15,7 +15,7 @@ config NFC_MRVL_USB Marvell NFC-over-USB driver. This driver provides support for Marvell NFC-over-USB devices: - 8897. + 8897. Say Y here to compile support for Marvell NFC-over-USB driver into the kernel or say M to compile it as module. 
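Among the hunks above, the virt_wifi update is worth pausing on as a netdev lifetime pattern: free_netdev() is dropped from the driver's priv_destructor, the setup callback sets needs_free_netdev so the core frees the device itself, and priv_destructor is only installed after registration succeeds, which keeps the private teardown from running on the error path. The following is only a rough sketch of that ordering under a hypothetical driver name ("foo") with a hypothetical private field; it is an illustration of the pattern, not the virt_wifi code.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

struct foo_priv {
	void *scan_result;	/* hypothetical per-device state */
};

static void foo_destructor(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Release private state only; the core frees the netdev itself
	 * because needs_free_netdev is set in foo_setup().
	 */
	kfree(priv->scan_result);
}

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->needs_free_netdev = true;	/* core calls free_netdev() for us */
}

static int foo_newlink(struct net_device *dev)
{
	int err;

	err = register_netdevice(dev);	/* caller holds rtnl */
	if (err)
		return err;	/* destructor not installed yet, nothing to undo */

	/* Only now is it safe to run the private teardown on unregister. */
	dev->priv_destructor = foo_destructor;
	return 0;
}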
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index 145ddf3f0a45..604dba4f18af 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -783,7 +783,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out, rc = port100_submit_urb_for_ack(dev, GFP_KERNEL); if (rc) - usb_unlink_urb(dev->out_urb); + usb_kill_urb(dev->out_urb); exit: mutex_unlock(&dev->out_urb_lock); diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig index c3fa1840f8de..174888609779 100644 --- a/drivers/phy/ti/Kconfig +++ b/drivers/phy/ti/Kconfig @@ -90,8 +90,8 @@ config TWL4030_USB config PHY_TI_GMII_SEL tristate - default y if TI_CPSW=y - depends on TI_CPSW || COMPILE_TEST + default y if TI_CPSW=y || TI_CPSW_SWITCHDEV=y + depends on TI_CPSW || TI_CPSW_SWITCHDEV || COMPILE_TEST select GENERIC_PHY select REGMAP default m diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index f81f1523d528..293dd99b7fef 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -845,6 +845,7 @@ struct qeth_card { struct service_level qeth_service_level; struct qdio_ssqd_desc ssqd; debug_info_t *debug; + struct mutex sbp_lock; struct mutex conf_mutex; struct mutex discipline_mutex; struct napi_struct napi; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index f1f56e354516..efcbe60220d1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -901,30 +901,30 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, CCW_DEVID(cdev), dstat, cstat); print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 16, 1, irb, 64, 1); - return 1; + return -EIO; } if (dstat & DEV_STAT_UNIT_CHECK) { if (sense[SENSE_RESETTING_EVENT_BYTE] & SENSE_RESETTING_EVENT_FLAG) { QETH_CARD_TEXT(card, 2, "REVIND"); - return 1; + return -EIO; } if (sense[SENSE_COMMAND_REJECT_BYTE] & SENSE_COMMAND_REJECT_FLAG) { QETH_CARD_TEXT(card, 2, "CMDREJi"); - return 1; + return -EIO; } if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { QETH_CARD_TEXT(card, 2, "AFFE"); - return 1; + return -EIO; } if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { QETH_CARD_TEXT(card, 2, "ZEROSEN"); return 0; } QETH_CARD_TEXT(card, 2, "DGENCHK"); - return 1; + return -EIO; } return 0; } diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index ae69c981650d..989935d67b31 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -457,10 +457,14 @@ static void qeth_l2_set_promisc_mode(struct qeth_card *card) if (card->info.promisc_mode == enable) return; - if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) + if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) { qeth_setadp_promisc_mode(card, enable); - else if (card->options.sbp.reflect_promisc) - qeth_l2_promisc_to_bridge(card, enable); + } else { + mutex_lock(&card->sbp_lock); + if (card->options.sbp.reflect_promisc) + qeth_l2_promisc_to_bridge(card, enable); + mutex_unlock(&card->sbp_lock); + } } /* New MAC address is added to the hash table and marked to be written on card @@ -621,6 +625,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) int rc; qeth_l2_vnicc_set_defaults(card); + mutex_init(&card->sbp_lock); if (gdev->dev.type == &qeth_generic_devtype) { rc = qeth_l2_create_device_attributes(&gdev->dev); @@ -779,10 +784,12 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev) goto out_remove; } + mutex_lock(&card->sbp_lock); 
qeth_bridgeport_query_support(card); if (card->options.sbp.supported_funcs) dev_info(&card->gdev->dev, "The device represents a Bridge Capable Port\n"); + mutex_unlock(&card->sbp_lock); qeth_l2_register_dev_addr(card); @@ -1131,9 +1138,9 @@ static void qeth_bridge_state_change_worker(struct work_struct *work) /* Role should not change by itself, but if it did, */ /* information from the hardware is authoritative. */ - mutex_lock(&data->card->conf_mutex); + mutex_lock(&data->card->sbp_lock); data->card->options.sbp.role = entry->role; - mutex_unlock(&data->card->conf_mutex); + mutex_unlock(&data->card->sbp_lock); snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange"); snprintf(env_role, sizeof(env_role), "ROLE=%s", @@ -1199,9 +1206,9 @@ static void qeth_bridge_host_event_worker(struct work_struct *work) : (data->hostevs.lost_event_mask == 0x02) ? "Bridge port state change" : "Unknown reason"); - mutex_lock(&data->card->conf_mutex); + mutex_lock(&data->card->sbp_lock); data->card->options.sbp.hostnotification = 0; - mutex_unlock(&data->card->conf_mutex); + mutex_unlock(&data->card->sbp_lock); qeth_bridge_emit_host_event(data->card, anev_abort, 0, NULL, NULL); } else diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index d9af5fe040be..f70c7aac2dcc 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c @@ -21,6 +21,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev, if (qeth_l2_vnicc_is_in_use(card)) return sprintf(buf, "n/a (VNIC characteristics)\n"); + mutex_lock(&card->sbp_lock); if (qeth_card_hw_is_reachable(card) && card->options.sbp.supported_funcs) rc = qeth_bridgeport_query_ports(card, @@ -54,6 +55,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev, else rc = sprintf(buf, "%s\n", word); } + mutex_unlock(&card->sbp_lock); return rc; } @@ -86,6 +88,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); + mutex_lock(&card->sbp_lock); if (qeth_l2_vnicc_is_in_use(card)) rc = -EBUSY; @@ -99,6 +102,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev, } else card->options.sbp.role = role; + mutex_unlock(&card->sbp_lock); mutex_unlock(&card->conf_mutex); return rc ? rc : count; @@ -147,6 +151,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev, return rc; mutex_lock(&card->conf_mutex); + mutex_lock(&card->sbp_lock); if (qeth_l2_vnicc_is_in_use(card)) rc = -EBUSY; @@ -157,6 +162,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev, } else card->options.sbp.hostnotification = enable; + mutex_unlock(&card->sbp_lock); mutex_unlock(&card->conf_mutex); return rc ? rc : count; @@ -206,6 +212,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); + mutex_lock(&card->sbp_lock); if (qeth_l2_vnicc_is_in_use(card)) rc = -EBUSY; @@ -217,6 +224,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev, rc = 0; } + mutex_unlock(&card->sbp_lock); mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; @@ -252,6 +260,8 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) return; if (!card->options.sbp.supported_funcs) return; + + mutex_lock(&card->sbp_lock); if (card->options.sbp.role != QETH_SBP_ROLE_NONE) { /* Conditional to avoid spurious error messages */ qeth_bridgeport_setrole(card, card->options.sbp.role); @@ -263,8 +273,10 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) rc = qeth_bridgeport_an_set(card, 1); if (rc) card->options.sbp.hostnotification = 0; - } else + } else { qeth_bridgeport_an_set(card, 0); + } + mutex_unlock(&card->sbp_lock); } /* VNIC CHARS support */ diff --git a/fs/afs/callback.c b/fs/afs/callback.c index 6cdd7047c809..2dca8df1a18d 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c @@ -312,7 +312,6 @@ void afs_break_callbacks(struct afs_server *server, size_t count, _enter("%p,%zu,", server, count); ASSERT(server != NULL); - ASSERTCMP(count, <=, AFSCBMAX); /* TODO: Sort the callback break list by volume ID */ diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 0e5269374ac1..61498d9f06ef 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -637,6 +637,7 @@ long afs_wait_for_call_to_complete(struct afs_call *call, call->need_attention = false; __set_current_state(TASK_RUNNING); afs_deliver_to_call(call); + timeout = rtt2; continue; } diff --git a/fs/afs/super.c b/fs/afs/super.c index f18911e8d770..488641b1a418 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -435,6 +435,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx) /* fill in the superblock */ sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; + sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_magic = AFS_FS_MAGIC; sb->s_op = &afs_super_ops; if (!as->dyn_root) diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index d8507972ee13..90c830e3758e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1490,6 +1490,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc, return loc->xl_ops->xlo_check_space(loc, xi); } +static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) +{ + loc->xl_ops->xlo_add_entry(loc, name_hash); + loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash); + /* + * We can't leave the new entry's xe_name_offset at zero or + * add_namevalue() will go nuts. We set it to the size of our + * storage so that it can never be less than any other entry. 
+ */ + loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size); +} + static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi) { @@ -2121,31 +2133,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc, if (rc) goto out; - if (!loc->xl_entry) { - rc = -EINVAL; - goto out; - } - - if (ocfs2_xa_can_reuse_entry(loc, xi)) { - orig_value_size = loc->xl_entry->xe_value_size; - rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); - if (rc) - goto out; - goto alloc_value; - } + if (loc->xl_entry) { + if (ocfs2_xa_can_reuse_entry(loc, xi)) { + orig_value_size = loc->xl_entry->xe_value_size; + rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); + if (rc) + goto out; + goto alloc_value; + } - if (!ocfs2_xattr_is_local(loc->xl_entry)) { - orig_clusters = ocfs2_xa_value_clusters(loc); - rc = ocfs2_xa_value_truncate(loc, 0, ctxt); - if (rc) { - mlog_errno(rc); - ocfs2_xa_cleanup_value_truncate(loc, - "overwriting", - orig_clusters); - goto out; + if (!ocfs2_xattr_is_local(loc->xl_entry)) { + orig_clusters = ocfs2_xa_value_clusters(loc); + rc = ocfs2_xa_value_truncate(loc, 0, ctxt); + if (rc) { + mlog_errno(rc); + ocfs2_xa_cleanup_value_truncate(loc, + "overwriting", + orig_clusters); + goto out; + } } - } - ocfs2_xa_wipe_namevalue(loc); + ocfs2_xa_wipe_namevalue(loc); + } else + ocfs2_xa_add_entry(loc, name_hash); /* * If we get here, we have a blank entry. Fill it. We grow our diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 464f3f7e0b7a..e89e86122233 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -12,8 +12,11 @@ #include <linux/err.h> #include <linux/rbtree_latch.h> #include <linux/numa.h> +#include <linux/mm_types.h> #include <linux/wait.h> #include <linux/u64_stats_sync.h> +#include <linux/refcount.h> +#include <linux/mutex.h> struct bpf_verifier_env; struct bpf_verifier_log; @@ -66,6 +69,7 @@ struct bpf_map_ops { u64 *imm, u32 off); int (*map_direct_value_meta)(const struct bpf_map *map, u64 imm, u32 *off); + int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); }; struct bpf_map_memory { @@ -94,17 +98,19 @@ struct bpf_map { u32 btf_value_type_id; struct btf *btf; struct bpf_map_memory memory; + char name[BPF_OBJ_NAME_LEN]; bool unpriv_array; - bool frozen; /* write-once */ - /* 48 bytes hole */ + bool frozen; /* write-once; write-protected by freeze_mutex */ + /* 22 bytes hole */ /* The 3rd and 4th cacheline with misc members to avoid false sharing * particularly with refcounting. */ - atomic_t refcnt ____cacheline_aligned; - atomic_t usercnt; + atomic64_t refcnt ____cacheline_aligned; + atomic64_t usercnt; struct work_struct work; - char name[BPF_OBJ_NAME_LEN]; + struct mutex freeze_mutex; + u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */ }; static inline bool map_value_has_spin_lock(const struct bpf_map *map) @@ -246,7 +252,7 @@ struct bpf_func_proto { }; enum bpf_arg_type arg_type[5]; }; - u32 *btf_id; /* BTF ids of arguments */ + int *btf_id; /* BTF ids of arguments */ }; /* bpf_context is intentionally undefined structure. Pointer to bpf_context is @@ -384,8 +390,106 @@ struct bpf_prog_stats { struct u64_stats_sync syncp; } __aligned(2 * sizeof(u64)); +struct btf_func_model { + u8 ret_size; + u8 nr_args; + u8 arg_size[MAX_BPF_FUNC_ARGS]; +}; + +/* Restore arguments before returning from trampoline to let original function + * continue executing. This flag is used for fentry progs when there are no + * fexit progs. 
+ */ +#define BPF_TRAMP_F_RESTORE_REGS BIT(0) +/* Call original function after fentry progs, but before fexit progs. + * Makes sense for fentry/fexit, normal calls and indirect calls. + */ +#define BPF_TRAMP_F_CALL_ORIG BIT(1) +/* Skip current frame and return to parent. Makes sense for fentry/fexit + * programs only. Should not be used with normal calls and indirect calls. + */ +#define BPF_TRAMP_F_SKIP_FRAME BIT(2) + +/* Different use cases for BPF trampoline: + * 1. replace nop at the function entry (kprobe equivalent) + * flags = BPF_TRAMP_F_RESTORE_REGS + * fentry = a set of programs to run before returning from trampoline + * + * 2. replace nop at the function entry (kprobe + kretprobe equivalent) + * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME + * orig_call = fentry_ip + MCOUNT_INSN_SIZE + * fentry = a set of program to run before calling original function + * fexit = a set of program to run after original function + * + * 3. replace direct call instruction anywhere in the function body + * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) + * With flags = 0 + * fentry = a set of programs to run before returning from trampoline + * With flags = BPF_TRAMP_F_CALL_ORIG + * orig_call = original callback addr or direct function addr + * fentry = a set of program to run before calling original function + * fexit = a set of program to run after original function + */ +int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags, + struct bpf_prog **fentry_progs, int fentry_cnt, + struct bpf_prog **fexit_progs, int fexit_cnt, + void *orig_call); +/* these two functions are called from generated trampoline */ +u64 notrace __bpf_prog_enter(void); +void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); + +enum bpf_tramp_prog_type { + BPF_TRAMP_FENTRY, + BPF_TRAMP_FEXIT, + BPF_TRAMP_MAX +}; + +struct bpf_trampoline { + /* hlist for trampoline_table */ + struct hlist_node hlist; + /* serializes access to fields of this trampoline */ + struct mutex mutex; + refcount_t refcnt; + u64 key; + struct { + struct btf_func_model model; + void *addr; + } func; + /* list of BPF programs using this trampoline */ + struct hlist_head progs_hlist[BPF_TRAMP_MAX]; + /* Number of attached programs. A counter per kind. */ + int progs_cnt[BPF_TRAMP_MAX]; + /* Executable image of trampoline */ + void *image; + u64 selector; +}; +#ifdef CONFIG_BPF_JIT +struct bpf_trampoline *bpf_trampoline_lookup(u64 key); +int bpf_trampoline_link_prog(struct bpf_prog *prog); +int bpf_trampoline_unlink_prog(struct bpf_prog *prog); +void bpf_trampoline_put(struct bpf_trampoline *tr); +#else +static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key) +{ + return NULL; +} +static inline int bpf_trampoline_link_prog(struct bpf_prog *prog) +{ + return -ENOTSUPP; +} +static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog) +{ + return -ENOTSUPP; +} +static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} +#endif + +struct bpf_func_info_aux { + bool unreliable; +}; + struct bpf_prog_aux { - atomic_t refcnt; + atomic64_t refcnt; u32 used_map_cnt; u32 max_ctx_offset; u32 max_pkt_offset; @@ -395,9 +499,14 @@ struct bpf_prog_aux { u32 func_cnt; /* used by non-func prog as the number of func progs */ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ u32 attach_btf_id; /* in-kernel BTF type id to attach to */ + struct bpf_prog *linked_prog; bool verifier_zext; /* Zero extensions has been inserted by verifier. 
*/ bool offload_requested; bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ + bool func_proto_unreliable; + enum bpf_tramp_prog_type trampoline_prog_type; + struct bpf_trampoline *trampoline; + struct hlist_node tramp_hlist; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; /* function name for valid attach_btf_id */ @@ -419,6 +528,7 @@ struct bpf_prog_aux { struct bpf_prog_offload *offload; struct btf *btf; struct bpf_func_info *func_info; + struct bpf_func_info_aux *func_info_aux; /* bpf_line_info loaded from userspace. linfo->insn_off * has the xlated insn offset. * Both the main and sub prog share the same linfo. @@ -648,7 +758,7 @@ DECLARE_PER_CPU(int, bpf_prog_active); extern const struct file_operations bpf_map_fops; extern const struct file_operations bpf_prog_fops; -#define BPF_PROG_TYPE(_id, _name) \ +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ extern const struct bpf_prog_ops _name ## _prog_ops; \ extern const struct bpf_verifier_ops _name ## _verifier_ops; #define BPF_MAP_TYPE(_id, _ops) \ @@ -664,9 +774,9 @@ extern const struct bpf_verifier_ops xdp_analyzer_ops; struct bpf_prog *bpf_prog_get(u32 ufd); struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, bool attach_drv); -struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); +void bpf_prog_add(struct bpf_prog *prog, int i); void bpf_prog_sub(struct bpf_prog *prog, int i); -struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); +void bpf_prog_inc(struct bpf_prog *prog); struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); int __bpf_prog_charge(struct user_struct *user, u32 pages); @@ -677,9 +787,9 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); -struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); -struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map, - bool uref); +void bpf_map_inc(struct bpf_map *map); +void bpf_map_inc_with_uref(struct bpf_map *map); +struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); @@ -689,6 +799,7 @@ void bpf_map_charge_finish(struct bpf_map_memory *mem); void bpf_map_charge_move(struct bpf_map_memory *dst, struct bpf_map_memory *src); void *bpf_map_area_alloc(u64 size, int numa_node); +void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); void bpf_map_area_free(void *base); void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); @@ -782,7 +893,16 @@ int btf_struct_access(struct bpf_verifier_log *log, const struct btf_type *t, int off, int size, enum bpf_access_type atype, u32 *next_btf_id); -u32 btf_resolve_helper_id(struct bpf_verifier_log *log, void *, int); +int btf_resolve_helper_id(struct bpf_verifier_log *log, + const struct bpf_func_proto *fn, int); + +int btf_distill_func_proto(struct bpf_verifier_log *log, + struct btf *btf, + const struct btf_type *func_proto, + const char *func_name, + struct btf_func_model *m); + +int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) @@ -797,10 +917,8 @@ static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, return 
ERR_PTR(-EOPNOTSUPP); } -static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, - int i) +static inline void bpf_prog_add(struct bpf_prog *prog, int i) { - return ERR_PTR(-EOPNOTSUPP); } static inline void bpf_prog_sub(struct bpf_prog *prog, int i) @@ -811,9 +929,8 @@ static inline void bpf_prog_put(struct bpf_prog *prog) { } -static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog) +static inline void bpf_prog_inc(struct bpf_prog *prog) { - return ERR_PTR(-EOPNOTSUPP); } static inline struct bpf_prog *__must_check @@ -1107,6 +1224,15 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, #endif #ifdef CONFIG_INET +struct sk_reuseport_kern { + struct sk_buff *skb; + struct sock *sk; + struct sock *selected_sk; + void *data_end; + u32 hash; + u32 reuseport_id; + bool bind_inany; +}; bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info); @@ -1157,4 +1283,12 @@ static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, } #endif /* CONFIG_INET */ +enum bpf_text_poke_type { + BPF_MOD_NOP_TO_CALL, + BPF_MOD_CALL_TO_CALL, + BPF_MOD_CALL_TO_NOP, +}; +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, + void *addr1, void *addr2); + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index de14872b01ba..93740b3614d7 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -2,42 +2,68 @@ /* internal file - do not include directly */ #ifdef CONFIG_NET -BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter) -BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act) -BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act) -BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) +BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp, + struct xdp_md, struct xdp_buff) #ifdef CONFIG_CGROUP_BPF -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb) -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock) -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock, + struct bpf_sock, struct sock) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr, + struct bpf_sock_addr, struct bpf_sock_addr_kern) #endif -BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in) -BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out) -BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit) -BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local) -BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops) -BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb) -BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg) -BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops, + struct bpf_sock_ops, struct bpf_sock_ops_kern) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb, + struct __sk_buff, struct sk_buff) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, 
sk_msg, + struct sk_msg_md, struct sk_msg) +BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector, + struct __sk_buff, struct bpf_flow_dissector) #endif #ifdef CONFIG_BPF_EVENTS -BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) -BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint) -BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event) -BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint) -BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable) -BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing) +BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe, + bpf_user_pt_regs_t, struct pt_regs) +BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint, + __u64, u64) +BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event, + struct bpf_perf_event_data, struct bpf_perf_event_data_kern) +BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint, + struct bpf_raw_tracepoint_args, u64) +BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable, + struct bpf_raw_tracepoint_args, u64) +BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing, + void *, void *) #endif #ifdef CONFIG_CGROUP_BPF -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl) -BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev, + struct bpf_cgroup_dev_ctx, struct bpf_cgroup_dev_ctx) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl, + struct bpf_sysctl, struct bpf_sysctl_kern) +BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt, + struct bpf_sockopt, struct bpf_sockopt_kern) #endif #ifdef CONFIG_BPF_LIRC_MODE2 -BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2) +BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2, + __u32, u32) #endif #ifdef CONFIG_INET -BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport, + struct sk_reuseport_md, struct sk_reuseport_kern) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 6e7284ea1468..cdd08bf0ec06 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -343,6 +343,7 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) #define BPF_MAX_SUBPROGS 256 struct bpf_subprog_info { + /* 'start' has to be the first field otherwise find_subprog() won't work */ u32 start; /* insn idx of function entry point */ u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ u16 stack_depth; /* max. 
stack depth used by this function */ diff --git a/include/linux/btf.h b/include/linux/btf.h index 9dee00859c5f..79d4abc2556a 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -88,6 +88,7 @@ static inline bool btf_type_is_func_proto(const struct btf_type *t) const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); const char *btf_name_by_offset(const struct btf *btf, u32 offset); struct btf *btf_parse_vmlinux(void); +struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog); #else static inline const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) diff --git a/include/linux/filter.h b/include/linux/filter.h index 7a6f8f6f1da4..ad80e9c6111c 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -515,10 +515,12 @@ struct sock_fprog_kern { struct sock_filter *filter; }; +/* Some arches need doubleword alignment for their instructions and/or data */ +#define BPF_IMAGE_ALIGNMENT 8 + struct bpf_binary_header { u32 pages; - /* Some arches need word alignment for their instructions */ - u8 image[] __aligned(4); + u8 image[] __aligned(BPF_IMAGE_ALIGNMENT); }; struct bpf_prog { diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index ed11ef594378..6d8bf4bdf240 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -336,7 +336,8 @@ enum { #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) -#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) +#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ + ((u64)((pfsid >> 4) & 0xfff) << 52)) #define QI_DEV_IOTLB_SIZE 1 #define QI_DEV_IOTLB_MAX_INVS 32 @@ -360,7 +361,8 @@ enum { #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) -#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) +#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ + ((u64)((pfsid >> 4) & 0xfff) << 52)) #define QI_DEV_EIOTLB_MAX_INVS 32 /* Page group response descriptor QW0 */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 78436d58ce7c..f5cdfb206097 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -203,6 +203,8 @@ static inline const char *phy_modes(phy_interface_t interface) struct device; struct phylink; +struct sfp_bus; +struct sfp_upstream_ops; struct sk_buff; /* @@ -342,6 +344,8 @@ struct phy_c45_device_ids { * dev_flags: Device-specific flags used by the PHY driver. * irq: IRQ number of the PHY's interrupt (-1 if none) * phy_timer: The timer for handling the state machine + * sfp_bus_attached: flag indicating whether the SFP bus has been attached + * sfp_bus: SFP bus attached to this PHY's fiber port * attached_dev: The attached enet driver's device instance ptr * adjust_link: Callback for the enet controller to respond to * changes in the link state. 
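The new phy_sfp_attach()/phy_sfp_detach()/phy_sfp_probe() helpers let a PHY driver bind its fiber port to an SFP cage referenced from the PHY node. A minimal probe sketch, assuming the existing sfp_upstream_ops layout from <linux/sfp.h>; the foo_* names and the module_insert handler are illustrative, not part of this patch:

        #include <linux/phy.h>
        #include <linux/sfp.h>

        static int foo_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
        {
                struct phy_device *phydev = upstream;

                /* inspect the module EEPROM and pick a host interface mode */
                phydev_info(phydev, "SFP module inserted\n");
                return 0;
        }

        static const struct sfp_upstream_ops foo_sfp_ops = {
                .attach = phy_sfp_attach,       /* records bus in phydev->sfp_bus */
                .detach = phy_sfp_detach,
                .module_insert = foo_sfp_insert,
        };

        static int foo_phy_probe(struct phy_device *phydev)
        {
                /* looks up the SFP bus referenced by the PHY and registers
                 * the driver as its upstream
                 */
                return phy_sfp_probe(phydev, &foo_sfp_ops);
        }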
@@ -432,6 +436,9 @@ struct phy_device { struct mutex lock; + /* This may be modified under the rtnl lock */ + bool sfp_bus_attached; + struct sfp_bus *sfp_bus; struct phylink *phylink; struct net_device *attached_dev; @@ -1020,6 +1027,10 @@ int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); int __phy_resume(struct phy_device *phydev); int phy_loopback(struct phy_device *phydev, bool enable); +void phy_sfp_attach(void *upstream, struct sfp_bus *bus); +void phy_sfp_detach(void *upstream, struct sfp_bus *bus); +int phy_sfp_probe(struct phy_device *phydev, + const struct sfp_upstream_ops *ops); struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, phy_interface_t interface); struct phy_device *phy_find_first(struct mii_bus *bus); @@ -1149,7 +1160,6 @@ void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); -int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); void phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd); int phy_ethtool_ksettings_set(struct phy_device *phydev, diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 3b35efd85bb1..487fd9412d10 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -428,6 +428,10 @@ enum { SFP_TEC_CUR = 0x6c, SFP_STATUS = 0x6e, + SFP_STATUS_TX_DISABLE = BIT(7), + SFP_STATUS_TX_DISABLE_FORCE = BIT(6), + SFP_STATUS_TX_FAULT = BIT(2), + SFP_STATUS_RX_LOS = BIT(1), SFP_ALARM0 = 0x70, SFP_ALARM0_TEMP_HIGH = BIT(7), SFP_ALARM0_TEMP_LOW = BIT(6), diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index dfe02b658829..eceb3607864b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -4171,12 +4171,18 @@ static inline void skb_ext_reset(struct sk_buff *skb) skb->active_extensions = 0; } } + +static inline bool skb_has_extensions(struct sk_buff *skb) +{ + return unlikely(skb->active_extensions); +} #else static inline void skb_ext_put(struct sk_buff *skb) {} static inline void skb_ext_reset(struct sk_buff *skb) {} static inline void skb_ext_del(struct sk_buff *skb, int unused) {} static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {} static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {} +static inline bool skb_has_extensions(struct sk_buff *skb) { return false; } #endif /* CONFIG_SKB_EXTENSIONS */ static inline void nf_reset_ct(struct sk_buff *skb) diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 4e7809408073..b4c58a191eb1 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -93,6 +93,7 @@ extern void *vzalloc(unsigned long size); extern void *vmalloc_user(unsigned long size); extern void *vmalloc_node(unsigned long size, int node); extern void *vzalloc_node(unsigned long size, int node); +extern void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags); extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); extern void *vmalloc_32_user(unsigned long size); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5ded77fad7fb..059524b87c4c 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2606,6 +2606,13 @@ enum wiphy_params_flags { #define IEEE80211_DEFAULT_AIRTIME_WEIGHT 256 +/* The per TXQ device queue limit in airtime */ +#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L 5000 
+#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H 12000 + +/* The per interface airtime threshold to switch to lower queue limit */ +#define IEEE80211_AQL_THRESHOLD 24000 + /** * struct cfg80211_pmksa - PMK Security Association * diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index b1063db63e66..b8c20e9f343e 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -48,9 +48,14 @@ struct flow_dissector_key_tags { }; struct flow_dissector_key_vlan { - u16 vlan_id:12, - vlan_dei:1, - vlan_priority:3; + union { + struct { + u16 vlan_id:12, + vlan_dei:1, + vlan_priority:3; + }; + __be16 vlan_tci; + }; __be16 vlan_tpid; }; @@ -203,9 +208,11 @@ struct flow_dissector_key_ip { /** * struct flow_dissector_key_meta: * @ingress_ifindex: ingress ifindex + * @ingress_iftype: ingress interface type */ struct flow_dissector_key_meta { int ingress_ifindex; + u16 ingress_iftype; }; /** diff --git a/include/net/ip.h b/include/net/ip.h index a2c61c36dc4a..cebf3e10def1 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -339,10 +339,10 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o void inet_get_local_port_range(struct net *net, int *low, int *high); #ifdef CONFIG_SYSCTL -static inline int inet_is_local_reserved_port(struct net *net, int port) +static inline bool inet_is_local_reserved_port(struct net *net, int port) { if (!net->ipv4.sysctl_local_reserved_ports) - return 0; + return false; return test_bit(port, net->ipv4.sysctl_local_reserved_ports); } @@ -357,9 +357,9 @@ static inline int inet_prot_sock(struct net *net) } #else -static inline int inet_is_local_reserved_port(struct net *net, int port) +static inline bool inet_is_local_reserved_port(struct net *net, int port) { - return 0; + return false; } static inline int inet_prot_sock(struct net *net) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 5d1615463138..f1535f172935 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -90,7 +90,32 @@ struct fib6_gc_args { #ifndef CONFIG_IPV6_SUBTREES #define FIB6_SUBTREE(fn) NULL + +static inline bool fib6_routes_require_src(const struct net *net) +{ + return false; +} + +static inline void fib6_routes_require_src_inc(struct net *net) {} +static inline void fib6_routes_require_src_dec(struct net *net) {} + #else + +static inline bool fib6_routes_require_src(const struct net *net) +{ + return net->ipv6.fib6_routes_require_src > 0; +} + +static inline void fib6_routes_require_src_inc(struct net *net) +{ + net->ipv6.fib6_routes_require_src++; +} + +static inline void fib6_routes_require_src_dec(struct net *net) +{ + net->ipv6.fib6_routes_require_src--; +} + #define FIB6_SUBTREE(fn) (rcu_dereference_protected((fn)->subtree, 1)) #endif @@ -212,6 +237,11 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) return ((struct rt6_info *)dst)->rt6i_idev; } +static inline bool fib6_requires_src(const struct fib6_info *rt) +{ + return rt->fib6_src.plen > 0; +} + static inline void fib6_clean_expires(struct fib6_info *f6i) { f6i->fib6_flags &= ~RTF_EXPIRES; @@ -502,6 +532,11 @@ static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric) } #ifdef CONFIG_IPV6_MULTIPLE_TABLES +static inline bool fib6_has_custom_rules(const struct net *net) +{ + return net->ipv6.fib6_has_custom_rules; +} + int fib6_rules_init(void); void fib6_rules_cleanup(void); bool fib6_rule_default(const struct fib_rule *rule); @@ -527,6 +562,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net, return 
true; } #else +static inline bool fib6_has_custom_rules(const struct net *net) +{ + return false; +} static inline int fib6_rules_init(void) { return 0; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 52b2406a5dfc..b9cba41c6d4f 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -311,6 +311,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp, return err; } +static inline bool fib4_has_custom_rules(const struct net *net) +{ + return false; +} + static inline bool fib4_rule_default(const struct fib_rule *rule) { return true; @@ -378,6 +383,11 @@ out: return err; } +static inline bool fib4_has_custom_rules(const struct net *net) +{ + return net->ipv4.fib_has_custom_rules; +} + bool fib4_rule_default(const struct fib_rule *rule); int fib4_rules_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index c643a19dce96..aa145808e57a 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1060,6 +1060,22 @@ struct ieee80211_tx_info { }; }; +static inline u16 +ieee80211_info_set_tx_time_est(struct ieee80211_tx_info *info, u16 tx_time_est) +{ + /* We only have 10 bits in tx_time_est, so store airtime + * in increments of 4us and clamp the maximum to 2**12-1 + */ + info->tx_time_est = min_t(u16, tx_time_est, 4095) >> 2; + return info->tx_time_est << 2; +} + +static inline u16 +ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info) +{ + return info->tx_time_est << 2; +} + /** * struct ieee80211_tx_status - extended tx status info for rate control * @@ -5566,6 +5582,18 @@ void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid, u32 tx_airtime, u32 rx_airtime); /** + * ieee80211_txq_airtime_check - check if a txq can send frame to device + * + * @hw: pointer obtained from ieee80211_alloc_hw() + * @txq: pointer obtained from station or virtual interface + * + * Return true if the AQL's airtime limit has not been reached and the txq can + * continue to send more packets to the device. Otherwise return false. + */ +bool +ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq); + +/** * ieee80211_iter_keys - iterate keys programmed into the device * @hw: pointer obtained from ieee80211_alloc_hw() * @vif: virtual interface to iterate, may be %NULL for all @@ -6424,4 +6452,33 @@ void ieee80211_nan_func_match(struct ieee80211_vif *vif, struct cfg80211_nan_match_params *match, gfp_t gfp); +/** + * ieee80211_calc_rx_airtime - calculate estimated transmission airtime for RX. + * + * This function calculates the estimated airtime usage of a frame based on the + * rate information in the RX status struct and the frame length. + * + * @hw: pointer as obtained from ieee80211_alloc_hw() + * @status: &struct ieee80211_rx_status containing the transmission rate + * information. + * @len: frame length in bytes + */ +u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, + struct ieee80211_rx_status *status, + int len); + +/** + * ieee80211_calc_tx_airtime - calculate estimated transmission airtime for TX. + * + * This function calculates the estimated airtime usage of a frame based on the + * rate information in the TX info struct and the frame length. + * + * @hw: pointer as obtained from ieee80211_alloc_hw() + * @info: &struct ieee80211_tx_info of the frame. 
+ * @len: frame length in bytes + */ +u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw, + struct ieee80211_tx_info *info, + int len); + #endif /* MAC80211_H */ diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index eea66de328d3..f0897b3c97fb 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -163,9 +163,12 @@ void nf_flow_table_offload_flush(struct nf_flowtable *flowtable); int nf_flow_table_offload_setup(struct nf_flowtable *flowtable, struct net_device *dev, enum flow_block_command cmd); -int nf_flow_rule_route(struct net *net, const struct flow_offload *flow, - enum flow_offload_tuple_dir dir, - struct nf_flow_rule *flow_rule); +int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow, + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule); +int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow, + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule); int nf_flow_table_offload_init(void); void nf_flow_table_offload_exit(void); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 87b758407868..fe7c50acc681 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -114,7 +114,7 @@ static inline void nft_reg_store8(u32 *dreg, u8 val) *(u8 *)dreg = val; } -static inline u8 nft_reg_load8(u32 *sreg) +static inline u8 nft_reg_load8(const u32 *sreg) { return *(u8 *)sreg; } @@ -125,7 +125,7 @@ static inline void nft_reg_store16(u32 *dreg, u16 val) *(u16 *)dreg = val; } -static inline u16 nft_reg_load16(u32 *sreg) +static inline u16 nft_reg_load16(const u32 *sreg) { return *(u16 *)sreg; } @@ -135,7 +135,7 @@ static inline void nft_reg_store64(u32 *dreg, u64 val) put_unaligned(val, (u64 *)dreg); } -static inline u64 nft_reg_load64(u32 *sreg) +static inline u64 nft_reg_load64(const u32 *sreg) { return get_unaligned((u64 *)sreg); } diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h index 03cf5856d76f..ea7d1d78b92d 100644 --- a/include/net/netfilter/nf_tables_offload.h +++ b/include/net/netfilter/nf_tables_offload.h @@ -45,6 +45,7 @@ struct nft_flow_key { struct flow_dissector_key_ip ip; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_eth_addrs eth_addrs; + struct flow_dissector_key_meta meta; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. 
*/ struct nft_flow_match { diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 022a0fd1a5a4..5ec054473d81 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -83,6 +83,9 @@ struct netns_ipv6 { #ifdef CONFIG_IPV6_MULTIPLE_TABLES unsigned int fib6_rules_require_fldissect; bool fib6_has_custom_rules; +#ifdef CONFIG_IPV6_SUBTREES + unsigned int fib6_routes_require_src; +#endif struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; diff --git a/include/net/page_pool.h b/include/net/page_pool.h index 1121faa99c12..cfbed00ba7ee 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -34,8 +34,18 @@ #include <linux/ptr_ring.h> #include <linux/dma-direction.h> -#define PP_FLAG_DMA_MAP 1 /* Should page_pool do the DMA map/unmap */ -#define PP_FLAG_ALL PP_FLAG_DMA_MAP +#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA + * map/unmap + */ +#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets + * from page_pool will be + * DMA-synced-for-device according to + * the length provided by the device + * driver. + * Please note DMA-sync-for-CPU is still + * device driver responsibility + */ +#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV) /* * Fast allocation side cache array/stack @@ -65,6 +75,8 @@ struct page_pool_params { int nid; /* Numa node id to allocate from pages from */ struct device *dev; /* device, for DMA pre-mapping purposes */ enum dma_data_direction dma_dir; /* DMA mapping direction */ + unsigned int max_len; /* max DMA sync memory size */ + unsigned int offset; /* DMA addr offset */ }; struct page_pool { @@ -112,6 +124,8 @@ struct page_pool { * refcnt serves purpose is to simplify drivers error handling. */ refcount_t user_cnt; + + u64 destroy_cnt; }; struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); @@ -149,8 +163,8 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool, #endif /* Never call this directly, use helpers below */ -void __page_pool_put_page(struct page_pool *pool, - struct page *page, bool allow_direct); +void __page_pool_put_page(struct page_pool *pool, struct page *page, + unsigned int dma_sync_size, bool allow_direct); static inline void page_pool_put_page(struct page_pool *pool, struct page *page, bool allow_direct) @@ -159,14 +173,14 @@ static inline void page_pool_put_page(struct page_pool *pool, * allow registering MEM_TYPE_PAGE_POOL, but shield linker. */ #ifdef CONFIG_PAGE_POOL - __page_pool_put_page(pool, page, allow_direct); + __page_pool_put_page(pool, page, -1, allow_direct); #endif } /* Very limited use-cases allow recycle direct */ static inline void page_pool_recycle_direct(struct page_pool *pool, struct page *page) { - __page_pool_put_page(pool, page, true); + __page_pool_put_page(pool, page, -1, true); } /* Disconnects a page (from a page_pool). API users can have a need @@ -202,4 +216,11 @@ static inline bool page_pool_put(struct page_pool *pool) return refcount_dec_and_test(&pool->user_cnt); } +/* Caller must provide appropriate safe context, e.g. NAPI. 
*/ +void page_pool_update_nid(struct page_pool *pool, int new_nid); +static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid) +{ + if (unlikely(pool->p.nid != new_nid)) + page_pool_update_nid(pool, new_nid); +} #endif /* _NET_PAGE_POOL_H */ diff --git a/include/net/route.h b/include/net/route.h index 6c516840380d..a9c60fc68e36 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -185,6 +185,10 @@ int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src, u8 tos, struct net_device *devin, struct fib_result *res); +int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src, + u8 tos, struct net_device *devin, + const struct sk_buff *hint); + static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, u8 tos, struct net_device *devin) { diff --git a/include/net/tls.h b/include/net/tls.h index e6becd6218a3..6ed91e82edd0 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -337,6 +337,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx); void tls_sw_strparser_done(struct tls_context *tls_ctx); int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); +int tls_sw_sendpage_locked(struct sock *sk, struct page *page, + int offset, size_t size, int flags); int tls_sw_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); void tls_sw_cancel_work_tx(struct tls_context *tls_ctx); diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index a836afe8f68e..e1108a5f4f17 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -406,6 +406,13 @@ struct ocelot_ops { int (*reset)(struct ocelot *ocelot); }; +struct ocelot_skb { + struct list_head head; + struct sk_buff *skb; + u8 id; +}; + + struct ocelot_port { struct ocelot *ocelot; @@ -533,7 +540,11 @@ int ocelot_fdb_del(struct ocelot *ocelot, int port, int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, bool untagged); int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid); +int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr); +int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr); int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts); -void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts); +int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port, + struct sk_buff *skb); +void ocelot_get_txtstamp(struct ocelot *ocelot); #endif diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h index 47b5ee880aa9..ad0aa7f31675 100644 --- a/include/trace/events/page_pool.h +++ b/include/trace/events/page_pool.h @@ -8,9 +8,10 @@ #include <linux/types.h> #include <linux/tracepoint.h> +#include <trace/events/mmflags.h> #include <net/page_pool.h> -TRACE_EVENT(page_pool_inflight, +TRACE_EVENT(page_pool_release, TP_PROTO(const struct page_pool *pool, s32 inflight, u32 hold, u32 release), @@ -22,6 +23,7 @@ TRACE_EVENT(page_pool_inflight, __field(s32, inflight) __field(u32, hold) __field(u32, release) + __field(u64, cnt) ), TP_fast_assign( @@ -29,10 +31,12 @@ TRACE_EVENT(page_pool_inflight, __entry->inflight = inflight; __entry->hold = hold; __entry->release = release; + __entry->cnt = pool->destroy_cnt; ), - TP_printk("page_pool=%p inflight=%d hold=%u release=%u", - __entry->pool, __entry->inflight, __entry->hold, __entry->release) + TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu", + 
__entry->pool, __entry->inflight, __entry->hold, + __entry->release, __entry->cnt) ); TRACE_EVENT(page_pool_state_release, @@ -46,16 +50,18 @@ TRACE_EVENT(page_pool_state_release, __field(const struct page_pool *, pool) __field(const struct page *, page) __field(u32, release) + __field(unsigned long, pfn) ), TP_fast_assign( __entry->pool = pool; __entry->page = page; __entry->release = release; + __entry->pfn = page_to_pfn(page); ), - TP_printk("page_pool=%p page=%p release=%u", - __entry->pool, __entry->page, __entry->release) + TP_printk("page_pool=%p page=%p pfn=%lu release=%u", + __entry->pool, __entry->page, __entry->pfn, __entry->release) ); TRACE_EVENT(page_pool_state_hold, @@ -69,16 +75,40 @@ TRACE_EVENT(page_pool_state_hold, __field(const struct page_pool *, pool) __field(const struct page *, page) __field(u32, hold) + __field(unsigned long, pfn) ), TP_fast_assign( __entry->pool = pool; __entry->page = page; __entry->hold = hold; + __entry->pfn = page_to_pfn(page); ), - TP_printk("page_pool=%p page=%p hold=%u", - __entry->pool, __entry->page, __entry->hold) + TP_printk("page_pool=%p page=%p pfn=%lu hold=%u", + __entry->pool, __entry->page, __entry->pfn, __entry->hold) +); + +TRACE_EVENT(page_pool_update_nid, + + TP_PROTO(const struct page_pool *pool, int new_nid), + + TP_ARGS(pool, new_nid), + + TP_STRUCT__entry( + __field(const struct page_pool *, pool) + __field(int, pool_nid) + __field(int, new_nid) + ), + + TP_fast_assign( + __entry->pool = pool; + __entry->pool_nid = pool->p.nid; + __entry->new_nid = new_nid; + ), + + TP_printk("page_pool=%p pool_nid=%d new_nid=%d", + __entry->pool, __entry->pool_nid, __entry->new_nid) ); #endif /* _TRACE_PAGE_POOL_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index df6809a76404..dbbcf0b02970 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -201,6 +201,8 @@ enum bpf_attach_type { BPF_CGROUP_GETSOCKOPT, BPF_CGROUP_SETSOCKOPT, BPF_TRACE_RAW_TP, + BPF_TRACE_FENTRY, + BPF_TRACE_FEXIT, __MAX_BPF_ATTACH_TYPE }; @@ -346,6 +348,9 @@ enum bpf_attach_type { /* Clone map from listener for newly accepted socket */ #define BPF_F_CLONE (1U << 9) +/* Enable memory-mapping BPF map */ +#define BPF_F_MMAPABLE (1U << 10) + /* flags for BPF_PROG_QUERY */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) @@ -423,6 +428,7 @@ union bpf_attr { __aligned_u64 line_info; /* line info */ __u32 line_info_cnt; /* number of bpf_line_info records */ __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ + __u32 attach_prog_fd; /* 0 to attach to vmlinux */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h index eea166c52c36..11a72a938eb1 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -205,6 +205,8 @@ enum ipset_cadt_flags { IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD), IPSET_FLAG_BIT_WITH_SKBINFO = 6, IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO), + IPSET_FLAG_BIT_IFACE_WILDCARD = 7, + IPSET_FLAG_IFACE_WILDCARD = (1 << IPSET_FLAG_BIT_IFACE_WILDCARD), IPSET_FLAG_CADT_MAX = 15, }; diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index c6ad22f76ede..449a63971451 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -571,6 +571,14 @@ enum { * TCA_FLOWER_KEY_ENC_OPT_GENEVE_ * attributes */ + TCA_FLOWER_KEY_ENC_OPTS_VXLAN, /* Nested + * TCA_FLOWER_KEY_ENC_OPT_VXLAN_ + * attributes 
+ */ + TCA_FLOWER_KEY_ENC_OPTS_ERSPAN, /* Nested + * TCA_FLOWER_KEY_ENC_OPT_ERSPAN_ + * attributes + */ __TCA_FLOWER_KEY_ENC_OPTS_MAX, }; @@ -589,6 +597,27 @@ enum { (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1) enum { + TCA_FLOWER_KEY_ENC_OPT_VXLAN_UNSPEC, + TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, /* u32 */ + __TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, +}; + +#define TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX \ + (__TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX - 1) + +enum { + TCA_FLOWER_KEY_ENC_OPT_ERSPAN_UNSPEC, + TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, /* u8 */ + TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, /* be32 */ + TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, /* u8 */ + TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, /* u8 */ + __TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, +}; + +#define TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX \ + (__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1) + +enum { TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0), TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1), }; diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 5011259b8f67..9f1a72876212 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -950,19 +950,25 @@ enum { TCA_PIE_BETA, TCA_PIE_ECN, TCA_PIE_BYTEMODE, + TCA_PIE_DQ_RATE_ESTIMATOR, __TCA_PIE_MAX }; #define TCA_PIE_MAX (__TCA_PIE_MAX - 1) struct tc_pie_xstats { - __u64 prob; /* current probability */ - __u32 delay; /* current delay in ms */ - __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */ - __u32 packets_in; /* total number of packets enqueued */ - __u32 dropped; /* packets dropped due to pie_action */ - __u32 overlimit; /* dropped due to lack of space in queue */ - __u32 maxq; /* maximum queue size */ - __u32 ecn_mark; /* packets marked with ecn*/ + __u64 prob; /* current probability */ + __u32 delay; /* current delay in ms */ + __u32 avg_dq_rate; /* current average dq_rate in + * bits/pie_time + */ + __u32 dq_rate_estimating; /* is avg_dq_rate being calculated? 
*/ + __u32 packets_in; /* total number of packets enqueued */ + __u32 dropped; /* packets dropped due to pie_action */ + __u32 overlimit; /* dropped due to lack of space + * in queue + */ + __u32 maxq; /* maximum queue size */ + __u32 ecn_mark; /* packets marked with ecn*/ }; /* CBS */ diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h index 41c8b462c177..3f10dc4e7a4b 100644 --- a/include/uapi/linux/tc_act/tc_tunnel_key.h +++ b/include/uapi/linux/tc_act/tc_tunnel_key.h @@ -50,6 +50,14 @@ enum { * TCA_TUNNEL_KEY_ENC_OPTS_ * attributes */ + TCA_TUNNEL_KEY_ENC_OPTS_VXLAN, /* Nested + * TCA_TUNNEL_KEY_ENC_OPTS_ + * attributes + */ + TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN, /* Nested + * TCA_TUNNEL_KEY_ENC_OPTS_ + * attributes + */ __TCA_TUNNEL_KEY_ENC_OPTS_MAX, }; @@ -67,4 +75,25 @@ enum { #define TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX \ (__TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX - 1) +enum { + TCA_TUNNEL_KEY_ENC_OPT_VXLAN_UNSPEC, + TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, /* u32 */ + __TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, +}; + +#define TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX \ + (__TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX - 1) + +enum { + TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_UNSPEC, + TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, /* u8 */ + TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, /* be32 */ + TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR, /* u8 */ + TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID, /* u8 */ + __TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, +}; + +#define TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX \ + (__TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX - 1) + #endif diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index e1d9adb212f9..3f671bf617e8 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o obj-$(CONFIG_BPF_SYSCALL) += disasm.o +obj-$(CONFIG_BPF_JIT) += trampoline.o obj-$(CONFIG_BPF_SYSCALL) += btf.o ifeq ($(CONFIG_NET),y) obj-$(CONFIG_BPF_SYSCALL) += devmap.o diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 1c65ce0098a9..633c8c701ff6 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -14,7 +14,7 @@ #include "map_in_map.h" #define ARRAY_CREATE_FLAG_MASK \ - (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK) + (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK) static void bpf_array_free_percpu(struct bpf_array *array) { @@ -59,6 +59,10 @@ int array_map_alloc_check(union bpf_attr *attr) (percpu && numa_node != NUMA_NO_NODE)) return -EINVAL; + if (attr->map_type != BPF_MAP_TYPE_ARRAY && + attr->map_flags & BPF_F_MMAPABLE) + return -EINVAL; + if (attr->value_size > KMALLOC_MAX_SIZE) /* if value_size is bigger, the user space won't be able to * access the elements. 
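With BPF_F_MMAPABLE the array map's value area is vmalloc()-backed and page-aligned, so user space can mmap() the map fd and read or write values without further bpf() syscalls. A minimal user-space sketch using the raw bpf(2) syscall; sizes and the missing error handling are illustrative:

        #include <linux/bpf.h>
        #include <sys/mman.h>
        #include <sys/syscall.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                union bpf_attr attr;
                __u64 *values;
                int map_fd;

                memset(&attr, 0, sizeof(attr));
                attr.map_type = BPF_MAP_TYPE_ARRAY;
                attr.key_size = 4;
                attr.value_size = sizeof(__u64);
                attr.max_entries = 512;                 /* 512 * 8 = one page */
                attr.map_flags = BPF_F_MMAPABLE;

                map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
                if (map_fd < 0)
                        return 1;

                /* the value array starts at offset 0 of the mapping */
                values = mmap(NULL, 512 * sizeof(__u64), PROT_READ | PROT_WRITE,
                              MAP_SHARED, map_fd, 0);
                if (values == MAP_FAILED)
                        return 1;

                values[0] = 42;         /* direct update, no bpf() syscall */
                munmap(values, 512 * sizeof(__u64));
                close(map_fd);
                return 0;
        }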
@@ -102,10 +106,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) } array_size = sizeof(*array); - if (percpu) + if (percpu) { array_size += (u64) max_entries * sizeof(void *); - else - array_size += (u64) max_entries * elem_size; + } else { + /* rely on vmalloc() to return page-aligned memory and + * ensure array->value is exactly page-aligned + */ + if (attr->map_flags & BPF_F_MMAPABLE) { + array_size = PAGE_ALIGN(array_size); + array_size += PAGE_ALIGN((u64) max_entries * elem_size); + } else { + array_size += (u64) max_entries * elem_size; + } + } /* make sure there is no u32 overflow later in round_up() */ cost = array_size; @@ -117,7 +130,20 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) return ERR_PTR(ret); /* allocate all map elements and zero-initialize them */ - array = bpf_map_area_alloc(array_size, numa_node); + if (attr->map_flags & BPF_F_MMAPABLE) { + void *data; + + /* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */ + data = bpf_map_area_mmapable_alloc(array_size, numa_node); + if (!data) { + bpf_map_charge_finish(&mem); + return ERR_PTR(-ENOMEM); + } + array = data + PAGE_ALIGN(sizeof(struct bpf_array)) + - offsetof(struct bpf_array, value); + } else { + array = bpf_map_area_alloc(array_size, numa_node); + } if (!array) { bpf_map_charge_finish(&mem); return ERR_PTR(-ENOMEM); @@ -350,6 +376,11 @@ static int array_map_delete_elem(struct bpf_map *map, void *key) return -EINVAL; } +static void *array_map_vmalloc_addr(struct bpf_array *array) +{ + return (void *)round_down((unsigned long)array, PAGE_SIZE); +} + /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ static void array_map_free(struct bpf_map *map) { @@ -365,7 +396,10 @@ static void array_map_free(struct bpf_map *map) if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) bpf_array_free_percpu(array); - bpf_map_area_free(array); + if (array->map.map_flags & BPF_F_MMAPABLE) + bpf_map_area_free(array_map_vmalloc_addr(array)); + else + bpf_map_area_free(array); } static void array_map_seq_show_elem(struct bpf_map *map, void *key, @@ -444,6 +478,17 @@ static int array_map_check_btf(const struct bpf_map *map, return 0; } +static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT; + + if (!(map->map_flags & BPF_F_MMAPABLE)) + return -EINVAL; + + return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), pgoff); +} + const struct bpf_map_ops array_map_ops = { .map_alloc_check = array_map_alloc_check, .map_alloc = array_map_alloc, @@ -455,6 +500,7 @@ const struct bpf_map_ops array_map_ops = { .map_gen_lookup = array_map_gen_lookup, .map_direct_value_addr = array_map_direct_value_addr, .map_direct_value_meta = array_map_direct_value_meta, + .map_mmap = array_map_mmap, .map_seq_show_elem = array_map_seq_show_elem, .map_check_btf = array_map_check_btf, }; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 128d89601d73..40efde5eedcb 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -2,6 +2,8 @@ /* Copyright (c) 2018 Facebook */ #include <uapi/linux/btf.h> +#include <uapi/linux/bpf.h> +#include <uapi/linux/bpf_perf_event.h> #include <uapi/linux/types.h> #include <linux/seq_file.h> #include <linux/compiler.h> @@ -16,6 +18,9 @@ #include <linux/sort.h> #include <linux/bpf_verifier.h> #include <linux/btf.h> +#include <linux/skmsg.h> +#include <linux/perf_event.h> +#include <net/sock.h> /* BTF (BPF 
Type Format) is the meta data format which describes * the data types of BPF program/map. Hence, it basically focus @@ -1036,6 +1041,82 @@ static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env) return env->top_stack ? &env->stack[env->top_stack - 1] : NULL; } +/* Resolve the size of a passed-in "type" + * + * type: is an array (e.g. u32 array[x][y]) + * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY, + * *type_size: (x * y * sizeof(u32)). Hence, *type_size always + * corresponds to the return type. + * *elem_type: u32 + * *total_nelems: (x * y). Hence, individual elem size is + * (*type_size / *total_nelems) + * + * type: is not an array (e.g. const struct X) + * return type: type "struct X" + * *type_size: sizeof(struct X) + * *elem_type: same as return type ("struct X") + * *total_nelems: 1 + */ +static const struct btf_type * +btf_resolve_size(const struct btf *btf, const struct btf_type *type, + u32 *type_size, const struct btf_type **elem_type, + u32 *total_nelems) +{ + const struct btf_type *array_type = NULL; + const struct btf_array *array; + u32 i, size, nelems = 1; + + for (i = 0; i < MAX_RESOLVE_DEPTH; i++) { + switch (BTF_INFO_KIND(type->info)) { + /* type->size can be used */ + case BTF_KIND_INT: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_ENUM: + size = type->size; + goto resolved; + + case BTF_KIND_PTR: + size = sizeof(void *); + goto resolved; + + /* Modifiers */ + case BTF_KIND_TYPEDEF: + case BTF_KIND_VOLATILE: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + type = btf_type_by_id(btf, type->type); + break; + + case BTF_KIND_ARRAY: + if (!array_type) + array_type = type; + array = btf_type_array(type); + if (nelems && array->nelems > U32_MAX / nelems) + return ERR_PTR(-EINVAL); + nelems *= array->nelems; + type = btf_type_by_id(btf, array->type); + break; + + /* type without size */ + default: + return ERR_PTR(-EINVAL); + } + } + + return ERR_PTR(-EINVAL); + +resolved: + if (nelems && size > U32_MAX / nelems) + return ERR_PTR(-EINVAL); + + *type_size = nelems * size; + *total_nelems = nelems; + *elem_type = type; + + return array_type ? : type; +} + /* The input param "type_id" must point to a needs_resolve type */ static const struct btf_type *btf_type_id_resolve(const struct btf *btf, u32 *type_id) @@ -3363,13 +3444,112 @@ errout: extern char __weak _binary__btf_vmlinux_bin_start[]; extern char __weak _binary__btf_vmlinux_bin_end[]; +extern struct btf *btf_vmlinux; + +#define BPF_MAP_TYPE(_id, _ops) +static union { + struct bpf_ctx_convert { +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ + prog_ctx_type _id##_prog; \ + kern_ctx_type _id##_kern; +#include <linux/bpf_types.h> +#undef BPF_PROG_TYPE + } *__t; + /* 't' is written once under lock. Read many times. 
*/ + const struct btf_type *t; +} bpf_ctx_convert; +enum { +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ + __ctx_convert##_id, +#include <linux/bpf_types.h> +#undef BPF_PROG_TYPE +}; +static u8 bpf_ctx_convert_map[] = { +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ + [_id] = __ctx_convert##_id, +#include <linux/bpf_types.h> +#undef BPF_PROG_TYPE +}; +#undef BPF_MAP_TYPE + +static const struct btf_member * +btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf, + const struct btf_type *t, enum bpf_prog_type prog_type) +{ + const struct btf_type *conv_struct; + const struct btf_type *ctx_struct; + const struct btf_member *ctx_type; + const char *tname, *ctx_tname; + + conv_struct = bpf_ctx_convert.t; + if (!conv_struct) { + bpf_log(log, "btf_vmlinux is malformed\n"); + return NULL; + } + t = btf_type_by_id(btf, t->type); + while (btf_type_is_modifier(t)) + t = btf_type_by_id(btf, t->type); + if (!btf_type_is_struct(t)) { + /* Only pointer to struct is supported for now. + * That means that BPF_PROG_TYPE_TRACEPOINT with BTF + * is not supported yet. + * BPF_PROG_TYPE_RAW_TRACEPOINT is fine. + */ + bpf_log(log, "BPF program ctx type is not a struct\n"); + return NULL; + } + tname = btf_name_by_offset(btf, t->name_off); + if (!tname) { + bpf_log(log, "BPF program ctx struct doesn't have a name\n"); + return NULL; + } + /* prog_type is valid bpf program type. No need for bounds check. */ + ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2; + /* ctx_struct is a pointer to prog_ctx_type in vmlinux. + * Like 'struct __sk_buff' + */ + ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type); + if (!ctx_struct) + /* should not happen */ + return NULL; + ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off); + if (!ctx_tname) { + /* should not happen */ + bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n"); + return NULL; + } + /* only compare that prog's ctx type name is the same as + * kernel expects. No need to compare field by field. 
+ * It's ok for bpf prog to do: + * struct __sk_buff {}; + * int socket_filter_bpf_prog(struct __sk_buff *skb) + * { // no fields of skb are ever used } + */ + if (strcmp(ctx_tname, tname)) + return NULL; + return ctx_type; +} + +static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, + struct btf *btf, + const struct btf_type *t, + enum bpf_prog_type prog_type) +{ + const struct btf_member *prog_ctx_type, *kern_ctx_type; + + prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type); + if (!prog_ctx_type) + return -ENOENT; + kern_ctx_type = prog_ctx_type + 1; + return kern_ctx_type->type; +} struct btf *btf_parse_vmlinux(void) { struct btf_verifier_env *env = NULL; struct bpf_verifier_log *log; struct btf *btf = NULL; - int err; + int err, i; env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); if (!env) @@ -3403,6 +3583,26 @@ struct btf *btf_parse_vmlinux(void) if (err) goto errout; + /* find struct bpf_ctx_convert for type checking later */ + for (i = 1; i <= btf->nr_types; i++) { + const struct btf_type *t; + const char *tname; + + t = btf_type_by_id(btf, i); + if (!__btf_type_is_struct(t)) + continue; + tname = __btf_name_by_offset(btf, t->name_off); + if (!strcmp(tname, "bpf_ctx_convert")) { + /* btf_parse_vmlinux() runs under bpf_verifier_lock */ + bpf_ctx_convert.t = t; + break; + } + } + if (i > btf->nr_types) { + err = -ENOENT; + goto errout; + } + btf_verifier_env_free(env); refcount_set(&btf->refcnt, 1); return btf; @@ -3416,17 +3616,29 @@ errout: return ERR_PTR(err); } -extern struct btf *btf_vmlinux; +struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog) +{ + struct bpf_prog *tgt_prog = prog->aux->linked_prog; + + if (tgt_prog) { + return tgt_prog->aux->btf; + } else { + return btf_vmlinux; + } +} bool btf_ctx_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const struct btf_type *t = prog->aux->attach_func_proto; + struct bpf_prog *tgt_prog = prog->aux->linked_prog; + struct btf *btf = bpf_prog_get_target_btf(prog); const char *tname = prog->aux->attach_func_name; struct bpf_verifier_log *log = info->log; const struct btf_param *args; u32 nr_args, arg; + int ret; if (off % 8) { bpf_log(log, "func '%s' offset %d is not multiple of 8\n", @@ -3435,22 +3647,34 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, } arg = off / 8; args = (const struct btf_param *)(t + 1); - nr_args = btf_type_vlen(t); + /* if (t == NULL) Fall back to default BPF prog with 5 u64 arguments */ + nr_args = t ? btf_type_vlen(t) : 5; if (prog->aux->attach_btf_trace) { /* skip first 'void *__data' argument in btf_trace_##name typedef */ args++; nr_args--; } - if (arg >= nr_args) { + + if (prog->expected_attach_type == BPF_TRACE_FEXIT && + arg == nr_args) { + if (!t) + /* Default prog with 5 args. 6th arg is retval. 
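(Illustration, not part of the patch: a minimal FEXIT program against bpf_fentry_test1(), which this series adds in net/bpf/test_run.c further down. Arguments live in 8-byte slots, matching the off % 8 check above, and for BPF_TRACE_FEXIT the slot after the last argument carries the return value. The SEC() name and header assume a libbpf new enough to resolve attach_btf_id from the section name.)

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	__u64 last_arg, last_ret;

	SEC("fexit/bpf_fentry_test1")
	int trace_test1_exit(__u64 *ctx)
	{
		last_arg = ctx[0];	/* first (and only) argument */
		last_ret = ctx[1];	/* slot after the last argument: return value */
		return 0;
	}

	char _license[] SEC("license") = "GPL";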
*/ + return true; + /* function return type */ + t = btf_type_by_id(btf, t->type); + } else if (arg >= nr_args) { bpf_log(log, "func '%s' doesn't have %d-th argument\n", - tname, arg); + tname, arg + 1); return false; + } else { + if (!t) + /* Default prog with 5 args */ + return true; + t = btf_type_by_id(btf, args[arg].type); } - - t = btf_type_by_id(btf_vmlinux, args[arg].type); /* skip modifiers */ while (btf_type_is_modifier(t)) - t = btf_type_by_id(btf_vmlinux, t->type); + t = btf_type_by_id(btf, t->type); if (btf_type_is_int(t)) /* accessing a scalar */ return true; @@ -3458,7 +3682,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, bpf_log(log, "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n", tname, arg, - __btf_name_by_offset(btf_vmlinux, t->name_off), + __btf_name_by_offset(btf, t->name_off), btf_kind_str[BTF_INFO_KIND(t->info)]); return false; } @@ -3473,10 +3697,19 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, info->reg_type = PTR_TO_BTF_ID; info->btf_id = t->type; - t = btf_type_by_id(btf_vmlinux, t->type); + if (tgt_prog) { + ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type); + if (ret > 0) { + info->btf_id = ret; + return true; + } else { + return false; + } + } + t = btf_type_by_id(btf, t->type); /* skip modifiers */ while (btf_type_is_modifier(t)) - t = btf_type_by_id(btf_vmlinux, t->type); + t = btf_type_by_id(btf, t->type); if (!btf_type_is_struct(t)) { bpf_log(log, "func '%s' arg%d type %s is not a struct\n", @@ -3485,7 +3718,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, } bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n", tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)], - __btf_name_by_offset(btf_vmlinux, t->name_off)); + __btf_name_by_offset(btf, t->name_off)); return true; } @@ -3494,10 +3727,10 @@ int btf_struct_access(struct bpf_verifier_log *log, enum bpf_access_type atype, u32 *next_btf_id) { + u32 i, moff, mtrue_end, msize = 0, total_nelems = 0; + const struct btf_type *mtype, *elem_type = NULL; const struct btf_member *member; - const struct btf_type *mtype; const char *tname, *mname; - int i, moff = 0, msize; again: tname = __btf_name_by_offset(btf_vmlinux, t->name_off); @@ -3507,40 +3740,88 @@ again: } for_each_member(i, t, member) { - /* offset of the field in bits */ - moff = btf_member_bit_offset(t, member); - if (btf_member_bitfield_size(t, member)) /* bitfields are not supported yet */ continue; - if (off + size <= moff / 8) + /* offset of the field in bytes */ + moff = btf_member_bit_offset(t, member) / 8; + if (off + size <= moff) /* won't find anything, field is already too far */ break; + /* In case of "off" is pointing to holes of a struct */ + if (off < moff) + continue; /* type of the field */ mtype = btf_type_by_id(btf_vmlinux, member->type); mname = __btf_name_by_offset(btf_vmlinux, member->name_off); - /* skip modifiers */ - while (btf_type_is_modifier(mtype)) - mtype = btf_type_by_id(btf_vmlinux, mtype->type); - - if (btf_type_is_array(mtype)) - /* array deref is not supported yet */ - continue; - - if (!btf_type_has_size(mtype) && !btf_type_is_ptr(mtype)) { + mtype = btf_resolve_size(btf_vmlinux, mtype, &msize, + &elem_type, &total_nelems); + if (IS_ERR(mtype)) { bpf_log(log, "field %s doesn't have size\n", mname); return -EFAULT; } - if (btf_type_is_ptr(mtype)) - msize = 8; - else - msize = mtype->size; - if (off >= moff / 8 + msize) + + mtrue_end = moff + msize; + if (off >= mtrue_end) /* no overlap with member, keep 
iterating */ continue; + + if (btf_type_is_array(mtype)) { + u32 elem_idx; + + /* btf_resolve_size() above helps to + * linearize a multi-dimensional array. + * + * The logic here is treating an array + * in a struct as the following way: + * + * struct outer { + * struct inner array[2][2]; + * }; + * + * looks like: + * + * struct outer { + * struct inner array_elem0; + * struct inner array_elem1; + * struct inner array_elem2; + * struct inner array_elem3; + * }; + * + * When accessing outer->array[1][0], it moves + * moff to "array_elem2", set mtype to + * "struct inner", and msize also becomes + * sizeof(struct inner). Then most of the + * remaining logic will fall through without + * caring the current member is an array or + * not. + * + * Unlike mtype/msize/moff, mtrue_end does not + * change. The naming difference ("_true") tells + * that it is not always corresponding to + * the current mtype/msize/moff. + * It is the true end of the current + * member (i.e. array in this case). That + * will allow an int array to be accessed like + * a scratch space, + * i.e. allow access beyond the size of + * the array's element as long as it is + * within the mtrue_end boundary. + */ + + /* skip empty array */ + if (moff == mtrue_end) + continue; + + msize /= total_nelems; + elem_idx = (off - moff) / msize; + moff += elem_idx * msize; + mtype = elem_type; + } + /* the 'off' we're looking for is either equal to start * of this field or inside of this struct */ @@ -3549,20 +3830,20 @@ again: t = mtype; /* adjust offset we're looking for */ - off -= moff / 8; + off -= moff; goto again; } - if (msize != size) { - /* field access size doesn't match */ - bpf_log(log, - "cannot access %d bytes in struct %s field %s that has size %d\n", - size, tname, mname, msize); - return -EACCES; - } if (btf_type_is_ptr(mtype)) { const struct btf_type *stype; + if (msize != size || off != moff) { + bpf_log(log, + "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n", + mname, moff, tname, off, size); + return -EACCES; + } + stype = btf_type_by_id(btf_vmlinux, mtype->type); /* skip modifiers */ while (btf_type_is_modifier(stype)) @@ -3572,14 +3853,28 @@ again: return PTR_TO_BTF_ID; } } - /* all other fields are treated as scalars */ + + /* Allow more flexible access within an int as long as + * it is within mtrue_end. + * Since mtrue_end could be the end of an array, + * that also allows using an array of int as a scratch + * space. e.g. skb->cb[]. + */ + if (off + size > mtrue_end) { + bpf_log(log, + "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n", + mname, mtrue_end, tname, off, size); + return -EACCES; + } + return SCALAR_VALUE; } bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off); return -EINVAL; } -u32 btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn, int arg) +static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn, + int arg) { char fnname[KSYM_SYMBOL_LEN + 4] = "btf_"; const struct btf_param *args; @@ -3647,6 +3942,185 @@ u32 btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn, int arg) return btf_id; } +int btf_resolve_helper_id(struct bpf_verifier_log *log, + const struct bpf_func_proto *fn, int arg) +{ + int *btf_id = &fn->btf_id[arg]; + int ret; + + if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID) + return -EINVAL; + + ret = READ_ONCE(*btf_id); + if (ret) + return ret; + /* ok to race the search. 
The result is the same */ + ret = __btf_resolve_helper_id(log, fn->func, arg); + if (!ret) { + /* Function argument cannot be type 'void' */ + bpf_log(log, "BTF resolution bug\n"); + return -EFAULT; + } + WRITE_ONCE(*btf_id, ret); + return ret; +} + +static int __get_type_size(struct btf *btf, u32 btf_id, + const struct btf_type **bad_type) +{ + const struct btf_type *t; + + if (!btf_id) + /* void */ + return 0; + t = btf_type_by_id(btf, btf_id); + while (t && btf_type_is_modifier(t)) + t = btf_type_by_id(btf, t->type); + if (!t) + return -EINVAL; + if (btf_type_is_ptr(t)) + /* kernel size of pointer. Not BPF's size of pointer*/ + return sizeof(void *); + if (btf_type_is_int(t) || btf_type_is_enum(t)) + return t->size; + *bad_type = t; + return -EINVAL; +} + +int btf_distill_func_proto(struct bpf_verifier_log *log, + struct btf *btf, + const struct btf_type *func, + const char *tname, + struct btf_func_model *m) +{ + const struct btf_param *args; + const struct btf_type *t; + u32 i, nargs; + int ret; + + if (!func) { + /* BTF function prototype doesn't match the verifier types. + * Fall back to 5 u64 args. + */ + for (i = 0; i < 5; i++) + m->arg_size[i] = 8; + m->ret_size = 8; + m->nr_args = 5; + return 0; + } + args = (const struct btf_param *)(func + 1); + nargs = btf_type_vlen(func); + if (nargs >= MAX_BPF_FUNC_ARGS) { + bpf_log(log, + "The function %s has %d arguments. Too many.\n", + tname, nargs); + return -EINVAL; + } + ret = __get_type_size(btf, func->type, &t); + if (ret < 0) { + bpf_log(log, + "The function %s return type %s is unsupported.\n", + tname, btf_kind_str[BTF_INFO_KIND(t->info)]); + return -EINVAL; + } + m->ret_size = ret; + + for (i = 0; i < nargs; i++) { + ret = __get_type_size(btf, args[i].type, &t); + if (ret < 0) { + bpf_log(log, + "The function %s arg%d type %s is unsupported.\n", + tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]); + return -EINVAL; + } + m->arg_size[i] = ret; + } + m->nr_args = nargs; + return 0; +} + +int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog) +{ + struct bpf_verifier_state *st = env->cur_state; + struct bpf_func_state *func = st->frame[st->curframe]; + struct bpf_reg_state *reg = func->regs; + struct bpf_verifier_log *log = &env->log; + struct bpf_prog *prog = env->prog; + struct btf *btf = prog->aux->btf; + const struct btf_param *args; + const struct btf_type *t; + u32 i, nargs, btf_id; + const char *tname; + + if (!prog->aux->func_info) + return 0; + + btf_id = prog->aux->func_info[subprog].type_id; + if (!btf_id) + return 0; + + if (prog->aux->func_info_aux[subprog].unreliable) + return 0; + + t = btf_type_by_id(btf, btf_id); + if (!t || !btf_type_is_func(t)) { + bpf_log(log, "BTF of subprog %d doesn't point to KIND_FUNC\n", + subprog); + return -EINVAL; + } + tname = btf_name_by_offset(btf, t->name_off); + + t = btf_type_by_id(btf, t->type); + if (!t || !btf_type_is_func_proto(t)) { + bpf_log(log, "Invalid type of func %s\n", tname); + return -EINVAL; + } + args = (const struct btf_param *)(t + 1); + nargs = btf_type_vlen(t); + if (nargs > 5) { + bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs); + goto out; + } + /* check that BTF function arguments match actual types that the + * verifier sees. 
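(Worked example, not part of the patch: for bpf_fentry_test3(), which this series adds in net/bpf/test_run.c, btf_distill_func_proto() above would fill the model as follows; pointer arguments would contribute sizeof(void *) via __get_type_size().)

	/*
	 * int bpf_fentry_test3(char a, int b, u64 c);
	 *
	 *	m->nr_args     = 3;
	 *	m->arg_size[0] = 1;	// char
	 *	m->arg_size[1] = 4;	// int
	 *	m->arg_size[2] = 8;	// u64
	 *	m->ret_size    = 4;	// int
	 */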
+ */ + for (i = 0; i < nargs; i++) { + t = btf_type_by_id(btf, args[i].type); + while (btf_type_is_modifier(t)) + t = btf_type_by_id(btf, t->type); + if (btf_type_is_int(t) || btf_type_is_enum(t)) { + if (reg[i + 1].type == SCALAR_VALUE) + continue; + bpf_log(log, "R%d is not a scalar\n", i + 1); + goto out; + } + if (btf_type_is_ptr(t)) { + if (reg[i + 1].type == SCALAR_VALUE) { + bpf_log(log, "R%d is not a pointer\n", i + 1); + goto out; + } + /* If program is passing PTR_TO_CTX into subprogram + * check that BTF type matches. + */ + if (reg[i + 1].type == PTR_TO_CTX && + !btf_get_prog_ctx_type(log, btf, t, prog->type)) + goto out; + /* All other pointers are ok */ + continue; + } + bpf_log(log, "Unrecognized argument type %s\n", + btf_kind_str[BTF_INFO_KIND(t->info)]); + goto out; + } + return 0; +out: + /* LLVM optimizations can remove arguments from static functions. */ + bpf_log(log, + "Type info disagrees with actual arguments due to compiler optimizations\n"); + prog->aux->func_info_aux[subprog].unreliable = true; + return 0; +} + void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, struct seq_file *m) { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 97e37d82a1cc..b5945c3aaa8e 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -31,6 +31,7 @@ #include <linux/rcupdate.h> #include <linux/perf_event.h> #include <linux/extable.h> +#include <linux/log2.h> #include <asm/unaligned.h> /* Registers */ @@ -815,6 +816,9 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, struct bpf_binary_header *hdr; u32 size, hole, start, pages; + WARN_ON_ONCE(!is_power_of_2(alignment) || + alignment > BPF_IMAGE_ALIGNMENT); + /* Most of BPF filters are really small, but if some of them * fill a page, allow at least 128 extra bytes to insert a * random section of illegal instructions. 
@@ -1569,7 +1573,7 @@ out: #undef LDST #define LDX_PROBE(SIZEOP, SIZE) \ LDX_PROBE_MEM_##SIZEOP: \ - bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) SRC); \ + bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off)); \ CONT; LDX_PROBE(B, 1) LDX_PROBE(H, 2) @@ -2011,6 +2015,7 @@ static void bpf_prog_free_deferred(struct work_struct *work) if (aux->prog->has_callchain_buf) put_callchain_buffers(); #endif + bpf_trampoline_put(aux->trampoline); for (i = 0; i < aux->func_cnt; i++) bpf_jit_free(aux->func[i]); if (aux->func_cnt) { @@ -2026,6 +2031,8 @@ void bpf_prog_free(struct bpf_prog *fp) { struct bpf_prog_aux *aux = fp->aux; + if (aux->linked_prog) + bpf_prog_put(aux->linked_prog); INIT_WORK(&aux->work, bpf_prog_free_deferred); schedule_work(&aux->work); } @@ -2140,6 +2147,12 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to, return -EFAULT; } +int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, + void *addr1, void *addr2) +{ + return -ENOTSUPP; +} + DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key); EXPORT_SYMBOL(bpf_stats_enabled_key); diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index a70f7209cda3..ecf42bec38c0 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type) { switch (type) { case BPF_TYPE_PROG: - raw = bpf_prog_inc(raw); + bpf_prog_inc(raw); break; case BPF_TYPE_MAP: - raw = bpf_map_inc(raw, true); + bpf_map_inc_with_uref(raw); break; default: WARN_ON_ONCE(1); @@ -534,7 +534,8 @@ static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type if (!bpf_prog_get_ok(prog, &type, false)) return ERR_PTR(-EINVAL); - return bpf_prog_inc(prog); + bpf_prog_inc(prog); + return prog; } struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type) diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c index fab4fb134547..4cbe987be35b 100644 --- a/kernel/bpf/map_in_map.c +++ b/kernel/bpf/map_in_map.c @@ -98,7 +98,7 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map, return inner_map; if (bpf_map_meta_equal(map->inner_map_meta, inner_map)) - inner_map = bpf_map_inc(inner_map, false); + bpf_map_inc(inner_map); else inner_map = ERR_PTR(-EINVAL); diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index ba635209ae9a..5b9da0954a27 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -678,8 +678,10 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv) down_write(&bpf_devs_lock); if (!offdevs_inited) { err = rhashtable_init(&offdevs, &offdevs_params); - if (err) + if (err) { + up_write(&bpf_devs_lock); return ERR_PTR(err); + } offdevs_inited = true; } up_write(&bpf_devs_lock); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index d447b5e343bf..4ae52eb05f41 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(map_idr_lock); int sysctl_unprivileged_bpf_disabled __read_mostly; static const struct bpf_map_ops * const bpf_map_types[] = { -#define BPF_PROG_TYPE(_id, _ops) +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) #define BPF_MAP_TYPE(_id, _ops) \ [_id] = &_ops, #include <linux/bpf_types.h> @@ -127,7 +127,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) return map; } -void *bpf_map_area_alloc(u64 size, int numa_node) +static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable) { /* We really just want to fail instead of triggering OOM killer * under memory pressure, 
therefore we set __GFP_NORETRY to kmalloc, @@ -145,18 +145,33 @@ void *bpf_map_area_alloc(u64 size, int numa_node) if (size >= SIZE_MAX) return NULL; - if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { + /* kmalloc()'ed memory can't be mmap()'ed */ + if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, numa_node); if (area != NULL) return area; } - + if (mmapable) { + BUG_ON(!PAGE_ALIGNED(size)); + return vmalloc_user_node_flags(size, numa_node, GFP_KERNEL | + __GFP_RETRY_MAYFAIL | flags); + } return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | __GFP_RETRY_MAYFAIL | flags, __builtin_return_address(0)); } +void *bpf_map_area_alloc(u64 size, int numa_node) +{ + return __bpf_map_area_alloc(size, numa_node, false); +} + +void *bpf_map_area_mmapable_alloc(u64 size, int numa_node) +{ + return __bpf_map_area_alloc(size, numa_node, true); +} + void bpf_map_area_free(void *area) { kvfree(area); @@ -314,7 +329,7 @@ static void bpf_map_free_deferred(struct work_struct *work) static void bpf_map_put_uref(struct bpf_map *map) { - if (atomic_dec_and_test(&map->usercnt)) { + if (atomic64_dec_and_test(&map->usercnt)) { if (map->ops->map_release_uref) map->ops->map_release_uref(map); } @@ -325,7 +340,7 @@ static void bpf_map_put_uref(struct bpf_map *map) */ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) { - if (atomic_dec_and_test(&map->refcnt)) { + if (atomic64_dec_and_test(&map->refcnt)) { /* bpf_map_free_id() must be called first */ bpf_map_free_id(map, do_idr_lock); btf_put(map->btf); @@ -428,6 +443,74 @@ static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf, return -EINVAL; } +/* called for any extra memory-mapped regions (except initial) */ +static void bpf_map_mmap_open(struct vm_area_struct *vma) +{ + struct bpf_map *map = vma->vm_file->private_data; + + bpf_map_inc_with_uref(map); + + if (vma->vm_flags & VM_WRITE) { + mutex_lock(&map->freeze_mutex); + map->writecnt++; + mutex_unlock(&map->freeze_mutex); + } +} + +/* called for all unmapped memory region (including initial) */ +static void bpf_map_mmap_close(struct vm_area_struct *vma) +{ + struct bpf_map *map = vma->vm_file->private_data; + + if (vma->vm_flags & VM_WRITE) { + mutex_lock(&map->freeze_mutex); + map->writecnt--; + mutex_unlock(&map->freeze_mutex); + } + + bpf_map_put_with_uref(map); +} + +static const struct vm_operations_struct bpf_map_default_vmops = { + .open = bpf_map_mmap_open, + .close = bpf_map_mmap_close, +}; + +static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct bpf_map *map = filp->private_data; + int err; + + if (!map->ops->map_mmap || map_value_has_spin_lock(map)) + return -ENOTSUPP; + + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + mutex_lock(&map->freeze_mutex); + + if ((vma->vm_flags & VM_WRITE) && map->frozen) { + err = -EPERM; + goto out; + } + + /* set default open/close callbacks */ + vma->vm_ops = &bpf_map_default_vmops; + vma->vm_private_data = map; + + err = map->ops->map_mmap(map, vma); + if (err) + goto out; + + bpf_map_inc_with_uref(map); + + if (vma->vm_flags & VM_WRITE) + map->writecnt++; +out: + mutex_unlock(&map->freeze_mutex); + return err; +} + const struct file_operations bpf_map_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = bpf_map_show_fdinfo, @@ -435,6 +518,7 @@ const struct file_operations bpf_map_fops = { .release = bpf_map_release, .read = bpf_dummy_read, .write = bpf_dummy_write, + .mmap = bpf_map_mmap, }; int 
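(Illustration, not part of the patch: how the new .mmap file operation is meant to be consumed from user space. A minimal sketch assuming UAPI headers that define BPF_F_MMAPABLE; error handling omitted.)

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		union bpf_attr attr;
		__u64 *vals;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.map_type    = BPF_MAP_TYPE_ARRAY;
		attr.key_size    = sizeof(__u32);
		attr.value_size  = sizeof(__u64);
		attr.max_entries = 512;
		attr.map_flags   = BPF_F_MMAPABLE;
		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));

		/* MAP_SHARED is required; a writable mapping bumps map->writecnt,
		 * which makes BPF_MAP_FREEZE fail until it is unmapped. */
		vals = mmap(NULL, 512 * sizeof(__u64), PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
		vals[0] = 42;	/* updates the map value directly, no bpf() syscall */
		return 0;
	}

Here 512 * sizeof(__u64) happens to be exactly one page; in general the mappable region is the page-aligned value area set up in array_map_alloc() earlier in this diff.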
bpf_map_new_fd(struct bpf_map *map, int flags) @@ -578,8 +662,9 @@ static int map_create(union bpf_attr *attr) if (err) goto free_map; - atomic_set(&map->refcnt, 1); - atomic_set(&map->usercnt, 1); + atomic64_set(&map->refcnt, 1); + atomic64_set(&map->usercnt, 1); + mutex_init(&map->freeze_mutex); if (attr->btf_key_type_id || attr->btf_value_type_id) { struct btf *btf; @@ -656,21 +741,19 @@ struct bpf_map *__bpf_map_get(struct fd f) return f.file->private_data; } -/* prog's and map's refcnt limit */ -#define BPF_MAX_REFCNT 32768 - -struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref) +void bpf_map_inc(struct bpf_map *map) { - if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) { - atomic_dec(&map->refcnt); - return ERR_PTR(-EBUSY); - } - if (uref) - atomic_inc(&map->usercnt); - return map; + atomic64_inc(&map->refcnt); } EXPORT_SYMBOL_GPL(bpf_map_inc); +void bpf_map_inc_with_uref(struct bpf_map *map) +{ + atomic64_inc(&map->refcnt); + atomic64_inc(&map->usercnt); +} +EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref); + struct bpf_map *bpf_map_get_with_uref(u32 ufd) { struct fd f = fdget(ufd); @@ -680,38 +763,30 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd) if (IS_ERR(map)) return map; - map = bpf_map_inc(map, true); + bpf_map_inc_with_uref(map); fdput(f); return map; } /* map_idr_lock should have been held */ -static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, - bool uref) +static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref) { int refold; - refold = atomic_fetch_add_unless(&map->refcnt, 1, 0); - - if (refold >= BPF_MAX_REFCNT) { - __bpf_map_put(map, false); - return ERR_PTR(-EBUSY); - } - + refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); if (!refold) return ERR_PTR(-ENOENT); - if (uref) - atomic_inc(&map->usercnt); + atomic64_inc(&map->usercnt); return map; } -struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref) +struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map) { spin_lock_bh(&map_idr_lock); - map = __bpf_map_inc_not_zero(map, uref); + map = __bpf_map_inc_not_zero(map, false); spin_unlock_bh(&map_idr_lock); return map; @@ -1176,6 +1251,13 @@ static int map_freeze(const union bpf_attr *attr) map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); + + mutex_lock(&map->freeze_mutex); + + if (map->writecnt) { + err = -EBUSY; + goto err_put; + } if (READ_ONCE(map->frozen)) { err = -EBUSY; goto err_put; @@ -1187,12 +1269,13 @@ static int map_freeze(const union bpf_attr *attr) WRITE_ONCE(map->frozen, true); err_put: + mutex_unlock(&map->freeze_mutex); fdput(f); return err; } static const struct bpf_prog_ops * const bpf_prog_types[] = { -#define BPF_PROG_TYPE(_id, _name) \ +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ [_id] = & _name ## _prog_ops, #define BPF_MAP_TYPE(_id, _ops) #include <linux/bpf_types.h> @@ -1331,6 +1414,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu) struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); kvfree(aux->func_info); + kfree(aux->func_info_aux); free_used_maps(aux); bpf_prog_uncharge_memlock(aux->prog); security_bpf_prog_free(aux); @@ -1351,7 +1435,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) { - if (atomic_dec_and_test(&prog->aux->refcnt)) { + if (atomic64_dec_and_test(&prog->aux->refcnt)) { perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); /* bpf_prog_free_id() must be called first */ bpf_prog_free_id(prog, do_idr_lock); @@ 
-1457,13 +1541,9 @@ static struct bpf_prog *____bpf_prog_get(struct fd f) return f.file->private_data; } -struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) +void bpf_prog_add(struct bpf_prog *prog, int i) { - if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) { - atomic_sub(i, &prog->aux->refcnt); - return ERR_PTR(-EBUSY); - } - return prog; + atomic64_add(i, &prog->aux->refcnt); } EXPORT_SYMBOL_GPL(bpf_prog_add); @@ -1474,13 +1554,13 @@ void bpf_prog_sub(struct bpf_prog *prog, int i) * path holds a reference to the program, thus atomic_sub() can * be safely used in such cases! */ - WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0); + WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0); } EXPORT_SYMBOL_GPL(bpf_prog_sub); -struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) +void bpf_prog_inc(struct bpf_prog *prog) { - return bpf_prog_add(prog, 1); + atomic64_inc(&prog->aux->refcnt); } EXPORT_SYMBOL_GPL(bpf_prog_inc); @@ -1489,12 +1569,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) { int refold; - refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0); - - if (refold >= BPF_MAX_REFCNT) { - __bpf_prog_put(prog, false); - return ERR_PTR(-EBUSY); - } + refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0); if (!refold) return ERR_PTR(-ENOENT); @@ -1532,7 +1607,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, goto out; } - prog = bpf_prog_inc(prog); + bpf_prog_inc(prog); out: fdput(f); return prog; @@ -1579,7 +1654,7 @@ static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) static int bpf_prog_load_check_attach(enum bpf_prog_type prog_type, enum bpf_attach_type expected_attach_type, - u32 btf_id) + u32 btf_id, u32 prog_fd) { switch (prog_type) { case BPF_PROG_TYPE_TRACING: @@ -1587,7 +1662,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, return -EINVAL; break; default: - if (btf_id) + if (btf_id || prog_fd) return -EINVAL; break; } @@ -1638,7 +1713,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, } /* last field in 'union bpf_attr' used by this command */ -#define BPF_PROG_LOAD_LAST_FIELD attach_btf_id +#define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -1681,7 +1756,8 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) bpf_prog_load_fixup_attach_type(attr); if (bpf_prog_load_check_attach(type, attr->expected_attach_type, - attr->attach_btf_id)) + attr->attach_btf_id, + attr->attach_prog_fd)) return -EINVAL; /* plain bpf_prog allocation */ @@ -1691,6 +1767,16 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) prog->expected_attach_type = attr->expected_attach_type; prog->aux->attach_btf_id = attr->attach_btf_id; + if (attr->attach_prog_fd) { + struct bpf_prog *tgt_prog; + + tgt_prog = bpf_prog_get(attr->attach_prog_fd); + if (IS_ERR(tgt_prog)) { + err = PTR_ERR(tgt_prog); + goto free_prog_nouncharge; + } + prog->aux->linked_prog = tgt_prog; + } prog->aux->offload_requested = !!attr->prog_ifindex; @@ -1712,7 +1798,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) prog->orig_prog = NULL; prog->jited = 0; - atomic_set(&prog->aux->refcnt, 1); + atomic64_set(&prog->aux->refcnt, 1); prog->gpl_compatible = is_gpl ? 
1 : 0; if (bpf_prog_is_dev_bound(prog->aux)) { @@ -1802,6 +1888,49 @@ static int bpf_obj_get(const union bpf_attr *attr) attr->file_flags); } +static int bpf_tracing_prog_release(struct inode *inode, struct file *filp) +{ + struct bpf_prog *prog = filp->private_data; + + WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog)); + bpf_prog_put(prog); + return 0; +} + +static const struct file_operations bpf_tracing_prog_fops = { + .release = bpf_tracing_prog_release, + .read = bpf_dummy_read, + .write = bpf_dummy_write, +}; + +static int bpf_tracing_prog_attach(struct bpf_prog *prog) +{ + int tr_fd, err; + + if (prog->expected_attach_type != BPF_TRACE_FENTRY && + prog->expected_attach_type != BPF_TRACE_FEXIT) { + err = -EINVAL; + goto out_put_prog; + } + + err = bpf_trampoline_link_prog(prog); + if (err) + goto out_put_prog; + + tr_fd = anon_inode_getfd("bpf-tracing-prog", &bpf_tracing_prog_fops, + prog, O_CLOEXEC); + if (tr_fd < 0) { + WARN_ON_ONCE(bpf_trampoline_unlink_prog(prog)); + err = tr_fd; + goto out_put_prog; + } + return tr_fd; + +out_put_prog: + bpf_prog_put(prog); + return err; +} + struct bpf_raw_tracepoint { struct bpf_raw_event_map *btp; struct bpf_prog *prog; @@ -1853,14 +1982,16 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr) if (prog->type == BPF_PROG_TYPE_TRACING) { if (attr->raw_tracepoint.name) { - /* raw_tp name should not be specified in raw_tp - * programs that were verified via in-kernel BTF info + /* The attach point for this category of programs + * should be specified via btf_id during program load. */ err = -EINVAL; goto out_put_prog; } - /* raw_tp name is taken from type name instead */ - tp_name = prog->aux->attach_func_name; + if (prog->expected_attach_type == BPF_TRACE_RAW_TP) + tp_name = prog->aux->attach_func_name; + else + return bpf_tracing_prog_attach(prog); } else { if (strncpy_from_user(buf, u64_to_user_ptr(attr->raw_tracepoint.name), diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c new file mode 100644 index 000000000000..10ae59d65f13 --- /dev/null +++ b/kernel/bpf/trampoline.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019 Facebook */ +#include <linux/hash.h> +#include <linux/bpf.h> +#include <linux/filter.h> + +/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */ +#define TRAMPOLINE_HASH_BITS 10 +#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS) + +static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE]; + +/* serializes access to trampoline_table */ +static DEFINE_MUTEX(trampoline_mutex); + +struct bpf_trampoline *bpf_trampoline_lookup(u64 key) +{ + struct bpf_trampoline *tr; + struct hlist_head *head; + void *image; + int i; + + mutex_lock(&trampoline_mutex); + head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)]; + hlist_for_each_entry(tr, head, hlist) { + if (tr->key == key) { + refcount_inc(&tr->refcnt); + goto out; + } + } + tr = kzalloc(sizeof(*tr), GFP_KERNEL); + if (!tr) + goto out; + + /* is_root was checked earlier. No need for bpf_jit_charge_modmem() */ + image = bpf_jit_alloc_exec(PAGE_SIZE); + if (!image) { + kfree(tr); + tr = NULL; + goto out; + } + + tr->key = key; + INIT_HLIST_NODE(&tr->hlist); + hlist_add_head(&tr->hlist, head); + refcount_set(&tr->refcnt, 1); + mutex_init(&tr->mutex); + for (i = 0; i < BPF_TRAMP_MAX; i++) + INIT_HLIST_HEAD(&tr->progs_hlist[i]); + + set_vm_flush_reset_perms(image); + /* Keep image as writeable. The alternative is to keep flipping ro/rw + * everytime new program is attached or detached. 
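(Stepping back to the syscall plumbing above for a moment: an FENTRY/FEXIT program reaches bpf_tracing_prog_attach() through the existing BPF_RAW_TRACEPOINT_OPEN command. An illustrative user-space sketch, not part of the patch; prog_fd is assumed to come from a BPF_PROG_LOAD with prog_type BPF_PROG_TYPE_TRACING, expected_attach_type BPF_TRACE_FENTRY or BPF_TRACE_FEXIT, attach_btf_id set to the target function's BTF id, and optionally attach_prog_fd when tracing another BPF program.)

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int attach_fentry(int prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		/* name stays 0: the target was fixed at load time via attach_btf_id */
		attr.raw_tracepoint.prog_fd = prog_fd;

		/* For TRACING progs with FENTRY/FEXIT this lands in
		 * bpf_tracing_prog_attach(); the returned fd holds the
		 * attachment and detaches on close. */
		return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
	}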
+ */ + set_memory_x((long)image, 1); + tr->image = image; +out: + mutex_unlock(&trampoline_mutex); + return tr; +} + +/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 + * bytes on x86. Pick a number to fit into PAGE_SIZE / 2 + */ +#define BPF_MAX_TRAMP_PROGS 40 + +static int bpf_trampoline_update(struct bpf_trampoline *tr) +{ + void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2; + void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2; + struct bpf_prog *progs_to_run[BPF_MAX_TRAMP_PROGS]; + int fentry_cnt = tr->progs_cnt[BPF_TRAMP_FENTRY]; + int fexit_cnt = tr->progs_cnt[BPF_TRAMP_FEXIT]; + struct bpf_prog **progs, **fentry, **fexit; + u32 flags = BPF_TRAMP_F_RESTORE_REGS; + struct bpf_prog_aux *aux; + int err; + + if (fentry_cnt + fexit_cnt == 0) { + err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL_TO_NOP, + old_image, NULL); + tr->selector = 0; + goto out; + } + + /* populate fentry progs */ + fentry = progs = progs_to_run; + hlist_for_each_entry(aux, &tr->progs_hlist[BPF_TRAMP_FENTRY], tramp_hlist) + *progs++ = aux->prog; + + /* populate fexit progs */ + fexit = progs; + hlist_for_each_entry(aux, &tr->progs_hlist[BPF_TRAMP_FEXIT], tramp_hlist) + *progs++ = aux->prog; + + if (fexit_cnt) + flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME; + + err = arch_prepare_bpf_trampoline(new_image, &tr->func.model, flags, + fentry, fentry_cnt, + fexit, fexit_cnt, + tr->func.addr); + if (err) + goto out; + + if (tr->selector) + /* progs already running at this address */ + err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL_TO_CALL, + old_image, new_image); + else + /* first time registering */ + err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_NOP_TO_CALL, + NULL, new_image); + if (err) + goto out; + tr->selector++; +out: + return err; +} + +static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(enum bpf_attach_type t) +{ + switch (t) { + case BPF_TRACE_FENTRY: + return BPF_TRAMP_FENTRY; + default: + return BPF_TRAMP_FEXIT; + } +} + +int bpf_trampoline_link_prog(struct bpf_prog *prog) +{ + enum bpf_tramp_prog_type kind; + struct bpf_trampoline *tr; + int err = 0; + + tr = prog->aux->trampoline; + kind = bpf_attach_type_to_tramp(prog->expected_attach_type); + mutex_lock(&tr->mutex); + if (tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT] + >= BPF_MAX_TRAMP_PROGS) { + err = -E2BIG; + goto out; + } + if (!hlist_unhashed(&prog->aux->tramp_hlist)) { + /* prog already linked */ + err = -EBUSY; + goto out; + } + hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]); + tr->progs_cnt[kind]++; + err = bpf_trampoline_update(prog->aux->trampoline); + if (err) { + hlist_del(&prog->aux->tramp_hlist); + tr->progs_cnt[kind]--; + } +out: + mutex_unlock(&tr->mutex); + return err; +} + +/* bpf_trampoline_unlink_prog() should never fail. 
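(Illustration, not part of the patch: bpf_trampoline_update() above ping-pongs between the two halves of the trampoline page, so the half that may currently be executing is never patched in place.)

	/*
	 * selector == 0:  new_image = image;                old_image = image + PAGE_SIZE/2
	 * selector == 1:  new_image = image + PAGE_SIZE/2;  old_image = image
	 * selector == 2:  new_image = image;                ...and so on
	 */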
*/ +int bpf_trampoline_unlink_prog(struct bpf_prog *prog) +{ + enum bpf_tramp_prog_type kind; + struct bpf_trampoline *tr; + int err; + + tr = prog->aux->trampoline; + kind = bpf_attach_type_to_tramp(prog->expected_attach_type); + mutex_lock(&tr->mutex); + hlist_del(&prog->aux->tramp_hlist); + tr->progs_cnt[kind]--; + err = bpf_trampoline_update(prog->aux->trampoline); + mutex_unlock(&tr->mutex); + return err; +} + +void bpf_trampoline_put(struct bpf_trampoline *tr) +{ + if (!tr) + return; + mutex_lock(&trampoline_mutex); + if (!refcount_dec_and_test(&tr->refcnt)) + goto out; + WARN_ON_ONCE(mutex_is_locked(&tr->mutex)); + if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY]))) + goto out; + if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT]))) + goto out; + bpf_jit_free_exec(tr->image); + hlist_del(&tr->hlist); + kfree(tr); +out: + mutex_unlock(&trampoline_mutex); +} + +/* The logic is similar to BPF_PROG_RUN, but with explicit rcu and preempt that + * are needed for trampoline. The macro is split into + * call _bpf_prog_enter + * call prog->bpf_func + * call __bpf_prog_exit + */ +u64 notrace __bpf_prog_enter(void) +{ + u64 start = 0; + + rcu_read_lock(); + preempt_disable(); + if (static_branch_unlikely(&bpf_stats_enabled_key)) + start = sched_clock(); + return start; +} + +void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start) +{ + struct bpf_prog_stats *stats; + + if (static_branch_unlikely(&bpf_stats_enabled_key) && + /* static_key could be enabled in __bpf_prog_enter + * and disabled in __bpf_prog_exit. + * And vice versa. + * Hence check that 'start' is not zero. + */ + start) { + stats = this_cpu_ptr(prog->aux->stats); + u64_stats_update_begin(&stats->syncp); + stats->cnt++; + stats->nsecs += sched_clock() - start; + u64_stats_update_end(&stats->syncp); + } + preempt_enable(); + rcu_read_unlock(); +} + +int __weak +arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags, + struct bpf_prog **fentry_progs, int fentry_cnt, + struct bpf_prog **fexit_progs, int fexit_cnt, + void *orig_call) +{ + return -ENOTSUPP; +} + +static int __init init_trampolines(void) +{ + int i; + + for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++) + INIT_HLIST_HEAD(&trampoline_table[i]); + return 0; +} +late_initcall(init_trampolines); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 2f2374967b36..9f59f7a19dd0 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -23,7 +23,7 @@ #include "disasm.h" static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { -#define BPF_PROG_TYPE(_id, _name) \ +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ [_id] = & _name ## _verifier_ops, #define BPF_MAP_TYPE(_id, _ops) #include <linux/bpf_types.h> @@ -3970,6 +3970,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, /* only increment it after check_reg_arg() finished */ state->curframe++; + if (btf_check_func_arg_match(env, subprog)) + return -EINVAL; + /* and go analyze first insn of the callee */ *insn_idx = target_insn; @@ -4147,11 +4150,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn meta.func_id = func_id; /* check args */ for (i = 0; i < 5; i++) { - if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID) { - if (!fn->btf_id[i]) - fn->btf_id[i] = btf_resolve_helper_id(&env->log, fn->func, i); - meta.btf_id = fn->btf_id[i]; - } + err = btf_resolve_helper_id(&env->log, fn, i); + if (err > 0) + meta.btf_id = err; err = check_func_arg(env, BPF_REG_1 + i, 
fn->arg_type[i], &meta); if (err) return err; @@ -6566,6 +6567,7 @@ static int check_btf_func(struct bpf_verifier_env *env, u32 i, nfuncs, urec_size, min_size; u32 krec_size = sizeof(struct bpf_func_info); struct bpf_func_info *krecord; + struct bpf_func_info_aux *info_aux = NULL; const struct btf_type *type; struct bpf_prog *prog; const struct btf *btf; @@ -6599,6 +6601,9 @@ static int check_btf_func(struct bpf_verifier_env *env, krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); if (!krecord) return -ENOMEM; + info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); + if (!info_aux) + goto err_free; for (i = 0; i < nfuncs; i++) { ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); @@ -6650,29 +6655,31 @@ static int check_btf_func(struct bpf_verifier_env *env, ret = -EINVAL; goto err_free; } - prev_offset = krecord[i].insn_off; urecord += urec_size; } prog->aux->func_info = krecord; prog->aux->func_info_cnt = nfuncs; + prog->aux->func_info_aux = info_aux; return 0; err_free: kvfree(krecord); + kfree(info_aux); return ret; } static void adjust_btf_func(struct bpf_verifier_env *env) { + struct bpf_prog_aux *aux = env->prog->aux; int i; - if (!env->prog->aux->func_info) + if (!aux->func_info) return; for (i = 0; i < env->subprog_cnt; i++) - env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start; + aux->func_info[i].insn_off = env->subprog_info[i].start; } #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ @@ -7653,6 +7660,9 @@ static int do_check(struct bpf_verifier_env *env) 0 /* frameno */, 0 /* subprogno, zero == main subprog */); + if (btf_check_func_arg_match(env, 0)) + return -EINVAL; + for (;;) { struct bpf_insn *insn; u8 class; @@ -8169,11 +8179,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) * will be used by the valid program until it's unloaded * and all maps are released in free_used_maps() */ - map = bpf_map_inc(map, false); - if (IS_ERR(map)) { - fdput(f); - return PTR_ERR(map); - } + bpf_map_inc(map); aux->map_index = env->used_map_cnt; env->used_maps[env->used_map_cnt++] = map; @@ -9380,10 +9386,17 @@ static void print_verification_stats(struct bpf_verifier_env *env) static int check_attach_btf_id(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; + struct bpf_prog *tgt_prog = prog->aux->linked_prog; u32 btf_id = prog->aux->attach_btf_id; const char prefix[] = "btf_trace_"; + int ret = 0, subprog = -1, i; + struct bpf_trampoline *tr; const struct btf_type *t; + bool conservative = true; const char *tname; + struct btf *btf; + long addr; + u64 key; if (prog->type != BPF_PROG_TYPE_TRACING) return 0; @@ -9392,19 +9405,47 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) verbose(env, "Tracing programs must provide btf_id\n"); return -EINVAL; } - t = btf_type_by_id(btf_vmlinux, btf_id); + btf = bpf_prog_get_target_btf(prog); + if (!btf) { + verbose(env, + "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); + return -EINVAL; + } + t = btf_type_by_id(btf, btf_id); if (!t) { verbose(env, "attach_btf_id %u is invalid\n", btf_id); return -EINVAL; } - tname = btf_name_by_offset(btf_vmlinux, t->name_off); + tname = btf_name_by_offset(btf, t->name_off); if (!tname) { verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id); return -EINVAL; } + if (tgt_prog) { + struct bpf_prog_aux *aux = tgt_prog->aux; + + for (i = 0; i < aux->func_info_cnt; i++) + if (aux->func_info[i].type_id == btf_id) { + subprog = i; + break; + 
} + if (subprog == -1) { + verbose(env, "Subprog %s doesn't exist\n", tname); + return -EINVAL; + } + conservative = aux->func_info_aux[subprog].unreliable; + key = ((u64)aux->id) << 32 | btf_id; + } else { + key = btf_id; + } switch (prog->expected_attach_type) { case BPF_TRACE_RAW_TP: + if (tgt_prog) { + verbose(env, + "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); + return -EINVAL; + } if (!btf_type_is_typedef(t)) { verbose(env, "attach_btf_id %u is not a typedef\n", btf_id); @@ -9416,11 +9457,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) return -EINVAL; } tname += sizeof(prefix) - 1; - t = btf_type_by_id(btf_vmlinux, t->type); + t = btf_type_by_id(btf, t->type); if (!btf_type_is_ptr(t)) /* should never happen in valid vmlinux build */ return -EINVAL; - t = btf_type_by_id(btf_vmlinux, t->type); + t = btf_type_by_id(btf, t->type); if (!btf_type_is_func_proto(t)) /* should never happen in valid vmlinux build */ return -EINVAL; @@ -9432,6 +9473,66 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) prog->aux->attach_func_proto = t; prog->aux->attach_btf_trace = true; return 0; + case BPF_TRACE_FENTRY: + case BPF_TRACE_FEXIT: + if (!btf_type_is_func(t)) { + verbose(env, "attach_btf_id %u is not a function\n", + btf_id); + return -EINVAL; + } + t = btf_type_by_id(btf, t->type); + if (!btf_type_is_func_proto(t)) + return -EINVAL; + tr = bpf_trampoline_lookup(key); + if (!tr) + return -ENOMEM; + prog->aux->attach_func_name = tname; + /* t is either vmlinux type or another program's type */ + prog->aux->attach_func_proto = t; + mutex_lock(&tr->mutex); + if (tr->func.addr) { + prog->aux->trampoline = tr; + goto out; + } + if (tgt_prog && conservative) { + prog->aux->attach_func_proto = NULL; + t = NULL; + } + ret = btf_distill_func_proto(&env->log, btf, t, + tname, &tr->func.model); + if (ret < 0) + goto out; + if (tgt_prog) { + if (!tgt_prog->jited) { + /* for now */ + verbose(env, "Can trace only JITed BPF progs\n"); + ret = -EINVAL; + goto out; + } + if (tgt_prog->type == BPF_PROG_TYPE_TRACING) { + /* prevent cycles */ + verbose(env, "Cannot recursively attach\n"); + ret = -EINVAL; + goto out; + } + addr = (long) tgt_prog->aux->func[subprog]->bpf_func; + } else { + addr = kallsyms_lookup_name(tname); + if (!addr) { + verbose(env, + "The address of function %s cannot be found\n", + tname); + ret = -ENOENT; + goto out; + } + } + tr->func.addr = (void *)addr; + prog->aux->trampoline = tr; +out: + mutex_unlock(&tr->mutex); + if (ret) + bpf_trampoline_put(tr); + return ret; default: return -EINVAL; } diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index da16c30868f3..90c4fce1c981 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -11,10 +11,8 @@ int xsk_map_inc(struct xsk_map *map) { - struct bpf_map *m = &map->map; - - m = bpf_map_inc(m, false); - return PTR_ERR_OR_ZERO(m); + bpf_map_inc(&map->map); + return 0; } void xsk_map_put(struct xsk_map *map) diff --git a/kernel/events/core.c b/kernel/events/core.c index 00a014670ed0..834640057c93 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10477,12 +10477,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, context = parent_event->overflow_handler_context; #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING) if (overflow_handler == bpf_overflow_handler) { - struct bpf_prog *prog = bpf_prog_inc(parent_event->prog); + struct bpf_prog *prog = parent_event->prog; - if (IS_ERR(prog)) { - err = PTR_ERR(prog); - goto err_ns; - } + bpf_prog_inc(prog); 
event->prog = prog; event->orig_overflow_handler = parent_event->orig_overflow_handler; diff --git a/kernel/fork.c b/kernel/fork.c index 55af6931c6ec..13b38794efb5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1708,11 +1708,11 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f) /* * Poll support for process exit notification. */ -static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts) +static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts) { struct task_struct *task; struct pid *pid = file->private_data; - int poll_flags = 0; + __poll_t poll_flags = 0; poll_wait(file, &pid->wait_pidfd, pts); @@ -1724,7 +1724,7 @@ static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts) * group, then poll(2) should block, similar to the wait(2) family. */ if (!task || (task->exit_state && thread_group_empty(task))) - poll_flags = POLLIN | POLLRDNORM; + poll_flags = EPOLLIN | EPOLLRDNORM; rcu_read_unlock(); return poll_flags; diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 04e83fdfbe80..a45cba7df0ae 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -814,6 +814,8 @@ EXPORT_SYMBOL_GPL(freq_qos_update_request); */ int freq_qos_remove_request(struct freq_qos_request *req) { + int ret; + if (!req) return -EINVAL; @@ -821,7 +823,11 @@ int freq_qos_remove_request(struct freq_qos_request *req) "%s() called for unknown object\n", __func__)) return -EINVAL; - return freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + req->qos = NULL; + req->type = 0; + + return ret; } EXPORT_SYMBOL_GPL(freq_qos_remove_request); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0f2eb3629070..44123b4d14e8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1065,7 +1065,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id) * affecting a valid clamp bucket, the next time it's enqueued, * it will already see the updated clamp bucket value. */ - if (!p->uclamp[clamp_id].active) { + if (p->uclamp[clamp_id].active) { uclamp_rq_dec_id(rq, p, clamp_id); uclamp_rq_inc_id(rq, p, clamp_id); } @@ -6019,10 +6019,11 @@ void init_idle(struct task_struct *idle, int cpu) struct rq *rq = cpu_rq(cpu); unsigned long flags; + __sched_fork(0, idle); + raw_spin_lock_irqsave(&idle->pi_lock, flags); raw_spin_lock(&rq->lock); - __sched_fork(0, idle); idle->state = TASK_RUNNING; idle->se.exec_start = sched_clock(); idle->flags |= PF_IDLE; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 22a2fed29054..69a81a5709ff 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7548,6 +7548,19 @@ static void update_blocked_averages(int cpu) update_rq_clock(rq); /* + * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure + * that RT, DL and IRQ signals have been updated before updating CFS. + */ + curr_class = rq->curr->sched_class; + update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class); + update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class); + update_irq_load_avg(rq, 0); + + /* Don't need periodic decay once load/util_avg are null */ + if (others_have_blocked(rq)) + done = false; + + /* * Iterates the task_group tree in a bottom up fashion, see * list_add_leaf_cfs_rq() for details. 
*/ @@ -7574,14 +7587,6 @@ static void update_blocked_averages(int cpu) done = false; } - curr_class = rq->curr->sched_class; - update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class); - update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class); - update_irq_load_avg(rq, 0); - /* Don't need periodic decay once load/util_avg are null */ - if (others_have_blocked(rq)) - done = false; - update_blocked_load_status(rq, !done); rq_unlock_irqrestore(rq, &rf); } @@ -7642,12 +7647,18 @@ static inline void update_blocked_averages(int cpu) rq_lock_irqsave(rq, &rf); update_rq_clock(rq); - update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); + /* + * update_cfs_rq_load_avg() can call cpufreq_update_util(). Make sure + * that RT, DL and IRQ signals have been updated before updating CFS. + */ curr_class = rq->curr->sched_class; update_rt_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &rt_sched_class); update_dl_rq_load_avg(rq_clock_pelt(rq), rq, curr_class == &dl_sched_class); update_irq_load_avg(rq, 0); + + update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); + update_blocked_load_status(rq, cfs_rq_has_blocked(cfs_rq) || others_have_blocked(rq)); rq_unlock_irqrestore(rq, &rf); } @@ -885,13 +885,13 @@ static int remove_stable_node(struct stable_node *stable_node) return 0; } - if (WARN_ON_ONCE(page_mapped(page))) { - /* - * This should not happen: but if it does, just refuse to let - * merge_across_nodes be switched - there is no need to panic. - */ - err = -EBUSY; - } else { + /* + * Page could be still mapped if this races with __mmput() running in + * between ksm_exit() and exit_mmap(). Just refuse to let + * merge_across_nodes/max_page_sharing be switched. + */ + err = -EBUSY; + if (!page_mapped(page)) { /* * The stable node did not yet appear stale to get_ksm_page(), * since that allows for an unmapped ksm page to be recognized diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 3b62a9ff8ea0..f307bd82d750 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -331,7 +331,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, unsigned long end_pfn) { for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) { - if (unlikely(!pfn_valid(start_pfn))) + if (unlikely(!pfn_to_online_page(start_pfn))) continue; if (unlikely(pfn_to_nid(start_pfn) != nid)) @@ -356,7 +356,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, /* pfn is the end pfn of a memory section. */ pfn = end_pfn - 1; for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) { - if (unlikely(!pfn_valid(pfn))) + if (unlikely(!pfn_to_online_page(pfn))) continue; if (unlikely(pfn_to_nid(pfn) != nid)) @@ -415,7 +415,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, */ pfn = zone_start_pfn; for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) { - if (unlikely(!pfn_valid(pfn))) + if (unlikely(!pfn_to_online_page(pfn))) continue; if (page_zone(pfn_to_page(pfn)) != zone) @@ -471,6 +471,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn, struct pglist_data *pgdat = zone->zone_pgdat; unsigned long flags; +#ifdef CONFIG_ZONE_DEVICE + /* + * Zone shrinking code cannot properly deal with ZONE_DEVICE. So + * we will not try to shrink the zones - which is okay as + * set_zone_contiguous() cannot deal with ZONE_DEVICE either way. 
+ */ + if (zone_idx(zone) == ZONE_DEVICE) + return; +#endif + pgdat_resize_lock(zone->zone_pgdat, &flags); shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); update_pgdat_span(pgdat); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index a3c70e275f4e..4a7d7459c4f9 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2672,6 +2672,26 @@ void *vzalloc_node(unsigned long size, int node) EXPORT_SYMBOL(vzalloc_node); /** + * vmalloc_user_node_flags - allocate memory for userspace on a specific node + * @size: allocation size + * @node: numa node + * @flags: flags for the page level allocator + * + * The resulting memory area is zeroed so it can be mapped to userspace + * without leaking data. + * + * Return: pointer to the allocated memory or %NULL on error + */ +void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags) +{ + return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, + flags | __GFP_ZERO, PAGE_KERNEL, + VM_USERMAP, node, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(vmalloc_user_node_flags); + +/** * vmalloc_exec - allocate virtually contiguous, executable memory * @size: allocation size * diff --git a/net/Kconfig b/net/Kconfig index 3101bfcbdd7a..bd191f978a23 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -258,7 +258,7 @@ config XPS default y config HWBM - bool + bool config CGROUP_NET_PRIO bool "Network priority cgroup" @@ -309,12 +309,12 @@ config BPF_STREAM_PARSER select STREAM_PARSER select NET_SOCK_MSG ---help--- - Enabling this allows a stream parser to be used with - BPF_MAP_TYPE_SOCKMAP. + Enabling this allows a stream parser to be used with + BPF_MAP_TYPE_SOCKMAP. - BPF_MAP_TYPE_SOCKMAP provides a map type to use with network sockets. - It can be used to enforce socket policy, implement socket redirects, - etc. + BPF_MAP_TYPE_SOCKMAP provides a map type to use with network sockets. + It can be used to enforce socket policy, implement socket redirects, + etc. config NET_FLOW_LIMIT bool @@ -349,12 +349,12 @@ config NET_DROP_MONITOR tristate "Network packet drop alerting service" depends on INET && TRACEPOINTS ---help--- - This feature provides an alerting service to userspace in the - event that packets are discarded in the network stack. Alerts - are broadcast via netlink socket to any listening user space - process. If you don't need network drop alerts, or if you are ok - just checking the various proc files and other utilities for - drop statistics, say N here. + This feature provides an alerting service to userspace in the + event that packets are discarded in the network stack. Alerts + are broadcast via netlink socket to any listening user space + process. If you don't need network drop alerts, or if you are ok + just checking the various proc files and other utilities for + drop statistics, say N here. 
endmenu @@ -433,7 +433,7 @@ config NET_DEVLINK imply NET_DROP_MONITOR config PAGE_POOL - bool + bool config FAILOVER tristate "Generic failover module" diff --git a/net/atm/clip.c b/net/atm/clip.c index a7972da7235d..294cb9efe3d3 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -89,7 +89,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc) struct clip_vcc **walk; if (!entry) { - pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); + pr_err("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); return; } netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */ @@ -109,10 +109,10 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc) error = neigh_update(entry->neigh, NULL, NUD_NONE, NEIGH_UPDATE_F_ADMIN, 0); if (error) - pr_crit("neigh_update failed with %d\n", error); + pr_err("neigh_update failed with %d\n", error); goto out; } - pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc); + pr_err("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc); out: netif_tx_unlock_bh(entry->neigh->dev); } diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 0be4497cb832..915c2d6f7fb9 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -105,6 +105,40 @@ out: return err; } +/* Integer types of various sizes and pointer combinations cover variety of + * architecture dependent calling conventions. 7+ can be supported in the + * future. + */ +int noinline bpf_fentry_test1(int a) +{ + return a + 1; +} + +int noinline bpf_fentry_test2(int a, u64 b) +{ + return a + b; +} + +int noinline bpf_fentry_test3(char a, int b, u64 c) +{ + return a + b + c; +} + +int noinline bpf_fentry_test4(void *a, char b, int c, u64 d) +{ + return (long)a + b + c + d; +} + +int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e) +{ + return a + (long)b + c + d + e; +} + +int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f) +{ + return a + (long)b + c + d + (long)e + f; +} + static void *bpf_test_init(const union bpf_attr *kattr, u32 size, u32 headroom, u32 tailroom) { @@ -122,6 +156,15 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 size, kfree(data); return ERR_PTR(-EFAULT); } + if (bpf_fentry_test1(1) != 2 || + bpf_fentry_test2(2, 3) != 5 || + bpf_fentry_test3(4, 5, 6) != 15 || + bpf_fentry_test4((void *)7, 8, 9, 10) != 34 || + bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 || + bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111) { + kfree(data); + return ERR_PTR(-EFAULT); + } return data; } diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index da5639a5bd3b..458be6b3eda9 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -798,7 +798,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) * Try to grab map refcnt to make sure that it's still * alive and prevent concurrent removal. 
*/ - map = bpf_map_inc_not_zero(&smap->map, false); + map = bpf_map_inc_not_zero(&smap->map); if (IS_ERR(map)) continue; diff --git a/net/core/filter.c b/net/core/filter.c index fc303abec8fa..49ded4a7588a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3816,7 +3816,7 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = { .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; -static u32 bpf_skb_output_btf_ids[5]; +static int bpf_skb_output_btf_ids[5]; const struct bpf_func_proto bpf_skb_output_proto = { .func = bpf_skb_event_output, .gpl_only = true, @@ -8684,16 +8684,6 @@ out: } #ifdef CONFIG_INET -struct sk_reuseport_kern { - struct sk_buff *skb; - struct sock *sk; - struct sock *selected_sk; - void *data_end; - u32 hash; - u32 reuseport_id; - bool bind_inany; -}; - static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern, struct sock_reuseport *reuse, struct sock *sk, struct sk_buff *skb, diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 865ba6ca16eb..ae3bcb1540ec 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -923,21 +923,23 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, "rx-%u", index); if (error) - return error; + goto err; dev_hold(queue->dev); if (dev->sysfs_rx_queue_group) { error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); - if (error) { - kobject_put(kobj); - return error; - } + if (error) + goto err; } kobject_uevent(kobj, KOBJ_ADD); return error; + +err: + kobject_put(kobj); + return error; } #endif /* CONFIG_SYSFS */ @@ -1461,21 +1463,22 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, "tx-%u", index); if (error) - return error; + goto err; dev_hold(queue->dev); #ifdef CONFIG_BQL error = sysfs_create_group(kobj, &dql_group); - if (error) { - kobject_put(kobj); - return error; - } + if (error) + goto err; #endif kobject_uevent(kobj, KOBJ_ADD); - return 0; + +err: + kobject_put(kobj); + return error; } #endif /* CONFIG_SYSFS */ diff --git a/net/core/page_pool.c b/net/core/page_pool.c index dfc2501c35d9..a6aefe989043 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -47,6 +47,21 @@ static int page_pool_init(struct page_pool *pool, (pool->p.dma_dir != DMA_BIDIRECTIONAL)) return -EINVAL; + if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) { + /* In order to request DMA-sync-for-device the page + * needs to be mapped + */ + if (!(pool->p.flags & PP_FLAG_DMA_MAP)) + return -EINVAL; + + if (!pool->p.max_len) + return -EINVAL; + + /* pool->p.offset has to be set according to the address + * offset used by the DMA engine to start copying rx data + */ + } + if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) return -ENOMEM; @@ -115,6 +130,16 @@ static struct page *__page_pool_get_cached(struct page_pool *pool) return page; } +static void page_pool_dma_sync_for_device(struct page_pool *pool, + struct page *page, + unsigned int dma_sync_size) +{ + dma_sync_size = min(dma_sync_size, pool->p.max_len); + dma_sync_single_range_for_device(pool->p.dev, page->dma_addr, + pool->p.offset, dma_sync_size, + pool->p.dma_dir); +} + /* slow path */ noinline static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, @@ -159,6 +184,9 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, } page->dma_addr = dma; + if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) + page_pool_dma_sync_for_device(pool, page, pool->p.max_len); + 
skip_dma_map: /* Track how many pages are held 'in-flight' */ pool->pages_state_hold_cnt++; @@ -200,7 +228,7 @@ static s32 page_pool_inflight(struct page_pool *pool) inflight = _distance(hold_cnt, release_cnt); - trace_page_pool_inflight(pool, inflight, hold_cnt, release_cnt); + trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight); return inflight; @@ -281,8 +309,19 @@ static bool __page_pool_recycle_direct(struct page *page, return true; } -void __page_pool_put_page(struct page_pool *pool, - struct page *page, bool allow_direct) +/* page is NOT reusable when: + * 1) allocated when system is under some pressure. (page_is_pfmemalloc) + * 2) belongs to a different NUMA node than pool->p.nid. + * + * To update pool->p.nid users must call page_pool_update_nid. + */ +static bool pool_page_reusable(struct page_pool *pool, struct page *page) +{ + return !page_is_pfmemalloc(page) && page_to_nid(page) == pool->p.nid; +} + +void __page_pool_put_page(struct page_pool *pool, struct page *page, + unsigned int dma_sync_size, bool allow_direct) { /* This allocator is optimized for the XDP mode that uses * one-frame-per-page, but have fallbacks that act like the @@ -290,9 +329,14 @@ void __page_pool_put_page(struct page_pool *pool, * * refcnt == 1 means page_pool owns page, and can recycle it. */ - if (likely(page_ref_count(page) == 1)) { + if (likely(page_ref_count(page) == 1 && + pool_page_reusable(pool, page))) { /* Read barrier done in page_ref_count / READ_ONCE */ + if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) + page_pool_dma_sync_for_device(pool, page, + dma_sync_size); + if (allow_direct && in_serving_softirq()) if (__page_pool_recycle_direct(page, pool)) return; @@ -349,10 +393,13 @@ static void page_pool_free(struct page_pool *pool) kfree(pool); } -static void page_pool_scrub(struct page_pool *pool) +static void page_pool_empty_alloc_cache_once(struct page_pool *pool) { struct page *page; + if (pool->destroy_cnt) + return; + /* Empty alloc cache, assume caller made sure this is * no-longer in use, and page_pool_alloc_pages() cannot be * call concurrently. @@ -361,6 +408,12 @@ static void page_pool_scrub(struct page_pool *pool) page = pool->alloc.cache[--pool->alloc.count]; __page_pool_return_page(pool, page); } +} + +static void page_pool_scrub(struct page_pool *pool) +{ + page_pool_empty_alloc_cache_once(pool); + pool->destroy_cnt++; /* No more consumers should exist, but producers could still * be in-flight. @@ -427,3 +480,11 @@ void page_pool_destroy(struct page_pool *pool) schedule_delayed_work(&pool->release_dw, DEFER_TIME); } EXPORT_SYMBOL(page_pool_destroy); + +/* Caller must provide appropriate safe context, e.g. NAPI. 
*/ +void page_pool_update_nid(struct page_pool *pool, int new_nid) +{ + trace_page_pool_update_nid(pool, new_nid); + pool->p.nid = new_nid; +} +EXPORT_SYMBOL(page_pool_update_nid); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 000eddb1207d..9f7aa448bd11 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2251,6 +2251,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_MAC]) { struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); + if (ivm->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_mac) err = ops->ndo_set_vf_mac(dev, ivm->vf, @@ -2262,6 +2264,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_VLAN]) { struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); + if (ivv->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_vlan) err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, @@ -2294,6 +2298,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (len == 0) return -EINVAL; + if (ivvl[0]->vf >= INT_MAX) + return -EINVAL; err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan, ivvl[0]->qos, ivvl[0]->vlan_proto); if (err < 0) @@ -2304,6 +2310,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); struct ifla_vf_info ivf; + if (ivt->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_get_vf_config) err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); @@ -2322,6 +2330,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_RATE]) { struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); + if (ivt->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_rate) err = ops->ndo_set_vf_rate(dev, ivt->vf, @@ -2334,6 +2344,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_SPOOFCHK]) { struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); + if (ivs->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_spoofchk) err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, @@ -2345,6 +2357,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_LINK_STATE]) { struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); + if (ivl->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_link_state) err = ops->ndo_set_vf_link_state(dev, ivl->vf, @@ -2358,6 +2372,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) err = -EOPNOTSUPP; ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); + if (ivrssq_en->vf >= INT_MAX) + return -EINVAL; if (ops->ndo_set_vf_rss_query_en) err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, ivrssq_en->setting); @@ -2368,6 +2384,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_TRUST]) { struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]); + if (ivt->vf >= INT_MAX) + return -EINVAL; err = -EOPNOTSUPP; if (ops->ndo_set_vf_trust) err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting); @@ -2378,15 +2396,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) if (tb[IFLA_VF_IB_NODE_GUID]) { struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]); + if (ivt->vf >= INT_MAX) + return -EINVAL; if (!ops->ndo_set_vf_guid) return -EOPNOTSUPP; - return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID); } if (tb[IFLA_VF_IB_PORT_GUID]) { struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]); + if 
(ivt->vf >= INT_MAX) + return -EINVAL; if (!ops->ndo_set_vf_guid) return -EOPNOTSUPP; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index ad31e4e53d0a..a469d2124f3f 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -793,15 +793,18 @@ static void sk_psock_strp_data_ready(struct sock *sk) static void sk_psock_write_space(struct sock *sk) { struct sk_psock *psock; - void (*write_space)(struct sock *sk); + void (*write_space)(struct sock *sk) = NULL; rcu_read_lock(); psock = sk_psock(sk); - if (likely(psock && sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))) - schedule_work(&psock->work); - write_space = psock->saved_write_space; + if (likely(psock)) { + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + schedule_work(&psock->work); + write_space = psock->saved_write_space; + } rcu_read_unlock(); - write_space(sk); + if (write_space) + write_space(sk); } int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) diff --git a/net/core/xdp.c b/net/core/xdp.c index 8e405abaf05a..e334fad0a6b8 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -73,11 +73,6 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu) /* Allow this ID to be reused */ ida_simple_remove(&mem_id_pool, xa->mem.id); - /* Poison memory */ - xa->mem.id = 0xFFFF; - xa->mem.type = 0xF0F0; - xa->allocator = (void *)0xDEAD9001; - kfree(xa); } diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index 078d4790669d..8e3e7283d430 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -137,9 +137,11 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, struct net_device *netdev) { struct dsa_port *dp = dsa_slave_to_port(netdev); - u64 bypass, dest, src, qos_class; + u64 bypass, dest, src, qos_class, rew_op; struct dsa_switch *ds = dp->ds; int port = dp->index; + struct ocelot *ocelot = ds->priv; + struct ocelot_port *ocelot_port = ocelot->ports[port]; u8 *injection; if (unlikely(skb_cow_head(skb, OCELOT_TAG_LEN) < 0)) { @@ -161,6 +163,16 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0); packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0); + if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + rew_op = ocelot_port->ptp_cmd; + if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { + rew_op |= (ocelot_port->ts_id % 4) << 3; + ocelot_port->ts_id++; + } + + packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0); + } + return skb; } diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 03381f3e12ba..fc816b187170 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -180,8 +180,8 @@ config NET_IPIP config NET_IPGRE_DEMUX tristate "IP: GRE demultiplexer" help - This is helper module to demultiplex GRE packets on GRE version field criteria. - Required by ip_gre and pptp modules. + This is helper module to demultiplex GRE packets on GRE version field criteria. + Required by ip_gre and pptp modules. config NET_IP_TUNNEL tristate @@ -459,200 +459,200 @@ config TCP_CONG_BIC tristate "Binary Increase Congestion (BIC) control" default m ---help--- - BIC-TCP is a sender-side only change that ensures a linear RTT - fairness under large windows while offering both scalability and - bounded TCP-friendliness. The protocol combines two schemes - called additive increase and binary search increase. When the - congestion window is large, additive increase with a large - increment ensures linear RTT fairness as well as good - scalability. 
Under small congestion windows, binary search - increase provides TCP friendliness. - See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/ + BIC-TCP is a sender-side only change that ensures a linear RTT + fairness under large windows while offering both scalability and + bounded TCP-friendliness. The protocol combines two schemes + called additive increase and binary search increase. When the + congestion window is large, additive increase with a large + increment ensures linear RTT fairness as well as good + scalability. Under small congestion windows, binary search + increase provides TCP friendliness. + See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/ config TCP_CONG_CUBIC tristate "CUBIC TCP" default y ---help--- - This is version 2.0 of BIC-TCP which uses a cubic growth function - among other techniques. - See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/cubic-paper.pdf + This is version 2.0 of BIC-TCP which uses a cubic growth function + among other techniques. + See http://www.csc.ncsu.edu/faculty/rhee/export/bitcp/cubic-paper.pdf config TCP_CONG_WESTWOOD tristate "TCP Westwood+" default m ---help--- - TCP Westwood+ is a sender-side only modification of the TCP Reno - protocol stack that optimizes the performance of TCP congestion - control. It is based on end-to-end bandwidth estimation to set - congestion window and slow start threshold after a congestion - episode. Using this estimation, TCP Westwood+ adaptively sets a - slow start threshold and a congestion window which takes into - account the bandwidth used at the time congestion is experienced. - TCP Westwood+ significantly increases fairness wrt TCP Reno in - wired networks and throughput over wireless links. + TCP Westwood+ is a sender-side only modification of the TCP Reno + protocol stack that optimizes the performance of TCP congestion + control. It is based on end-to-end bandwidth estimation to set + congestion window and slow start threshold after a congestion + episode. Using this estimation, TCP Westwood+ adaptively sets a + slow start threshold and a congestion window which takes into + account the bandwidth used at the time congestion is experienced. + TCP Westwood+ significantly increases fairness wrt TCP Reno in + wired networks and throughput over wireless links. config TCP_CONG_HTCP tristate "H-TCP" default m ---help--- - H-TCP is a send-side only modifications of the TCP Reno - protocol stack that optimizes the performance of TCP - congestion control for high speed network links. It uses a - modeswitch to change the alpha and beta parameters of TCP Reno - based on network conditions and in a way so as to be fair with - other Reno and H-TCP flows. + H-TCP is a send-side only modifications of the TCP Reno + protocol stack that optimizes the performance of TCP + congestion control for high speed network links. It uses a + modeswitch to change the alpha and beta parameters of TCP Reno + based on network conditions and in a way so as to be fair with + other Reno and H-TCP flows. config TCP_CONG_HSTCP tristate "High Speed TCP" default n ---help--- - Sally Floyd's High Speed TCP (RFC 3649) congestion control. - A modification to TCP's congestion control mechanism for use - with large congestion windows. A table indicates how much to - increase the congestion window by when an ACK is received. - For more detail see http://www.icir.org/floyd/hstcp.html + Sally Floyd's High Speed TCP (RFC 3649) congestion control. 
+ A modification to TCP's congestion control mechanism for use + with large congestion windows. A table indicates how much to + increase the congestion window by when an ACK is received. + For more detail see http://www.icir.org/floyd/hstcp.html config TCP_CONG_HYBLA tristate "TCP-Hybla congestion control algorithm" default n ---help--- - TCP-Hybla is a sender-side only change that eliminates penalization of - long-RTT, large-bandwidth connections, like when satellite legs are - involved, especially when sharing a common bottleneck with normal - terrestrial connections. + TCP-Hybla is a sender-side only change that eliminates penalization of + long-RTT, large-bandwidth connections, like when satellite legs are + involved, especially when sharing a common bottleneck with normal + terrestrial connections. config TCP_CONG_VEGAS tristate "TCP Vegas" default n ---help--- - TCP Vegas is a sender-side only change to TCP that anticipates - the onset of congestion by estimating the bandwidth. TCP Vegas - adjusts the sending rate by modifying the congestion - window. TCP Vegas should provide less packet loss, but it is - not as aggressive as TCP Reno. + TCP Vegas is a sender-side only change to TCP that anticipates + the onset of congestion by estimating the bandwidth. TCP Vegas + adjusts the sending rate by modifying the congestion + window. TCP Vegas should provide less packet loss, but it is + not as aggressive as TCP Reno. config TCP_CONG_NV - tristate "TCP NV" - default n - ---help--- - TCP NV is a follow up to TCP Vegas. It has been modified to deal with - 10G networks, measurement noise introduced by LRO, GRO and interrupt - coalescence. In addition, it will decrease its cwnd multiplicatively - instead of linearly. + tristate "TCP NV" + default n + ---help--- + TCP NV is a follow up to TCP Vegas. It has been modified to deal with + 10G networks, measurement noise introduced by LRO, GRO and interrupt + coalescence. In addition, it will decrease its cwnd multiplicatively + instead of linearly. - Note that in general congestion avoidance (cwnd decreased when # packets - queued grows) cannot coexist with congestion control (cwnd decreased only - when there is packet loss) due to fairness issues. One scenario when they - can coexist safely is when the CA flows have RTTs << CC flows RTTs. + Note that in general congestion avoidance (cwnd decreased when # packets + queued grows) cannot coexist with congestion control (cwnd decreased only + when there is packet loss) due to fairness issues. One scenario when they + can coexist safely is when the CA flows have RTTs << CC flows RTTs. - For further details see http://www.brakmo.org/networking/tcp-nv/ + For further details see http://www.brakmo.org/networking/tcp-nv/ config TCP_CONG_SCALABLE tristate "Scalable TCP" default n ---help--- - Scalable TCP is a sender-side only change to TCP which uses a - MIMD congestion control algorithm which has some nice scaling - properties, though is known to have fairness issues. - See http://www.deneholme.net/tom/scalable/ + Scalable TCP is a sender-side only change to TCP which uses a + MIMD congestion control algorithm which has some nice scaling + properties, though is known to have fairness issues. + See http://www.deneholme.net/tom/scalable/ config TCP_CONG_LP tristate "TCP Low Priority" default n ---help--- - TCP Low Priority (TCP-LP), a distributed algorithm whose goal is - to utilize only the excess network bandwidth as compared to the - ``fair share`` of bandwidth as targeted by TCP. 
- See http://www-ece.rice.edu/networks/TCP-LP/ + TCP Low Priority (TCP-LP), a distributed algorithm whose goal is + to utilize only the excess network bandwidth as compared to the + ``fair share`` of bandwidth as targeted by TCP. + See http://www-ece.rice.edu/networks/TCP-LP/ config TCP_CONG_VENO tristate "TCP Veno" default n ---help--- - TCP Veno is a sender-side only enhancement of TCP to obtain better - throughput over wireless networks. TCP Veno makes use of state - distinguishing to circumvent the difficult judgment of the packet loss - type. TCP Veno cuts down less congestion window in response to random - loss packets. - See <http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1177186> + TCP Veno is a sender-side only enhancement of TCP to obtain better + throughput over wireless networks. TCP Veno makes use of state + distinguishing to circumvent the difficult judgment of the packet loss + type. TCP Veno cuts down less congestion window in response to random + loss packets. + See <http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1177186> config TCP_CONG_YEAH tristate "YeAH TCP" select TCP_CONG_VEGAS default n ---help--- - YeAH-TCP is a sender-side high-speed enabled TCP congestion control - algorithm, which uses a mixed loss/delay approach to compute the - congestion window. It's design goals target high efficiency, - internal, RTT and Reno fairness, resilience to link loss while - keeping network elements load as low as possible. + YeAH-TCP is a sender-side high-speed enabled TCP congestion control + algorithm, which uses a mixed loss/delay approach to compute the + congestion window. It's design goals target high efficiency, + internal, RTT and Reno fairness, resilience to link loss while + keeping network elements load as low as possible. - For further details look here: - http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf + For further details look here: + http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf config TCP_CONG_ILLINOIS tristate "TCP Illinois" default n ---help--- - TCP-Illinois is a sender-side modification of TCP Reno for - high speed long delay links. It uses round-trip-time to - adjust the alpha and beta parameters to achieve a higher average - throughput and maintain fairness. + TCP-Illinois is a sender-side modification of TCP Reno for + high speed long delay links. It uses round-trip-time to + adjust the alpha and beta parameters to achieve a higher average + throughput and maintain fairness. - For further details see: - http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html + For further details see: + http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html config TCP_CONG_DCTCP tristate "DataCenter TCP (DCTCP)" default n ---help--- - DCTCP leverages Explicit Congestion Notification (ECN) in the network to - provide multi-bit feedback to the end hosts. It is designed to provide: + DCTCP leverages Explicit Congestion Notification (ECN) in the network to + provide multi-bit feedback to the end hosts. It is designed to provide: - - High burst tolerance (incast due to partition/aggregate), - - Low latency (short flows, queries), - - High throughput (continuous data updates, large file transfers) with - commodity, shallow-buffered switches. + - High burst tolerance (incast due to partition/aggregate), + - Low latency (short flows, queries), + - High throughput (continuous data updates, large file transfers) with + commodity, shallow-buffered switches. 
- All switches in the data center network running DCTCP must support - ECN marking and be configured for marking when reaching defined switch - buffer thresholds. The default ECN marking threshold heuristic for - DCTCP on switches is 20 packets (30KB) at 1Gbps, and 65 packets - (~100KB) at 10Gbps, but might need further careful tweaking. + All switches in the data center network running DCTCP must support + ECN marking and be configured for marking when reaching defined switch + buffer thresholds. The default ECN marking threshold heuristic for + DCTCP on switches is 20 packets (30KB) at 1Gbps, and 65 packets + (~100KB) at 10Gbps, but might need further careful tweaking. - For further details see: - http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf + For further details see: + http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf config TCP_CONG_CDG tristate "CAIA Delay-Gradient (CDG)" default n ---help--- - CAIA Delay-Gradient (CDG) is a TCP congestion control that modifies - the TCP sender in order to: + CAIA Delay-Gradient (CDG) is a TCP congestion control that modifies + the TCP sender in order to: o Use the delay gradient as a congestion signal. o Back off with an average probability that is independent of the RTT. o Coexist with flows that use loss-based congestion control. o Tolerate packet loss unrelated to congestion. - For further details see: - D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using - delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg + For further details see: + D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using + delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg config TCP_CONG_BBR tristate "BBR TCP" default n ---help--- - BBR (Bottleneck Bandwidth and RTT) TCP congestion control aims to - maximize network utilization and minimize queues. It builds an explicit - model of the the bottleneck delivery rate and path round-trip - propagation delay. It tolerates packet loss and delay unrelated to - congestion. It can operate over LAN, WAN, cellular, wifi, or cable - modem links. It can coexist with flows that use loss-based congestion - control, and can operate with shallow buffers, deep buffers, - bufferbloat, policers, or AQM schemes that do not provide a delay - signal. It requires the fq ("Fair Queue") pacing packet scheduler. + BBR (Bottleneck Bandwidth and RTT) TCP congestion control aims to + maximize network utilization and minimize queues. It builds an explicit + model of the the bottleneck delivery rate and path round-trip + propagation delay. It tolerates packet loss and delay unrelated to + congestion. It can operate over LAN, WAN, cellular, wifi, or cable + modem links. It can coexist with flows that use loss-based congestion + control, and can operate with shallow buffers, deep buffers, + bufferbloat, policers, or AQM schemes that do not provide a delay + signal. It requires the fq ("Fair Queue") pacing packet scheduler. 
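The congestion-control modules described above are selected per socket at run time, not only through the build-time default below. As a brief userspace illustration (not part of this patch), an application can request one of the compiled-in algorithms with the TCP_CONGESTION socket option and read back what the kernel actually attached:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	const char request[] = "cubic";	/* any algorithm enabled above */
	char current[16];
	socklen_t len = sizeof(current);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;

	/* Select the congestion control for this socket only. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		       request, strlen(request)))
		perror("setsockopt(TCP_CONGESTION)");

	/* Read back the algorithm actually in use. */
	if (!getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, current, &len))
		printf("congestion control: %.*s\n", (int)len, current);

	close(fd);
	return 0;
}

Non-default algorithms generally need to be listed in net.ipv4.tcp_allowed_congestion_control, or the caller needs CAP_NET_ADMIN, for the setsockopt() call to succeed.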
choice prompt "Default TCP congestion control" diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 71c78d223dfd..577db1d50a24 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -70,11 +70,6 @@ fail: fib_free_table(main_table); return -ENOMEM; } - -static bool fib4_has_custom_rules(struct net *net) -{ - return false; -} #else struct fib_table *fib_new_table(struct net *net, u32 id) @@ -131,11 +126,6 @@ struct fib_table *fib_get_table(struct net *net, u32 id) } return NULL; } - -static bool fib4_has_custom_rules(struct net *net) -{ - return net->ipv4.fib_has_custom_rules; -} #endif /* CONFIG_IP_MULTIPLE_TABLES */ static void fib_replace_table(struct net *net, struct fib_table *old, diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 10636fb6093e..572b6307a2df 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -340,6 +340,8 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi, iph->saddr, iph->daddr, tpi->key); if (tunnel) { + const struct iphdr *tnl_params; + if (__iptunnel_pull_header(skb, hdr_len, tpi->proto, raw_proto, false) < 0) goto drop; @@ -348,7 +350,9 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi, skb_pop_mac_header(skb); else skb_reset_mac_header(skb); - if (tunnel->collect_md) { + + tnl_params = &tunnel->parms.iph; + if (tunnel->collect_md || tnl_params->daddr == 0) { __be16 flags; __be64 tun_id; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 24a95126e698..aa438c6758a7 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -302,16 +302,31 @@ drop: return true; } +static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph, + const struct sk_buff *hint) +{ + return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr && + ip_hdr(hint)->tos == iph->tos; +} + INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *)); static int ip_rcv_finish_core(struct net *net, struct sock *sk, - struct sk_buff *skb, struct net_device *dev) + struct sk_buff *skb, struct net_device *dev, + const struct sk_buff *hint) { const struct iphdr *iph = ip_hdr(skb); int (*edemux)(struct sk_buff *skb); struct rtable *rt; int err; + if (ip_can_use_hint(skb, iph, hint)) { + err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos, + dev, hint); + if (unlikely(err)) + goto drop_error; + } + if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk && @@ -408,7 +423,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) if (!skb) return NET_RX_SUCCESS; - ret = ip_rcv_finish_core(net, sk, skb, dev); + ret = ip_rcv_finish_core(net, sk, skb, dev, NULL); if (ret != NET_RX_DROP) ret = dst_input(skb); return ret; @@ -535,11 +550,20 @@ static void ip_sublist_rcv_finish(struct list_head *head) } } +static struct sk_buff *ip_extract_route_hint(const struct net *net, + struct sk_buff *skb, int rt_type) +{ + if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST) + return NULL; + + return skb; +} + static void ip_list_rcv_finish(struct net *net, struct sock *sk, struct list_head *head) { + struct sk_buff *skb, *next, *hint = NULL; struct dst_entry *curr_dst = NULL; - struct sk_buff *skb, *next; struct list_head sublist; INIT_LIST_HEAD(&sublist); @@ -554,11 +578,14 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk, skb = l3mdev_ip_rcv(skb); if (!skb) continue; - if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP) + if 
(ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP) continue; dst = skb_dst(skb); if (curr_dst != dst) { + hint = ip_extract_route_hint(net, skb, + ((struct rtable *)dst)->rt_type); + /* dispatch old sublist */ if (!list_empty(&sublist)) ip_sublist_rcv_finish(&sublist); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 3d8baaaf7086..9d83cb320dcb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -422,7 +422,7 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb) int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net_device *dev = skb_dst(skb)->dev; + struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev; IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); @@ -430,7 +430,7 @@ int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb) skb->protocol = htons(ETH_P_IP); return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, - net, sk, skb, NULL, dev, + net, sk, skb, indev, dev, ip_finish_output, !(IPCB(skb)->flags & IPSKB_REROUTED)); } diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index c724fb30d048..47f8b947eef1 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -215,6 +215,7 @@ void ip_tunnel_get_stats64(struct net_device *dev, EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64); static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = { + [LWTUNNEL_IP_UNSPEC] = { .strict_start_type = LWTUNNEL_IP_OPTS }, [LWTUNNEL_IP_ID] = { .type = NLA_U64 }, [LWTUNNEL_IP_DST] = { .type = NLA_U32 }, [LWTUNNEL_IP_SRC] = { .type = NLA_U32 }, @@ -251,7 +252,7 @@ erspan_opt_policy[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1] = { }; static int ip_tun_parse_opts_geneve(struct nlattr *attr, - struct ip_tunnel_info *info, + struct ip_tunnel_info *info, int opts_len, struct netlink_ext_ack *extack) { struct nlattr *tb[LWTUNNEL_IP_OPT_GENEVE_MAX + 1]; @@ -273,7 +274,7 @@ static int ip_tun_parse_opts_geneve(struct nlattr *attr, return -EINVAL; if (info) { - struct geneve_opt *opt = ip_tunnel_info_opts(info); + struct geneve_opt *opt = ip_tunnel_info_opts(info) + opts_len; memcpy(opt->opt_data, nla_data(attr), data_len); opt->length = data_len / 4; @@ -288,7 +289,7 @@ static int ip_tun_parse_opts_geneve(struct nlattr *attr, } static int ip_tun_parse_opts_vxlan(struct nlattr *attr, - struct ip_tunnel_info *info, + struct ip_tunnel_info *info, int opts_len, struct netlink_ext_ack *extack) { struct nlattr *tb[LWTUNNEL_IP_OPT_VXLAN_MAX + 1]; @@ -303,7 +304,8 @@ static int ip_tun_parse_opts_vxlan(struct nlattr *attr, return -EINVAL; if (info) { - struct vxlan_metadata *md = ip_tunnel_info_opts(info); + struct vxlan_metadata *md = + ip_tunnel_info_opts(info) + opts_len; attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP]; md->gbp = nla_get_u32(attr); @@ -314,11 +316,12 @@ static int ip_tun_parse_opts_vxlan(struct nlattr *attr, } static int ip_tun_parse_opts_erspan(struct nlattr *attr, - struct ip_tunnel_info *info, + struct ip_tunnel_info *info, int opts_len, struct netlink_ext_ack *extack) { struct nlattr *tb[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1]; int err; + u8 ver; err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_ERSPAN_MAX, attr, erspan_opt_policy, extack); @@ -328,23 +331,31 @@ static int ip_tun_parse_opts_erspan(struct nlattr *attr, if (!tb[LWTUNNEL_IP_OPT_ERSPAN_VER]) return -EINVAL; - if (info) { - struct erspan_metadata *md = ip_tunnel_info_opts(info); + ver = nla_get_u8(tb[LWTUNNEL_IP_OPT_ERSPAN_VER]); + if (ver == 1) { + if (!tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX]) + return -EINVAL; + } else if (ver == 2) { + if 
(!tb[LWTUNNEL_IP_OPT_ERSPAN_DIR] || + !tb[LWTUNNEL_IP_OPT_ERSPAN_HWID]) + return -EINVAL; + } else { + return -EINVAL; + } - attr = tb[LWTUNNEL_IP_OPT_ERSPAN_VER]; - md->version = nla_get_u8(attr); + if (info) { + struct erspan_metadata *md = + ip_tunnel_info_opts(info) + opts_len; - if (md->version == 1 && tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX]) { + md->version = ver; + if (ver == 1) { attr = tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX]; md->u.index = nla_get_be32(attr); - } else if (md->version == 2 && tb[LWTUNNEL_IP_OPT_ERSPAN_DIR] && - tb[LWTUNNEL_IP_OPT_ERSPAN_HWID]) { + } else { attr = tb[LWTUNNEL_IP_OPT_ERSPAN_DIR]; md->u.md2.dir = nla_get_u8(attr); attr = tb[LWTUNNEL_IP_OPT_ERSPAN_HWID]; set_hwid(&md->u.md2, nla_get_u8(attr)); - } else { - return -EINVAL; } info->key.tun_flags |= TUNNEL_ERSPAN_OPT; @@ -356,30 +367,57 @@ static int ip_tun_parse_opts_erspan(struct nlattr *attr, static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info, struct netlink_ext_ack *extack) { - struct nlattr *tb[LWTUNNEL_IP_OPTS_MAX + 1]; - int err; + int err, rem, opt_len, opts_len = 0, type = 0; + struct nlattr *nla; if (!attr) return 0; - err = nla_parse_nested(tb, LWTUNNEL_IP_OPTS_MAX, attr, - ip_opts_policy, extack); + err = nla_validate(nla_data(attr), nla_len(attr), LWTUNNEL_IP_OPTS_MAX, + ip_opts_policy, extack); if (err) return err; - if (tb[LWTUNNEL_IP_OPTS_GENEVE]) - err = ip_tun_parse_opts_geneve(tb[LWTUNNEL_IP_OPTS_GENEVE], - info, extack); - else if (tb[LWTUNNEL_IP_OPTS_VXLAN]) - err = ip_tun_parse_opts_vxlan(tb[LWTUNNEL_IP_OPTS_VXLAN], - info, extack); - else if (tb[LWTUNNEL_IP_OPTS_ERSPAN]) - err = ip_tun_parse_opts_erspan(tb[LWTUNNEL_IP_OPTS_ERSPAN], - info, extack); - else - err = -EINVAL; + nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) { + switch (nla_type(nla)) { + case LWTUNNEL_IP_OPTS_GENEVE: + if (type && type != TUNNEL_GENEVE_OPT) + return -EINVAL; + opt_len = ip_tun_parse_opts_geneve(nla, info, opts_len, + extack); + if (opt_len < 0) + return opt_len; + opts_len += opt_len; + if (opts_len > IP_TUNNEL_OPTS_MAX) + return -EINVAL; + type = TUNNEL_GENEVE_OPT; + break; + case LWTUNNEL_IP_OPTS_VXLAN: + if (type) + return -EINVAL; + opt_len = ip_tun_parse_opts_vxlan(nla, info, opts_len, + extack); + if (opt_len < 0) + return opt_len; + opts_len += opt_len; + type = TUNNEL_VXLAN_OPT; + break; + case LWTUNNEL_IP_OPTS_ERSPAN: + if (type) + return -EINVAL; + opt_len = ip_tun_parse_opts_erspan(nla, info, opts_len, + extack); + if (opt_len < 0) + return opt_len; + opts_len += opt_len; + type = TUNNEL_ERSPAN_OPT; + break; + default: + return -EINVAL; + } + } - return err; + return opts_len; } static int ip_tun_get_optlen(struct nlattr *attr, @@ -477,18 +515,23 @@ static int ip_tun_fill_encap_opts_geneve(struct sk_buff *skb, { struct geneve_opt *opt; struct nlattr *nest; + int offset = 0; nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_GENEVE); if (!nest) return -ENOMEM; - opt = ip_tunnel_info_opts(tun_info); - if (nla_put_be16(skb, LWTUNNEL_IP_OPT_GENEVE_CLASS, opt->opt_class) || - nla_put_u8(skb, LWTUNNEL_IP_OPT_GENEVE_TYPE, opt->type) || - nla_put(skb, LWTUNNEL_IP_OPT_GENEVE_DATA, opt->length * 4, - opt->opt_data)) { - nla_nest_cancel(skb, nest); - return -ENOMEM; + while (tun_info->options_len > offset) { + opt = ip_tunnel_info_opts(tun_info) + offset; + if (nla_put_be16(skb, LWTUNNEL_IP_OPT_GENEVE_CLASS, + opt->opt_class) || + nla_put_u8(skb, LWTUNNEL_IP_OPT_GENEVE_TYPE, opt->type) || + nla_put(skb, LWTUNNEL_IP_OPT_GENEVE_DATA, opt->length * 4, + opt->opt_data)) { + 
nla_nest_cancel(skb, nest); + return -ENOMEM; + } + offset += sizeof(*opt) + opt->length * 4; } nla_nest_end(skb, nest); @@ -526,7 +569,7 @@ static int ip_tun_fill_encap_opts_erspan(struct sk_buff *skb, return -ENOMEM; md = ip_tunnel_info_opts(tun_info); - if (nla_put_u32(skb, LWTUNNEL_IP_OPT_ERSPAN_VER, md->version)) + if (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_VER, md->version)) goto err; if (md->version == 1 && @@ -602,13 +645,18 @@ static int ip_tun_opts_nlsize(struct ip_tunnel_info *info) opt_len = nla_total_size(0); /* LWTUNNEL_IP_OPTS */ if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { - struct geneve_opt *opt = ip_tunnel_info_opts(info); - - opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_GENEVE */ - + nla_total_size(2) /* OPT_GENEVE_CLASS */ - + nla_total_size(1) /* OPT_GENEVE_TYPE */ - + nla_total_size(opt->length * 4); - /* OPT_GENEVE_DATA */ + struct geneve_opt *opt; + int offset = 0; + + opt_len += nla_total_size(0); /* LWTUNNEL_IP_OPTS_GENEVE */ + while (info->options_len > offset) { + opt = ip_tunnel_info_opts(info) + offset; + opt_len += nla_total_size(2) /* OPT_GENEVE_CLASS */ + + nla_total_size(1) /* OPT_GENEVE_TYPE */ + + nla_total_size(opt->length * 4); + /* OPT_GENEVE_DATA */ + offset += sizeof(*opt) + opt->length * 4; + } } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) { opt_len += nla_total_size(0) /* LWTUNNEL_IP_OPTS_VXLAN */ + nla_total_size(4); /* OPT_VXLAN_GBP */ @@ -661,12 +709,14 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = { }; static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = { + [LWTUNNEL_IP6_UNSPEC] = { .strict_start_type = LWTUNNEL_IP6_OPTS }, [LWTUNNEL_IP6_ID] = { .type = NLA_U64 }, [LWTUNNEL_IP6_DST] = { .len = sizeof(struct in6_addr) }, [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) }, [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 }, [LWTUNNEL_IP6_TC] = { .type = NLA_U8 }, [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 }, + [LWTUNNEL_IP6_OPTS] = { .type = NLA_NESTED }, }; static int ip6_tun_build_state(struct nlattr *attr, diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 32e20b758b68..f35308ff84c3 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -1412,6 +1412,9 @@ static int __init wait_for_devices(void) struct net_device *dev; int found = 0; + /* make sure deferred device probes are finished */ + wait_for_device_probe(); + rtnl_lock(); for_each_netdev(&init_net, dev) { if (ic_is_init_dev(dev)) { diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c index 168b72e18be0..e32e41b99f0f 100644 --- a/net/ipv4/netfilter/nf_flow_table_ipv4.c +++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c @@ -10,7 +10,7 @@ static struct nf_flowtable_type flowtable_ipv4 = { .family = NFPROTO_IPV4, .init = nf_flow_table_init, .setup = nf_flow_table_offload_setup, - .action = nf_flow_rule_route, + .action = nf_flow_rule_route_ipv4, .free = nf_flow_table_free, .hook = nf_flow_offload_ip_hook, .owner = THIS_MODULE, diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index fc34fd1668d6..511eaa94e2d1 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -23,7 +23,6 @@ static void remove_nexthop(struct net *net, struct nexthop *nh, #define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS) static const struct nla_policy rtm_nh_policy[NHA_MAX + 1] = { - [NHA_UNSPEC] = { .strict_start_type = NHA_UNSPEC + 1 }, [NHA_ID] = { .type = NLA_U32 }, [NHA_GROUP] = { .type = NLA_BINARY }, [NHA_GROUP_TYPE] = { .type = NLA_U16 }, diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 
dcc4fa10138d..f88c93c38f11 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2019,10 +2019,52 @@ static int ip_mkroute_input(struct sk_buff *skb, return __mkroute_input(skb, res, in_dev, daddr, saddr, tos); } +/* Implements all the saddr-related checks as ip_route_input_slow(), + * assuming daddr is valid and the destination is not a local broadcast one. + * Uses the provided hint instead of performing a route lookup. + */ +int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, + u8 tos, struct net_device *dev, + const struct sk_buff *hint) +{ + struct in_device *in_dev = __in_dev_get_rcu(dev); + struct rtable *rt = (struct rtable *)hint; + struct net *net = dev_net(dev); + int err = -EINVAL; + u32 tag = 0; + + if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) + goto martian_source; + + if (ipv4_is_zeronet(saddr)) + goto martian_source; + + if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) + goto martian_source; + + if (rt->rt_type != RTN_LOCAL) + goto skip_validate_source; + + tos &= IPTOS_RT_MASK; + err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag); + if (err < 0) + goto martian_source; + +skip_validate_source: + skb_dst_copy(skb, hint); + return 0; + +martian_source: + ip_handle_martian_source(dev, in_dev, skb, daddr, saddr); + return err; +} + /* * NOTE. We drop all the packets that has local source * addresses, because every properly looped back packet * must have correct destination already attached by output routine. + * Changes in the enforced policies must be applied also to + * ip_route_use_hint(). * * Such approach solves two big problems: * 1. Not simplex devices are handled properly. diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 59ded25acd04..fcb2cd167f64 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -340,6 +340,10 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write, user_key[i * 4 + 1], user_key[i * 4 + 2], user_key[i * 4 + 3]); + + if (WARN_ON_ONCE(off >= tbl.maxlen - 1)) + break; + if (i + 1 < n_keys) off += snprintf(tbl.data + off, tbl.maxlen - off, ","); } @@ -1037,7 +1041,7 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_fib_multipath_hash_policy, .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, + .extra2 = &two, }, #endif { diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index c445a81d144e..3737ec096650 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -256,6 +256,9 @@ void tcp_get_available_congestion_control(char *buf, size_t maxlen) offs += snprintf(buf + offs, maxlen - offs, "%s%s", offs == 0 ? "" : " ", ca->name); + + if (WARN_ON_ONCE(offs >= maxlen)) + break; } rcu_read_unlock(); } @@ -285,6 +288,9 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen) offs += snprintf(buf + offs, maxlen - offs, "%s%s", offs == 0 ? "" : " ", ca->name); + + if (WARN_ON_ONCE(offs >= maxlen)) + break; } rcu_read_unlock(); } diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c index 4849edb62d52..12ab5db2b71c 100644 --- a/net/ipv4/tcp_ulp.c +++ b/net/ipv4/tcp_ulp.c @@ -92,6 +92,9 @@ void tcp_get_available_ulp(char *buf, size_t maxlen) offs += snprintf(buf + offs, maxlen - offs, "%s%s", offs == 0 ? 
"" : " ", ulp_ops->name); + + if (WARN_ON_ONCE(offs >= maxlen)) + break; } rcu_read_unlock(); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index da805ee7558b..4da5758cc718 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1297,6 +1297,27 @@ out: #define UDP_SKB_IS_STATELESS 0x80000000 +/* all head states (dst, sk, nf conntrack) except skb extensions are + * cleared by udp_rcv(). + * + * We need to preserve secpath, if present, to eventually process + * IP_CMSG_PASSSEC at recvmsg() time. + * + * Other extensions can be cleared. + */ +static bool udp_try_make_stateless(struct sk_buff *skb) +{ + if (!skb_has_extensions(skb)) + return true; + + if (!secpath_exists(skb)) { + skb_ext_reset(skb); + return true; + } + + return false; +} + static void udp_set_dev_scratch(struct sk_buff *skb) { struct udp_dev_scratch *scratch = udp_skb_scratch(skb); @@ -1308,11 +1329,7 @@ static void udp_set_dev_scratch(struct sk_buff *skb) scratch->csum_unnecessary = !!skb_csum_unnecessary(skb); scratch->is_linear = !skb_is_nonlinear(skb); #endif - /* all head states execept sp (dst, sk, nf) are always cleared by - * udp_rcv() and we need to preserve secpath, if present, to eventually - * process IP_CMSG_PASSSEC at recvmsg() time - */ - if (likely(!skb_sec_path(skb))) + if (udp_try_make_stateless(skb)) scratch->_tsize_state |= UDP_SKB_IS_STATELESS; } diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index ecff3fce9807..89ba7c87de5d 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -92,7 +92,7 @@ static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb) int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb) { return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, - net, sk, skb, NULL, skb_dst(skb)->dev, + net, sk, skb, skb->dev, skb_dst(skb)->dev, __xfrm4_output, !(IPCB(skb)->flags & IPSKB_REROUTED)); } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index f66bc2af4e9d..7bae6a91b487 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1461,6 +1461,8 @@ out: } #endif goto failure; + } else if (fib6_requires_src(rt)) { + fib6_routes_require_src_inc(info->nl_net); } return err; @@ -1933,6 +1935,8 @@ int fib6_del(struct fib6_info *rt, struct nl_info *info) struct fib6_info *cur = rcu_dereference_protected(*rtp, lockdep_is_held(&table->tb6_lock)); if (rt == cur) { + if (fib6_requires_src(cur)) + fib6_routes_require_src_dec(info->nl_net); fib6_del_route(table, fn, rtp, info); return 0; } diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index ef7f707d9ae3..7b089d0ac8cd 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -86,11 +86,27 @@ static void ip6_sublist_rcv_finish(struct list_head *head) } } +static bool ip6_can_use_hint(const struct sk_buff *skb, + const struct sk_buff *hint) +{ + return hint && !skb_dst(skb) && + ipv6_addr_equal(&ipv6_hdr(hint)->daddr, &ipv6_hdr(skb)->daddr); +} + +static struct sk_buff *ip6_extract_route_hint(const struct net *net, + struct sk_buff *skb) +{ + if (fib6_routes_require_src(net) || fib6_has_custom_rules(net)) + return NULL; + + return skb; +} + static void ip6_list_rcv_finish(struct net *net, struct sock *sk, struct list_head *head) { + struct sk_buff *skb, *next, *hint = NULL; struct dst_entry *curr_dst = NULL; - struct sk_buff *skb, *next; struct list_head sublist; INIT_LIST_HEAD(&sublist); @@ -104,9 +120,15 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk, skb = l3mdev_ip6_rcv(skb); if (!skb) continue; - ip6_rcv_finish_core(net, sk, skb); + + if 
(ip6_can_use_hint(skb, hint)) + skb_dst_copy(skb, hint); + else + ip6_rcv_finish_core(net, sk, skb); dst = skb_dst(skb); if (curr_dst != dst) { + hint = ip6_extract_route_hint(net, skb); + /* dispatch old sublist */ if (!list_empty(&sublist)) ip6_sublist_rcv_finish(&sublist); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 71827b56c006..945508a7cb0f 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -160,7 +160,7 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net_device *dev = skb_dst(skb)->dev; + struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev; struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); skb->protocol = htons(ETH_P_IPV6); @@ -173,7 +173,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) } return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, - net, sk, skb, NULL, dev, + net, sk, skb, indev, dev, ip6_finish_output, !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 264c292e7dcc..79fc012dd2ca 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -363,8 +363,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, break; case IPV6_TRANSPARENT: - if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) && - !ns_capable(net->user_ns, CAP_NET_RAW)) { + if (valbool && !ns_capable(net->user_ns, CAP_NET_RAW) && + !ns_capable(net->user_ns, CAP_NET_ADMIN)) { retv = -EPERM; break; } diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 69443e9a3aa5..0594131fa46d 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -128,9 +128,9 @@ config IP6_NF_MATCH_HL depends on NETFILTER_ADVANCED select NETFILTER_XT_MATCH_HL ---help--- - This is a backwards-compat option for the user's convenience - (e.g. when running oldconfig). It selects - CONFIG_NETFILTER_XT_MATCH_HL. + This is a backwards-compat option for the user's convenience + (e.g. when running oldconfig). It selects + CONFIG_NETFILTER_XT_MATCH_HL. config IP6_NF_MATCH_IPV6HEADER tristate '"ipv6header" IPv6 Extension Headers Match' @@ -184,9 +184,9 @@ config IP6_NF_TARGET_HL depends on NETFILTER_ADVANCED && IP6_NF_MANGLE select NETFILTER_XT_TARGET_HL ---help--- - This is a backwards-compatible option for the user's convenience - (e.g. when running oldconfig). It selects - CONFIG_NETFILTER_XT_TARGET_HL. + This is a backwards-compatible option for the user's convenience + (e.g. when running oldconfig). It selects + CONFIG_NETFILTER_XT_TARGET_HL. config IP6_NF_FILTER tristate "Packet filtering" @@ -245,14 +245,14 @@ config IP6_NF_RAW # security table for MAC policy config IP6_NF_SECURITY - tristate "Security table" - depends on SECURITY - depends on NETFILTER_ADVANCED - help - This option adds a `security' table to iptables, for use - with Mandatory Access Control (MAC) policy. - - If unsure, say N. + tristate "Security table" + depends on SECURITY + depends on NETFILTER_ADVANCED + help + This option adds a `security' table to iptables, for use + with Mandatory Access Control (MAC) policy. + + If unsure, say N. 
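One note on the ipv6_sockglue.c hunk above: either CAP_NET_RAW or CAP_NET_ADMIN has always been sufficient for IPV6_TRANSPARENT; the patch only swaps the order in which the two capabilities are consulted, presumably so that a caller holding only CAP_NET_RAW does not first trigger a CAP_NET_ADMIN denial. A small userspace sketch (not part of this patch) of the call that this check gates:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPV6_TRANSPARENT
#define IPV6_TRANSPARENT 75	/* uapi value from <linux/in6.h> */
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;

	/* Requires CAP_NET_RAW or CAP_NET_ADMIN in the socket's user namespace. */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_TRANSPARENT, &one, sizeof(one)))
		perror("setsockopt(IPV6_TRANSPARENT)");

	close(fd);
	return 0;
}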
config IP6_NF_NAT tristate "ip6tables NAT support" diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c index f069bc0dc056..a8566ee12e83 100644 --- a/net/ipv6/netfilter/nf_flow_table_ipv6.c +++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c @@ -11,7 +11,7 @@ static struct nf_flowtable_type flowtable_ipv6 = { .family = NFPROTO_IPV6, .init = nf_flow_table_init, .setup = nf_flow_table_offload_setup, - .action = nf_flow_rule_route, + .action = nf_flow_rule_route_ipv6, .free = nf_flow_table_free, .hook = nf_flow_offload_ipv6_hook, .owner = THIS_MODULE, diff --git a/net/ipv6/route.c b/net/ipv6/route.c index edcb52543518..b59940416cb5 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -634,7 +634,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh) * Router Reachability Probe MUST be rate-limited * to no more than one per minute. */ - if (fib6_nh->fib_nh_gw_family) + if (!fib6_nh->fib_nh_gw_family) return; nh_gw = &fib6_nh->fib_nh_gw6; @@ -6199,6 +6199,9 @@ static int __net_init ip6_route_net_init(struct net *net) dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst, ip6_template_metrics, true); INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached); +#ifdef CONFIG_IPV6_SUBTREES + net->ipv6.fib6_routes_require_src = 0; +#endif #endif net->ipv6.sysctl.flush_delay = 0; diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index e70567446f28..85a5447a3e8d 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -149,8 +149,9 @@ static void advance_nextseg(struct ipv6_sr_hdr *srh, struct in6_addr *daddr) *daddr = *addr; } -int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, - u32 tbl_id) +static int +seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, + u32 tbl_id, bool local_delivery) { struct net *net = dev_net(skb->dev); struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -158,6 +159,7 @@ int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, struct dst_entry *dst = NULL; struct rt6_info *rt; struct flowi6 fl6; + int dev_flags = 0; fl6.flowi6_iif = skb->dev->ifindex; fl6.daddr = nhaddr ? *nhaddr : hdr->daddr; @@ -182,7 +184,13 @@ int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, dst = &rt->dst; } - if (dst && dst->dev->flags & IFF_LOOPBACK && !dst->error) { + /* we want to discard traffic destined for local packet processing, + * if @local_delivery is set to false. 
+ */ + if (!local_delivery) + dev_flags |= IFF_LOOPBACK; + + if (dst && (dst->dev->flags & dev_flags) && !dst->error) { dst_release(dst); dst = NULL; } @@ -199,6 +207,12 @@ out: return dst->error; } +int seg6_lookup_nexthop(struct sk_buff *skb, + struct in6_addr *nhaddr, u32 tbl_id) +{ + return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false); +} + /* regular endpoint function */ static int input_action_end(struct sk_buff *skb, struct seg6_local_lwt *slwt) { @@ -396,7 +410,7 @@ static int input_action_end_dt6(struct sk_buff *skb, skb_set_transport_header(skb, sizeof(struct ipv6hdr)); - seg6_lookup_nexthop(skb, NULL, slwt->table); + seg6_lookup_any_nexthop(skb, NULL, slwt->table, true); return dst_input(skb); diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index eecac1b7148e..fbe51d40bd7e 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -187,7 +187,7 @@ skip_frag: int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, - net, sk, skb, NULL, skb_dst(skb)->dev, + net, sk, skb, skb->dev, skb_dst(skb)->dev, __xfrm6_output, !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 4f03ebe732fa..6cbb1286d6c0 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -32,7 +32,8 @@ mac80211-y := \ chan.o \ trace.o mlme.o \ tdls.o \ - ocb.o + ocb.o \ + airtime.o mac80211-$(CONFIG_MAC80211_LEDS) += led.o mac80211-$(CONFIG_MAC80211_DEBUGFS) += \ diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c new file mode 100644 index 000000000000..63cb0028b02d --- /dev/null +++ b/net/mac80211/airtime.c @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (C) 2019 Felix Fietkau <[email protected]> + */ + +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "sta_info.h" + +#define AVG_PKT_SIZE 1024 + +/* Number of bits for an average sized packet */ +#define MCS_NBITS (AVG_PKT_SIZE << 3) + +/* Number of kilo-symbols (symbols * 1024) for a packet with (bps) bits per + * symbol. We use k-symbols to avoid rounding in the _TIME macros below. + */ +#define MCS_N_KSYMS(bps) DIV_ROUND_UP(MCS_NBITS << 10, (bps)) + +/* Transmission time (in 1024 * usec) for a packet containing (ksyms) * 1024 + * symbols. + */ +#define MCS_SYMBOL_TIME(sgi, ksyms) \ + (sgi ? \ + ((ksyms) * 4 * 18) / 20 : /* 3.6 us per sym */ \ + ((ksyms) * 4) /* 4.0 us per sym */ \ + ) + +/* Transmit duration for the raw data part of an average sized packet */ +#define MCS_DURATION(streams, sgi, bps) \ + ((u32)MCS_SYMBOL_TIME(sgi, MCS_N_KSYMS((streams) * (bps)))) + +#define MCS_DURATION_S(shift, streams, sgi, bps) \ + ((u16)((MCS_DURATION(streams, sgi, bps) >> shift))) + +/* These should match the values in enum nl80211_he_gi */ +#define HE_GI_08 0 +#define HE_GI_16 1 +#define HE_GI_32 2 + +/* Transmission time (1024 usec) for a packet containing (ksyms) * k-symbols */ +#define HE_SYMBOL_TIME(gi, ksyms) \ + (gi == HE_GI_08 ? \ + ((ksyms) * 16 * 17) / 20 : /* 13.6 us per sym */ \ + (gi == HE_GI_16 ? 
\ + ((ksyms) * 16 * 18) / 20 : /* 14.4 us per sym */ \ + ((ksyms) * 16) /* 16.0 us per sym */ \ + )) + +/* Transmit duration for the raw data part of an average sized packet */ +#define HE_DURATION(streams, gi, bps) \ + ((u32)HE_SYMBOL_TIME(gi, MCS_N_KSYMS((streams) * (bps)))) + +#define HE_DURATION_S(shift, streams, gi, bps) \ + (HE_DURATION(streams, gi, bps) >> shift) + +#define BW_20 0 +#define BW_40 1 +#define BW_80 2 +#define BW_160 3 + +/* + * Define group sort order: HT40 -> SGI -> #streams + */ +#define IEEE80211_MAX_STREAMS 4 +#define IEEE80211_HT_STREAM_GROUPS 4 /* BW(=2) * SGI(=2) */ +#define IEEE80211_VHT_STREAM_GROUPS 8 /* BW(=4) * SGI(=2) */ + +#define IEEE80211_HE_MAX_STREAMS 8 +#define IEEE80211_HE_STREAM_GROUPS 12 /* BW(=4) * GI(=3) */ + +#define IEEE80211_HT_GROUPS_NB (IEEE80211_MAX_STREAMS * \ + IEEE80211_HT_STREAM_GROUPS) +#define IEEE80211_VHT_GROUPS_NB (IEEE80211_MAX_STREAMS * \ + IEEE80211_VHT_STREAM_GROUPS) +#define IEEE80211_HE_GROUPS_NB (IEEE80211_HE_MAX_STREAMS * \ + IEEE80211_HE_STREAM_GROUPS) +#define IEEE80211_GROUPS_NB (IEEE80211_HT_GROUPS_NB + \ + IEEE80211_VHT_GROUPS_NB + \ + IEEE80211_HE_GROUPS_NB) + +#define IEEE80211_HT_GROUP_0 0 +#define IEEE80211_VHT_GROUP_0 (IEEE80211_HT_GROUP_0 + IEEE80211_HT_GROUPS_NB) +#define IEEE80211_HE_GROUP_0 (IEEE80211_VHT_GROUP_0 + IEEE80211_VHT_GROUPS_NB) + +#define MCS_GROUP_RATES 12 + +#define HT_GROUP_IDX(_streams, _sgi, _ht40) \ + IEEE80211_HT_GROUP_0 + \ + IEEE80211_MAX_STREAMS * 2 * _ht40 + \ + IEEE80211_MAX_STREAMS * _sgi + \ + _streams - 1 + +#define _MAX(a, b) (((a)>(b))?(a):(b)) + +#define GROUP_SHIFT(duration) \ + _MAX(0, 16 - __builtin_clz(duration)) + +/* MCS rate information for an MCS group */ +#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \ + [HT_GROUP_IDX(_streams, _sgi, _ht40)] = { \ + .shift = _s, \ + .duration = { \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 54 : 26), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 108 : 52), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 162 : 78), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 216 : 104), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 324 : 156), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 432 : 208), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 486 : 234), \ + MCS_DURATION_S(_s, _streams, _sgi, _ht40 ? 540 : 260) \ + } \ +} + +#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \ + GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26)) + +#define MCS_GROUP(_streams, _sgi, _ht40) \ + __MCS_GROUP(_streams, _sgi, _ht40, \ + MCS_GROUP_SHIFT(_streams, _sgi, _ht40)) + +#define VHT_GROUP_IDX(_streams, _sgi, _bw) \ + (IEEE80211_VHT_GROUP_0 + \ + IEEE80211_MAX_STREAMS * 2 * (_bw) + \ + IEEE80211_MAX_STREAMS * (_sgi) + \ + (_streams) - 1) + +#define BW2VBPS(_bw, r4, r3, r2, r1) \ + (_bw == BW_160 ? r4 : _bw == BW_80 ? r3 : _bw == BW_40 ? 
r2 : r1) + +#define __VHT_GROUP(_streams, _sgi, _bw, _s) \ + [VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \ + .shift = _s, \ + .duration = { \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 234, 117, 54, 26)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 468, 234, 108, 52)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 702, 351, 162, 78)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 936, 468, 216, 104)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 1404, 702, 324, 156)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 1872, 936, 432, 208)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 2106, 1053, 486, 234)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 2340, 1170, 540, 260)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 2808, 1404, 648, 312)), \ + MCS_DURATION_S(_s, _streams, _sgi, \ + BW2VBPS(_bw, 3120, 1560, 720, 346)) \ + } \ +} + +#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \ + GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \ + BW2VBPS(_bw, 243, 117, 54, 26))) + +#define VHT_GROUP(_streams, _sgi, _bw) \ + __VHT_GROUP(_streams, _sgi, _bw, \ + VHT_GROUP_SHIFT(_streams, _sgi, _bw)) + + +#define HE_GROUP_IDX(_streams, _gi, _bw) \ + (IEEE80211_HE_GROUP_0 + \ + IEEE80211_HE_MAX_STREAMS * 3 * (_bw) + \ + IEEE80211_HE_MAX_STREAMS * (_gi) + \ + (_streams) - 1) + +#define __HE_GROUP(_streams, _gi, _bw, _s) \ + [HE_GROUP_IDX(_streams, _gi, _bw)] = { \ + .shift = _s, \ + .duration = { \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 979, 489, 230, 115)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 1958, 979, 475, 230)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 2937, 1468, 705, 345)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 3916, 1958, 936, 475)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 5875, 2937, 1411, 705)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 7833, 3916, 1872, 936)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 8827, 4406, 2102, 1051)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 9806, 4896, 2347, 1166)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 11764, 5875, 2808, 1411)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 13060, 6523, 3124, 1555)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 14702, 7344, 3513, 1756)), \ + HE_DURATION_S(_s, _streams, _gi, \ + BW2VBPS(_bw, 16329, 8164, 3902, 1944)) \ + } \ +} + +#define HE_GROUP_SHIFT(_streams, _gi, _bw) \ + GROUP_SHIFT(HE_DURATION(_streams, _gi, \ + BW2VBPS(_bw, 979, 489, 230, 115))) + +#define HE_GROUP(_streams, _gi, _bw) \ + __HE_GROUP(_streams, _gi, _bw, \ + HE_GROUP_SHIFT(_streams, _gi, _bw)) +struct mcs_group { + u8 shift; + u16 duration[MCS_GROUP_RATES]; +}; + +static const struct mcs_group airtime_mcs_groups[] = { + MCS_GROUP(1, 0, BW_20), + MCS_GROUP(2, 0, BW_20), + MCS_GROUP(3, 0, BW_20), + MCS_GROUP(4, 0, BW_20), + + MCS_GROUP(1, 1, BW_20), + MCS_GROUP(2, 1, BW_20), + MCS_GROUP(3, 1, BW_20), + MCS_GROUP(4, 1, BW_20), + + MCS_GROUP(1, 0, BW_40), + MCS_GROUP(2, 0, BW_40), + MCS_GROUP(3, 0, BW_40), + MCS_GROUP(4, 0, BW_40), + + MCS_GROUP(1, 1, BW_40), + MCS_GROUP(2, 1, BW_40), + MCS_GROUP(3, 1, BW_40), + MCS_GROUP(4, 1, BW_40), + + VHT_GROUP(1, 0, BW_20), + VHT_GROUP(2, 0, BW_20), + VHT_GROUP(3, 0, BW_20), + VHT_GROUP(4, 0, BW_20), + + VHT_GROUP(1, 1, BW_20), + VHT_GROUP(2, 1, BW_20), + VHT_GROUP(3, 1, BW_20), + VHT_GROUP(4, 1, BW_20), + + VHT_GROUP(1, 0, BW_40), + VHT_GROUP(2, 0, BW_40), + VHT_GROUP(3, 0, BW_40), + VHT_GROUP(4, 0, BW_40), 
+ + VHT_GROUP(1, 1, BW_40), + VHT_GROUP(2, 1, BW_40), + VHT_GROUP(3, 1, BW_40), + VHT_GROUP(4, 1, BW_40), + + VHT_GROUP(1, 0, BW_80), + VHT_GROUP(2, 0, BW_80), + VHT_GROUP(3, 0, BW_80), + VHT_GROUP(4, 0, BW_80), + + VHT_GROUP(1, 1, BW_80), + VHT_GROUP(2, 1, BW_80), + VHT_GROUP(3, 1, BW_80), + VHT_GROUP(4, 1, BW_80), + + VHT_GROUP(1, 0, BW_160), + VHT_GROUP(2, 0, BW_160), + VHT_GROUP(3, 0, BW_160), + VHT_GROUP(4, 0, BW_160), + + VHT_GROUP(1, 1, BW_160), + VHT_GROUP(2, 1, BW_160), + VHT_GROUP(3, 1, BW_160), + VHT_GROUP(4, 1, BW_160), + + HE_GROUP(1, HE_GI_08, BW_20), + HE_GROUP(2, HE_GI_08, BW_20), + HE_GROUP(3, HE_GI_08, BW_20), + HE_GROUP(4, HE_GI_08, BW_20), + HE_GROUP(5, HE_GI_08, BW_20), + HE_GROUP(6, HE_GI_08, BW_20), + HE_GROUP(7, HE_GI_08, BW_20), + HE_GROUP(8, HE_GI_08, BW_20), + + HE_GROUP(1, HE_GI_16, BW_20), + HE_GROUP(2, HE_GI_16, BW_20), + HE_GROUP(3, HE_GI_16, BW_20), + HE_GROUP(4, HE_GI_16, BW_20), + HE_GROUP(5, HE_GI_16, BW_20), + HE_GROUP(6, HE_GI_16, BW_20), + HE_GROUP(7, HE_GI_16, BW_20), + HE_GROUP(8, HE_GI_16, BW_20), + + HE_GROUP(1, HE_GI_32, BW_20), + HE_GROUP(2, HE_GI_32, BW_20), + HE_GROUP(3, HE_GI_32, BW_20), + HE_GROUP(4, HE_GI_32, BW_20), + HE_GROUP(5, HE_GI_32, BW_20), + HE_GROUP(6, HE_GI_32, BW_20), + HE_GROUP(7, HE_GI_32, BW_20), + HE_GROUP(8, HE_GI_32, BW_20), + + HE_GROUP(1, HE_GI_08, BW_40), + HE_GROUP(2, HE_GI_08, BW_40), + HE_GROUP(3, HE_GI_08, BW_40), + HE_GROUP(4, HE_GI_08, BW_40), + HE_GROUP(5, HE_GI_08, BW_40), + HE_GROUP(6, HE_GI_08, BW_40), + HE_GROUP(7, HE_GI_08, BW_40), + HE_GROUP(8, HE_GI_08, BW_40), + + HE_GROUP(1, HE_GI_16, BW_40), + HE_GROUP(2, HE_GI_16, BW_40), + HE_GROUP(3, HE_GI_16, BW_40), + HE_GROUP(4, HE_GI_16, BW_40), + HE_GROUP(5, HE_GI_16, BW_40), + HE_GROUP(6, HE_GI_16, BW_40), + HE_GROUP(7, HE_GI_16, BW_40), + HE_GROUP(8, HE_GI_16, BW_40), + + HE_GROUP(1, HE_GI_32, BW_40), + HE_GROUP(2, HE_GI_32, BW_40), + HE_GROUP(3, HE_GI_32, BW_40), + HE_GROUP(4, HE_GI_32, BW_40), + HE_GROUP(5, HE_GI_32, BW_40), + HE_GROUP(6, HE_GI_32, BW_40), + HE_GROUP(7, HE_GI_32, BW_40), + HE_GROUP(8, HE_GI_32, BW_40), + + HE_GROUP(1, HE_GI_08, BW_80), + HE_GROUP(2, HE_GI_08, BW_80), + HE_GROUP(3, HE_GI_08, BW_80), + HE_GROUP(4, HE_GI_08, BW_80), + HE_GROUP(5, HE_GI_08, BW_80), + HE_GROUP(6, HE_GI_08, BW_80), + HE_GROUP(7, HE_GI_08, BW_80), + HE_GROUP(8, HE_GI_08, BW_80), + + HE_GROUP(1, HE_GI_16, BW_80), + HE_GROUP(2, HE_GI_16, BW_80), + HE_GROUP(3, HE_GI_16, BW_80), + HE_GROUP(4, HE_GI_16, BW_80), + HE_GROUP(5, HE_GI_16, BW_80), + HE_GROUP(6, HE_GI_16, BW_80), + HE_GROUP(7, HE_GI_16, BW_80), + HE_GROUP(8, HE_GI_16, BW_80), + + HE_GROUP(1, HE_GI_32, BW_80), + HE_GROUP(2, HE_GI_32, BW_80), + HE_GROUP(3, HE_GI_32, BW_80), + HE_GROUP(4, HE_GI_32, BW_80), + HE_GROUP(5, HE_GI_32, BW_80), + HE_GROUP(6, HE_GI_32, BW_80), + HE_GROUP(7, HE_GI_32, BW_80), + HE_GROUP(8, HE_GI_32, BW_80), + + HE_GROUP(1, HE_GI_08, BW_160), + HE_GROUP(2, HE_GI_08, BW_160), + HE_GROUP(3, HE_GI_08, BW_160), + HE_GROUP(4, HE_GI_08, BW_160), + HE_GROUP(5, HE_GI_08, BW_160), + HE_GROUP(6, HE_GI_08, BW_160), + HE_GROUP(7, HE_GI_08, BW_160), + HE_GROUP(8, HE_GI_08, BW_160), + + HE_GROUP(1, HE_GI_16, BW_160), + HE_GROUP(2, HE_GI_16, BW_160), + HE_GROUP(3, HE_GI_16, BW_160), + HE_GROUP(4, HE_GI_16, BW_160), + HE_GROUP(5, HE_GI_16, BW_160), + HE_GROUP(6, HE_GI_16, BW_160), + HE_GROUP(7, HE_GI_16, BW_160), + HE_GROUP(8, HE_GI_16, BW_160), + + HE_GROUP(1, HE_GI_32, BW_160), + HE_GROUP(2, HE_GI_32, BW_160), + HE_GROUP(3, HE_GI_32, BW_160), + HE_GROUP(4, HE_GI_32, BW_160), + HE_GROUP(5, HE_GI_32, 
BW_160), + HE_GROUP(6, HE_GI_32, BW_160), + HE_GROUP(7, HE_GI_32, BW_160), + HE_GROUP(8, HE_GI_32, BW_160), +}; + +static u32 +ieee80211_calc_legacy_rate_duration(u16 bitrate, bool short_pre, + bool cck, int len) +{ + u32 duration; + + if (cck) { + duration = 144 + 48; /* preamble + PLCP */ + if (short_pre) + duration >>= 1; + + duration += 10; /* SIFS */ + } else { + duration = 20 + 16; /* premable + SIFS */ + } + + len <<= 3; + duration += (len * 10) / bitrate; + + return duration; +} + +u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw, + struct ieee80211_rx_status *status, + int len) +{ + struct ieee80211_supported_band *sband; + const struct ieee80211_rate *rate; + bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI; + bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE; + int bw, streams; + int group, idx; + u32 duration; + bool cck; + + switch (status->bw) { + case RATE_INFO_BW_20: + bw = BW_20; + break; + case RATE_INFO_BW_40: + bw = BW_40; + break; + case RATE_INFO_BW_80: + bw = BW_80; + break; + case RATE_INFO_BW_160: + bw = BW_160; + break; + default: + WARN_ON_ONCE(1); + return 0; + } + + switch (status->encoding) { + case RX_ENC_LEGACY: + if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ)) + return 0; + + sband = hw->wiphy->bands[status->band]; + if (!sband || status->rate_idx > sband->n_bitrates) + return 0; + + rate = &sband->bitrates[status->rate_idx]; + cck = rate->flags & IEEE80211_RATE_MANDATORY_B; + + return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp, + cck, len); + + case RX_ENC_VHT: + streams = status->nss; + idx = status->rate_idx; + group = VHT_GROUP_IDX(streams, sgi, bw); + break; + case RX_ENC_HT: + streams = ((status->rate_idx >> 3) & 3) + 1; + idx = status->rate_idx & 7; + group = HT_GROUP_IDX(streams, sgi, bw); + break; + case RX_ENC_HE: + streams = status->nss; + idx = status->rate_idx; + group = HE_GROUP_IDX(streams, status->he_gi, bw); + break; + default: + WARN_ON_ONCE(1); + return 0; + } + + if (WARN_ON_ONCE((status->encoding != RX_ENC_HE && streams > 4) || + (status->encoding == RX_ENC_HE && streams > 8))) + return 0; + + duration = airtime_mcs_groups[group].duration[idx]; + duration <<= airtime_mcs_groups[group].shift; + duration *= len; + duration /= AVG_PKT_SIZE; + duration /= 1024; + + duration += 36 + (streams << 2); + + return duration; +} +EXPORT_SYMBOL_GPL(ieee80211_calc_rx_airtime); + +static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw, + struct ieee80211_tx_rate *rate, + u8 band, int len) +{ + struct ieee80211_rx_status stat = { + .band = band, + }; + + if (rate->idx < 0 || !rate->count) + return 0; + + if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + stat.bw = RATE_INFO_BW_80; + else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + stat.bw = RATE_INFO_BW_40; + else + stat.bw = RATE_INFO_BW_20; + + stat.enc_flags = 0; + if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + stat.enc_flags |= RX_ENC_FLAG_SHORTPRE; + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + stat.enc_flags |= RX_ENC_FLAG_SHORT_GI; + + stat.rate_idx = rate->idx; + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + stat.encoding = RX_ENC_VHT; + stat.rate_idx = ieee80211_rate_get_vht_mcs(rate); + stat.nss = ieee80211_rate_get_vht_nss(rate); + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + stat.encoding = RX_ENC_HT; + } else { + stat.encoding = RX_ENC_LEGACY; + } + + return ieee80211_calc_rx_airtime(hw, &stat, len); +} + +u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw, + struct ieee80211_tx_info *info, + int len) +{ + u32 duration = 0; + int 
i; + + for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) { + struct ieee80211_tx_rate *rate = &info->status.rates[i]; + u32 cur_duration; + + cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate, + info->band, len); + if (!cur_duration) + break; + + duration += cur_duration * rate->count; + } + + return duration; +} +EXPORT_SYMBOL_GPL(ieee80211_calc_tx_airtime); + +u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *pubsta, + int len) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_chanctx_conf *conf; + int rateidx, shift = 0; + bool cck, short_pream; + u32 basic_rates; + u8 band = 0; + u16 rate; + + len += 38; /* Ethernet header length */ + + conf = rcu_dereference(vif->chanctx_conf); + if (conf) { + band = conf->def.chan->band; + shift = ieee80211_chandef_get_shift(&conf->def); + } + + if (pubsta) { + struct sta_info *sta = container_of(pubsta, struct sta_info, + sta); + + return ieee80211_calc_tx_airtime_rate(hw, + &sta->tx_stats.last_rate, + band, len); + } + + if (!conf) + return 0; + + /* No station to get latest rate from, so calculate the worst-case + * duration using the lowest configured basic rate. + */ + sband = hw->wiphy->bands[band]; + + basic_rates = vif->bss_conf.basic_rates; + short_pream = vif->bss_conf.use_short_preamble; + + rateidx = basic_rates ? ffs(basic_rates) - 1 : 0; + rate = sband->bitrates[rateidx].bitrate << shift; + cck = sband->bitrates[rateidx].flags & IEEE80211_RATE_MANDATORY_B; + + return ieee80211_calc_legacy_rate_duration(rate, short_pream, cck, len); +} diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 568b3b276931..ad41d74530c6 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -59,6 +59,8 @@ static const struct file_operations name## _ops = { \ debugfs_create_file(#name, mode, phyd, local, &name## _ops); +DEBUGFS_READONLY_FILE(hw_conf, "%x", + local->hw.conf.flags); DEBUGFS_READONLY_FILE(user_power, "%d", local->user_power_level); DEBUGFS_READONLY_FILE(power, "%d", @@ -148,6 +150,87 @@ static const struct file_operations aqm_ops = { .llseek = default_llseek, }; +static ssize_t aql_txq_limit_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[400]; + int len = 0; + + len = scnprintf(buf, sizeof(buf), + "AC AQL limit low AQL limit high\n" + "VO %u %u\n" + "VI %u %u\n" + "BE %u %u\n" + "BK %u %u\n", + local->aql_txq_limit_low[IEEE80211_AC_VO], + local->aql_txq_limit_high[IEEE80211_AC_VO], + local->aql_txq_limit_low[IEEE80211_AC_VI], + local->aql_txq_limit_high[IEEE80211_AC_VI], + local->aql_txq_limit_low[IEEE80211_AC_BE], + local->aql_txq_limit_high[IEEE80211_AC_BE], + local->aql_txq_limit_low[IEEE80211_AC_BK], + local->aql_txq_limit_high[IEEE80211_AC_BK]); + return simple_read_from_buffer(user_buf, count, ppos, + buf, len); +} + +static ssize_t aql_txq_limit_write(struct file *file, + const char __user *user_buf, + size_t count, + loff_t *ppos) +{ + struct ieee80211_local *local = file->private_data; + char buf[100]; + size_t len; + u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old; + struct sta_info *sta; + + if (count > sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[sizeof(buf) - 1] = 0; + len = strlen(buf); + if (len > 0 && buf[len - 1] == '\n') + buf[len - 1] = 0; + + if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3) + return -EINVAL; + + if 
(ac >= IEEE80211_NUM_ACS) + return -EINVAL; + + q_limit_low_old = local->aql_txq_limit_low[ac]; + q_limit_high_old = local->aql_txq_limit_high[ac]; + + local->aql_txq_limit_low[ac] = q_limit_low; + local->aql_txq_limit_high[ac] = q_limit_high; + + mutex_lock(&local->sta_mtx); + list_for_each_entry(sta, &local->sta_list, list) { + /* If a sta has customized queue limits, keep it */ + if (sta->airtime[ac].aql_limit_low == q_limit_low_old && + sta->airtime[ac].aql_limit_high == q_limit_high_old) { + sta->airtime[ac].aql_limit_low = q_limit_low; + sta->airtime[ac].aql_limit_high = q_limit_high; + } + } + mutex_unlock(&local->sta_mtx); + return count; +} + +static const struct file_operations aql_txq_limit_ops = { + .write = aql_txq_limit_write, + .read = aql_txq_limit_read, + .open = simple_open, + .llseek = default_llseek, +}; + static ssize_t force_tx_status_read(struct file *file, char __user *user_buf, size_t count, @@ -433,6 +516,7 @@ void debugfs_hw_add(struct ieee80211_local *local) DEBUGFS_ADD(hwflags); DEBUGFS_ADD(user_power); DEBUGFS_ADD(power); + DEBUGFS_ADD(hw_conf); DEBUGFS_ADD_MODE(force_tx_status, 0600); if (local->ops->wake_tx_queue) @@ -441,6 +525,10 @@ void debugfs_hw_add(struct ieee80211_local *local) debugfs_create_u16("airtime_flags", 0600, phyd, &local->airtime_flags); + DEBUGFS_ADD(aql_txq_limit); + debugfs_create_u32("aql_threshold", 0600, + phyd, &local->aql_threshold); + statsd = debugfs_create_dir("statistics", phyd); /* if the dir failed, don't put all the other things into the root! */ diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index c8ad20c28c43..0185e6e5e5d1 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -197,10 +197,12 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf, { struct sta_info *sta = file->private_data; struct ieee80211_local *local = sta->sdata->local; - size_t bufsz = 200; + size_t bufsz = 400; char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf; u64 rx_airtime = 0, tx_airtime = 0; s64 deficit[IEEE80211_NUM_ACS]; + u32 q_depth[IEEE80211_NUM_ACS]; + u32 q_limit_l[IEEE80211_NUM_ACS], q_limit_h[IEEE80211_NUM_ACS]; ssize_t rv; int ac; @@ -212,19 +214,22 @@ static ssize_t sta_airtime_read(struct file *file, char __user *userbuf, rx_airtime += sta->airtime[ac].rx_airtime; tx_airtime += sta->airtime[ac].tx_airtime; deficit[ac] = sta->airtime[ac].deficit; + q_limit_l[ac] = sta->airtime[ac].aql_limit_low; + q_limit_h[ac] = sta->airtime[ac].aql_limit_high; spin_unlock_bh(&local->active_txq_lock[ac]); + q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending); } p += scnprintf(p, bufsz + buf - p, "RX: %llu us\nTX: %llu us\nWeight: %u\n" - "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n", - rx_airtime, - tx_airtime, - sta->airtime_weight, - deficit[0], - deficit[1], - deficit[2], - deficit[3]); + "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n" + "Q depth: VO: %u us VI: %u us BE: %u us BK: %u us\n" + "Q limit[low/high]: VO: %u/%u VI: %u/%u BE: %u/%u BK: %u/%u\n", + rx_airtime, tx_airtime, sta->airtime_weight, + deficit[0], deficit[1], deficit[2], deficit[3], + q_depth[0], q_depth[1], q_depth[2], q_depth[3], + q_limit_l[0], q_limit_h[0], q_limit_l[1], q_limit_h[1], + q_limit_l[2], q_limit_h[2], q_limit_l[3], q_limit_h[3]), rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); kfree(buf); @@ -236,7 +241,25 @@ static ssize_t sta_airtime_write(struct file *file, const char __user *userbuf, { struct sta_info *sta = file->private_data; struct 
ieee80211_local *local = sta->sdata->local; - int ac; + u32 ac, q_limit_l, q_limit_h; + char _buf[100] = {}, *buf = _buf; + + if (count > sizeof(_buf)) + return -EINVAL; + + if (copy_from_user(buf, userbuf, count)) + return -EFAULT; + + buf[sizeof(_buf) - 1] = '\0'; + if (sscanf(buf, "queue limit %u %u %u", &ac, &q_limit_l, &q_limit_h) + != 3) + return -EINVAL; + + if (ac >= IEEE80211_NUM_ACS) + return -EINVAL; + + sta->airtime[ac].aql_limit_low = q_limit_l; + sta->airtime[ac].aql_limit_high = q_limit_h; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { spin_lock_bh(&local->active_txq_lock[ac]); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 05406e9c05b3..ad15b3be8bb3 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1142,6 +1142,10 @@ struct ieee80211_local { u16 schedule_round[IEEE80211_NUM_ACS]; u16 airtime_flags; + u32 aql_txq_limit_low[IEEE80211_NUM_ACS]; + u32 aql_txq_limit_high[IEEE80211_NUM_ACS]; + u32 aql_threshold; + atomic_t aql_total_pending_airtime; const struct ieee80211_ops *ops; @@ -2249,6 +2253,10 @@ const char *ieee80211_get_reason_code_string(u16 reason_code); extern const struct ethtool_ops ieee80211_ethtool_ops; +u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *pubsta, + int len); #ifdef CONFIG_MAC80211_NOINLINE #define debug_noinline noinline #else diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 2d05c4cfaf6d..6cca0853f183 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -667,8 +667,16 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, for (i = 0; i < IEEE80211_NUM_ACS; i++) { INIT_LIST_HEAD(&local->active_txqs[i]); spin_lock_init(&local->active_txq_lock[i]); + local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L; + local->aql_txq_limit_high[i] = + IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H; } - local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX; + + local->airtime_flags = AIRTIME_USE_TX | + AIRTIME_USE_RX | + AIRTIME_USE_AQL; + local->aql_threshold = IEEE80211_AQL_THRESHOLD; + atomic_set(&local->aql_total_pending_airtime, 0); INIT_LIST_HEAD(&local->chanctx_list); mutex_init(&local->chanctx_mtx); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 8d3a2389b055..8eafd81e97b4 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -210,6 +210,20 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, return NULL; } +struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local, + const u8 *sta_addr, const u8 *vif_addr) +{ + struct rhlist_head *tmp; + struct sta_info *sta; + + for_each_sta_info(local, sta_addr, sta, tmp) { + if (ether_addr_equal(vif_addr, sta->sdata->vif.addr)) + return sta; + } + + return NULL; +} + struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, int idx) { @@ -396,6 +410,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, skb_queue_head_init(&sta->ps_tx_buf[i]); skb_queue_head_init(&sta->tx_filtered[i]); sta->airtime[i].deficit = sta->airtime_weight; + atomic_set(&sta->airtime[i].aql_tx_pending, 0); + sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i]; + sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i]; } for (i = 0; i < IEEE80211_NUM_TIDS; i++) @@ -1893,6 +1910,41 @@ void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid, } EXPORT_SYMBOL(ieee80211_sta_register_airtime); +void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, + 
struct sta_info *sta, u8 ac, + u16 tx_airtime, bool tx_completed) +{ + int tx_pending; + + if (!tx_completed) { + if (sta) + atomic_add(tx_airtime, + &sta->airtime[ac].aql_tx_pending); + + atomic_add(tx_airtime, &local->aql_total_pending_airtime); + return; + } + + if (sta) { + tx_pending = atomic_sub_return(tx_airtime, + &sta->airtime[ac].aql_tx_pending); + if (WARN_ONCE(tx_pending < 0, + "STA %pM AC %d txq pending airtime underflow: %u, %u", + sta->addr, ac, tx_pending, tx_airtime)) + atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending, + tx_pending, 0); + } + + tx_pending = atomic_sub_return(tx_airtime, + &local->aql_total_pending_airtime); + if (WARN_ONCE(tx_pending < 0, + "Device %s AC %d pending airtime underflow: %u, %u", + wiphy_name(local->hw.wiphy), ac, tx_pending, + tx_airtime)) + atomic_cmpxchg(&local->aql_total_pending_airtime, + tx_pending, 0); +} + int sta_info_move_state(struct sta_info *sta, enum ieee80211_sta_state new_state) { diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 369c2dddce52..ad5d8a4ae56d 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -127,13 +127,21 @@ enum ieee80211_agg_stop_reason { /* Debugfs flags to enable/disable use of RX/TX airtime in scheduler */ #define AIRTIME_USE_TX BIT(0) #define AIRTIME_USE_RX BIT(1) +#define AIRTIME_USE_AQL BIT(2) struct airtime_info { u64 rx_airtime; u64 tx_airtime; s64 deficit; + atomic_t aql_tx_pending; /* Estimated airtime for frames pending */ + u32 aql_limit_low; + u32 aql_limit_high; }; +void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, + struct sta_info *sta, u8 ac, + u16 tx_airtime, bool tx_completed); + struct sta_info; /** @@ -725,6 +733,10 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata, const u8 *addr); +/* user must hold sta_mtx or be in RCU critical section */ +struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local, + const u8 *sta_addr, const u8 *vif_addr); + #define for_each_sta_info(local, _addr, _sta, _tmp) \ rhl_for_each_entry_rcu(_sta, _tmp, \ sta_info_hash_lookup(local, _addr), hash_node) diff --git a/net/mac80211/status.c b/net/mac80211/status.c index ab8ba5835ca0..b720feaf9a74 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -670,12 +670,26 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local, struct sk_buff *skb, bool dropped) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u16 tx_time_est = ieee80211_info_get_tx_time_est(info); struct ieee80211_hdr *hdr = (void *)skb->data; bool acked = info->flags & IEEE80211_TX_STAT_ACK; if (dropped) acked = false; + if (tx_time_est) { + struct sta_info *sta; + + rcu_read_lock(); + + sta = sta_info_get_by_addrs(local, hdr->addr1, hdr->addr2); + ieee80211_sta_update_pending_airtime(local, sta, + skb_get_queue_mapping(skb), + tx_time_est, + true); + rcu_read_unlock(); + } + if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) { struct ieee80211_sub_if_data *sdata; @@ -877,6 +891,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, struct ieee80211_bar *bar; int shift = 0; int tid = IEEE80211_NUM_TIDS; + u16 tx_time_est; rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count); @@ -986,6 +1001,17 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, ieee80211_sta_register_airtime(&sta->sta, tid, info->status.tx_time, 0); + if ((tx_time_est = ieee80211_info_get_tx_time_est(info)) > 0) { + /* Do this here to avoid the expensive lookup of 
the sta + * in ieee80211_report_used_skb(). + */ + ieee80211_sta_update_pending_airtime(local, sta, + skb_get_queue_mapping(skb), + tx_time_est, + true); + ieee80211_info_set_tx_time_est(info, 0); + } + if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) { if (info->flags & IEEE80211_TX_STAT_ACK) { if (sta->status_stats.lost_packets) @@ -1030,7 +1056,8 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, I802_DEBUG_INC(local->dot11FailedCount); } - if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) && + if ((ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && + ieee80211_has_pm(fc) && ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) && !(info->flags & IEEE80211_TX_CTL_INJECTED) && local->ps_sdata && !(local->scanning)) { @@ -1073,19 +1100,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) .skb = skb, .info = IEEE80211_SKB_CB(skb), }; - struct rhlist_head *tmp; struct sta_info *sta; rcu_read_lock(); - for_each_sta_info(local, hdr->addr1, sta, tmp) { - /* skip wrong virtual interface */ - if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr)) - continue; - + sta = sta_info_get_by_addrs(local, hdr->addr1, hdr->addr2); + if (sta) status.sta = &sta->sta; - break; - } __ieee80211_tx_status(hw, &status); rcu_read_unlock(); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index db38be1b75fa..b696b9136f4c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2270,6 +2270,9 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, * isn't always enough to find the interface to use; for proper * VLAN/WDS support we will need a different mechanism (which * likely isn't going to be monitor interfaces). + * + * This is necessary, for example, for old hostapd versions that + * don't use nl80211-based management TX/RX. 
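+ * Such userspace injects those frames through a monitor interface,
+ * so frame injection via monitor interfaces has to keep working here.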
*/ sdata = IEEE80211_DEV_TO_SUB_IF(dev); @@ -3551,6 +3554,9 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, WARN_ON_ONCE(softirq_count() == 0); + if (!ieee80211_txq_airtime_check(hw, txq)) + return NULL; + begin: spin_lock_bh(&fq->lock); @@ -3661,6 +3667,21 @@ begin: } IEEE80211_SKB_CB(skb)->control.vif = vif; + + if (local->airtime_flags & AIRTIME_USE_AQL) { + u32 airtime; + + airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta, + skb->len); + if (airtime) { + airtime = ieee80211_info_set_tx_time_est(info, airtime); + ieee80211_sta_update_pending_airtime(local, tx.sta, + txq->ac, + airtime, + false); + } + } + return skb; out: @@ -3674,7 +3695,8 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_txq *ret = NULL; - struct txq_info *txqi = NULL; + struct txq_info *txqi = NULL, *head = NULL; + bool found_eligible_txq = false; spin_lock_bh(&local->active_txq_lock[ac]); @@ -3685,13 +3707,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) if (!txqi) goto out; + if (txqi == head) { + if (!found_eligible_txq) + goto out; + else + found_eligible_txq = false; + } + + if (!head) + head = txqi; + if (txqi->txq.sta) { struct sta_info *sta = container_of(txqi->txq.sta, - struct sta_info, sta); + struct sta_info, sta); + bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq); + s64 deficit = sta->airtime[txqi->txq.ac].deficit; + + if (aql_check) + found_eligible_txq = true; - if (sta->airtime[txqi->txq.ac].deficit < 0) { + if (deficit < 0) sta->airtime[txqi->txq.ac].deficit += sta->airtime_weight; + + if (deficit < 0 || !aql_check) { list_move_tail(&txqi->schedule_order, &local->active_txqs[txqi->txq.ac]); goto begin; @@ -3745,6 +3784,33 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw, } EXPORT_SYMBOL(__ieee80211_schedule_txq); +bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct sta_info *sta; + struct ieee80211_local *local = hw_to_local(hw); + + if (!(local->airtime_flags & AIRTIME_USE_AQL)) + return true; + + if (!txq->sta) + return true; + + sta = container_of(txq->sta, struct sta_info, sta); + if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) < + sta->airtime[txq->ac].aql_limit_low) + return true; + + if (atomic_read(&local->aql_total_pending_airtime) < + local->aql_threshold && + atomic_read(&sta->airtime[txq->ac].aql_tx_pending) < + sta->airtime[txq->ac].aql_limit_high) + return true; + + return false; +} +EXPORT_SYMBOL(ieee80211_txq_airtime_check); + bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 1a04e0929738..be5e95a0d876 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -25,7 +25,8 @@ /* 3 Counters support added */ /* 4 Comments support added */ /* 5 Forceadd support added */ -#define IPSET_TYPE_REV_MAX 6 /* skbinfo support added */ +/* 6 skbinfo support added */ +#define IPSET_TYPE_REV_MAX 7 /* interface wildcard support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>"); @@ -57,6 +58,7 @@ struct hash_netiface4_elem { u8 cidr; u8 nomatch; u8 elem; + u8 wildcard; char iface[IFNAMSIZ]; }; @@ -71,7 +73,9 @@ hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1, ip1->cidr == ip2->cidr && (++*multi) && ip1->physdev == ip2->physdev && - 
strcmp(ip1->iface, ip2->iface) == 0; + (ip1->wildcard ? + strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 : + strcmp(ip1->iface, ip2->iface) == 0); } static int @@ -103,7 +107,8 @@ static bool hash_netiface4_data_list(struct sk_buff *skb, const struct hash_netiface4_elem *data) { - u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; + u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) | + (data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0); if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; @@ -229,6 +234,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], e.physdev = 1; if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); + if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD) + e.wildcard = 1; } if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { e.ip = htonl(ip & ip_set_hostmask(e.cidr)); @@ -280,6 +287,7 @@ struct hash_netiface6_elem { u8 cidr; u8 nomatch; u8 elem; + u8 wildcard; char iface[IFNAMSIZ]; }; @@ -294,7 +302,9 @@ hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1, ip1->cidr == ip2->cidr && (++*multi) && ip1->physdev == ip2->physdev && - strcmp(ip1->iface, ip2->iface) == 0; + (ip1->wildcard ? + strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 : + strcmp(ip1->iface, ip2->iface) == 0); } static int @@ -326,7 +336,8 @@ static bool hash_netiface6_data_list(struct sk_buff *skb, const struct hash_netiface6_elem *data) { - u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0; + u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) | + (data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0); if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; @@ -440,6 +451,8 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], e.physdev = 1; if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); + if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD) + e.wildcard = 1; } ret = adtfn(set, &e, &ext, &ext, flags); diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 8468d2d02284..9889d52eda82 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -18,11 +18,11 @@ static DEFINE_MUTEX(flowtable_lock); static LIST_HEAD(flowtables); static void -flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct, +flow_offload_fill_dir(struct flow_offload *flow, enum flow_offload_tuple_dir dir) { struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; - struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple; + struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple; ft->dir = dir; @@ -57,8 +57,8 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct) flow->ct = ct; - flow_offload_fill_dir(flow, ct, FLOW_OFFLOAD_DIR_ORIGINAL); - flow_offload_fill_dir(flow, ct, FLOW_OFFLOAD_DIR_REPLY); + flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL); + flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY); if (ct->status & IPS_SRC_NAT) flow->flags |= FLOW_OFFLOAD_SNAT; diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c index bfb910b874ce..88bedf1ff1ae 100644 --- a/net/netfilter/nf_flow_table_inet.c +++ b/net/netfilter/nf_flow_table_inet.c @@ -21,11 +21,34 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb, return NF_ACCEPT; } +static int nf_flow_rule_route_inet(struct net *net, + const struct flow_offload *flow, + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule) +{ + const struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple; + int err; + + switch (flow_tuple->l3proto) { + case NFPROTO_IPV4: 
+ err = nf_flow_rule_route_ipv4(net, flow, dir, flow_rule); + break; + case NFPROTO_IPV6: + err = nf_flow_rule_route_ipv6(net, flow, dir, flow_rule); + break; + default: + err = -1; + break; + } + + return err; +} + static struct nf_flowtable_type flowtable_inet = { .family = NFPROTO_INET, .init = nf_flow_table_init, .setup = nf_flow_table_offload_setup, - .action = nf_flow_rule_route, + .action = nf_flow_rule_route_inet, .free = nf_flow_table_free, .hook = nf_flow_offload_inet_hook, .owner = THIS_MODULE, diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index 9be61f47303a..c54c9a6cc981 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -112,13 +112,22 @@ static void flow_offload_mangle(struct flow_action_entry *entry, memcpy(&entry->mangle.val, value, sizeof(u32)); } +static inline struct flow_action_entry * +flow_action_entry_next(struct nf_flow_rule *flow_rule) +{ + int i = flow_rule->rule->action.num_entries++; + + return &flow_rule->rule->action.entries[i]; +} + static int flow_offload_eth_src(struct net *net, const struct flow_offload *flow, enum flow_offload_tuple_dir dir, - struct flow_action_entry *entry0, - struct flow_action_entry *entry1) + struct nf_flow_rule *flow_rule) { const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple; + struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule); + struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule); struct net_device *dev; u32 mask, val; u16 val16; @@ -145,10 +154,11 @@ static int flow_offload_eth_src(struct net *net, static int flow_offload_eth_dst(struct net *net, const struct flow_offload *flow, enum flow_offload_tuple_dir dir, - struct flow_action_entry *entry0, - struct flow_action_entry *entry1) + struct nf_flow_rule *flow_rule) { const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple; + struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule); + struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule); struct neighbour *n; u32 mask, val; u16 val16; @@ -175,8 +185,9 @@ static int flow_offload_eth_dst(struct net *net, static void flow_offload_ipv4_snat(struct net *net, const struct flow_offload *flow, enum flow_offload_tuple_dir dir, - struct flow_action_entry *entry) + struct nf_flow_rule *flow_rule) { + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); u32 mask = ~htonl(0xffffffff); __be32 addr; u32 offset; @@ -201,8 +212,9 @@ static void flow_offload_ipv4_snat(struct net *net, static void flow_offload_ipv4_dnat(struct net *net, const struct flow_offload *flow, enum flow_offload_tuple_dir dir, - struct flow_action_entry *entry) + struct nf_flow_rule *flow_rule) { + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); u32 mask = ~htonl(0xffffffff); __be32 addr; u32 offset; @@ -224,6 +236,71 @@ static void flow_offload_ipv4_dnat(struct net *net, (u8 *)&addr, (u8 *)&mask); } +static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule, + unsigned int offset, + u8 *addr, u8 *mask) +{ + struct flow_action_entry *entry; + int i; + + for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) { + entry = flow_action_entry_next(flow_rule); + flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6, + offset + i, + &addr[i], mask); + } +} + +static void flow_offload_ipv6_snat(struct net *net, + const struct flow_offload *flow, + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule) +{ + u32 mask = 
~htonl(0xffffffff); + const u8 *addr; + u32 offset; + + switch (dir) { + case FLOW_OFFLOAD_DIR_ORIGINAL: + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr; + offset = offsetof(struct ipv6hdr, saddr); + break; + case FLOW_OFFLOAD_DIR_REPLY: + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr; + offset = offsetof(struct ipv6hdr, daddr); + break; + default: + return; + } + + flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask); +} + +static void flow_offload_ipv6_dnat(struct net *net, + const struct flow_offload *flow, + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule) +{ + u32 mask = ~htonl(0xffffffff); + const u8 *addr; + u32 offset; + + switch (dir) { + case FLOW_OFFLOAD_DIR_ORIGINAL: + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr; + offset = offsetof(struct ipv6hdr, daddr); + break; + case FLOW_OFFLOAD_DIR_REPLY: + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr; + offset = offsetof(struct ipv6hdr, saddr); + break; + default: + return; + } + + flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask); +} + static int flow_offload_l4proto(const struct flow_offload *flow) { u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto; @@ -246,8 +323,9 @@ static int flow_offload_l4proto(const struct flow_offload *flow) static void flow_offload_port_snat(struct net *net, const struct flow_offload *flow, enum flow_offload_tuple_dir dir, - struct flow_action_entry *entry) + struct nf_flow_rule *flow_rule) { + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); u32 mask = ~htonl(0xffff0000); __be16 port; u32 offset; @@ -272,8 +350,9 @@ static void flow_offload_port_snat(struct net *net, static void flow_offload_port_dnat(struct net *net, const struct flow_offload *flow, enum flow_offload_tuple_dir dir, - struct flow_action_entry *entry) + struct nf_flow_rule *flow_rule) { + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); u32 mask = ~htonl(0xffff); __be16 port; u32 offset; @@ -297,9 +376,10 @@ static void flow_offload_port_dnat(struct net *net, static void flow_offload_ipv4_checksum(struct net *net, const struct flow_offload *flow, - struct flow_action_entry *entry) + struct nf_flow_rule *flow_rule) { u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto; + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); entry->id = FLOW_ACTION_CSUM; entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR; @@ -316,8 +396,9 @@ static void flow_offload_ipv4_checksum(struct net *net, static void flow_offload_redirect(const struct flow_offload *flow, enum flow_offload_tuple_dir dir, - struct flow_action_entry *entry) + struct nf_flow_rule *flow_rule) { + struct flow_action_entry *entry = flow_action_entry_next(flow_rule); struct rtable *rt; rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache; @@ -326,45 +407,56 @@ static void flow_offload_redirect(const struct flow_offload *flow, dev_hold(rt->dst.dev); } -int nf_flow_rule_route(struct net *net, const struct flow_offload *flow, - enum flow_offload_tuple_dir dir, - struct nf_flow_rule *flow_rule) +int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow, + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule) { - int i; - - if (flow_offload_eth_src(net, flow, dir, - &flow_rule->rule->action.entries[0], - &flow_rule->rule->action.entries[1]) < 0) + if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 || + 
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0) return -1; - if (flow_offload_eth_dst(net, flow, dir, - &flow_rule->rule->action.entries[2], - &flow_rule->rule->action.entries[3]) < 0) - return -1; - - i = 4; if (flow->flags & FLOW_OFFLOAD_SNAT) { - flow_offload_ipv4_snat(net, flow, dir, - &flow_rule->rule->action.entries[i++]); - flow_offload_port_snat(net, flow, dir, - &flow_rule->rule->action.entries[i++]); + flow_offload_ipv4_snat(net, flow, dir, flow_rule); + flow_offload_port_snat(net, flow, dir, flow_rule); } if (flow->flags & FLOW_OFFLOAD_DNAT) { - flow_offload_ipv4_dnat(net, flow, dir, - &flow_rule->rule->action.entries[i++]); - flow_offload_port_dnat(net, flow, dir, - &flow_rule->rule->action.entries[i++]); + flow_offload_ipv4_dnat(net, flow, dir, flow_rule); + flow_offload_port_dnat(net, flow, dir, flow_rule); } if (flow->flags & FLOW_OFFLOAD_SNAT || flow->flags & FLOW_OFFLOAD_DNAT) - flow_offload_ipv4_checksum(net, flow, - &flow_rule->rule->action.entries[i++]); + flow_offload_ipv4_checksum(net, flow, flow_rule); - flow_offload_redirect(flow, dir, &flow_rule->rule->action.entries[i++]); + flow_offload_redirect(flow, dir, flow_rule); - return i; + return 0; } -EXPORT_SYMBOL_GPL(nf_flow_rule_route); +EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4); + +int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow, + enum flow_offload_tuple_dir dir, + struct nf_flow_rule *flow_rule) +{ + if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 || + flow_offload_eth_dst(net, flow, dir, flow_rule) < 0) + return -1; + + if (flow->flags & FLOW_OFFLOAD_SNAT) { + flow_offload_ipv6_snat(net, flow, dir, flow_rule); + flow_offload_port_snat(net, flow, dir, flow_rule); + } + if (flow->flags & FLOW_OFFLOAD_DNAT) { + flow_offload_ipv6_dnat(net, flow, dir, flow_rule); + flow_offload_port_dnat(net, flow, dir, flow_rule); + } + + flow_offload_redirect(flow, dir, flow_rule); + + return 0; +} +EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6); + +#define NF_FLOW_RULE_ACTION_MAX 16 static struct nf_flow_rule * nf_flow_offload_rule_alloc(struct net *net, @@ -375,13 +467,13 @@ nf_flow_offload_rule_alloc(struct net *net, const struct flow_offload *flow = offload->flow; const struct flow_offload_tuple *tuple; struct nf_flow_rule *flow_rule; - int err = -ENOMEM, num_actions; + int err = -ENOMEM; flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL); if (!flow_rule) goto err_flow; - flow_rule->rule = flow_rule_alloc(10); + flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX); if (!flow_rule->rule) goto err_flow_rule; @@ -394,12 +486,10 @@ nf_flow_offload_rule_alloc(struct net *net, if (err < 0) goto err_flow_match; - num_actions = flowtable->type->action(net, flow, dir, flow_rule); - if (num_actions < 0) + flow_rule->rule->action.num_entries = 0; + if (flowtable->type->action(net, flow, dir, flow_rule) < 0) goto err_flow_match; - flow_rule->rule->action.num_entries = num_actions; - return flow_rule; err_flow_match: @@ -722,6 +812,9 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable, if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD)) return 0; + if (!dev->netdev_ops->ndo_setup_tc) + return -EOPNOTSUPP; + bo.net = dev_net(dev); bo.block = &flowtable->flow_block; bo.command = cmd; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 2dc636faa322..ff04cdc87f76 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -361,6 +361,7 @@ static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type, static int 
nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) { + struct nft_flow_rule *flow; struct nft_trans *trans; int err; @@ -368,6 +369,16 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) if (trans == NULL) return -ENOMEM; + if (ctx->chain->flags & NFT_CHAIN_HW_OFFLOAD) { + flow = nft_flow_rule_create(ctx->net, rule); + if (IS_ERR(flow)) { + nft_trans_destroy(trans); + return PTR_ERR(flow); + } + + nft_trans_flow_rule(trans) = flow; + } + err = nf_tables_delrule_deactivate(ctx, rule); if (err < 0) { nft_trans_destroy(trans); @@ -5964,16 +5975,22 @@ nft_flowtable_type_get(struct net *net, u8 family) return ERR_PTR(-ENOENT); } +static void nft_unregister_flowtable_hook(struct net *net, + struct nft_flowtable *flowtable, + struct nft_hook *hook) +{ + nf_unregister_net_hook(net, &hook->ops); + flowtable->data.type->setup(&flowtable->data, hook->ops.dev, + FLOW_BLOCK_UNBIND); +} + static void nft_unregister_flowtable_net_hooks(struct net *net, struct nft_flowtable *flowtable) { struct nft_hook *hook; - list_for_each_entry(hook, &flowtable->hook_list, list) { - nf_unregister_net_hook(net, &hook->ops); - flowtable->data.type->setup(&flowtable->data, hook->ops.dev, - FLOW_BLOCK_UNBIND); - } + list_for_each_entry(hook, &flowtable->hook_list, list) + nft_unregister_flowtable_hook(net, flowtable, hook); } static int nft_register_flowtable_net_hooks(struct net *net, @@ -5995,12 +6012,20 @@ static int nft_register_flowtable_net_hooks(struct net *net, } } - flowtable->data.type->setup(&flowtable->data, hook->ops.dev, - FLOW_BLOCK_BIND); - err = nf_register_net_hook(net, &hook->ops); + err = flowtable->data.type->setup(&flowtable->data, + hook->ops.dev, + FLOW_BLOCK_BIND); if (err < 0) goto err_unregister_net_hooks; + err = nf_register_net_hook(net, &hook->ops); + if (err < 0) { + flowtable->data.type->setup(&flowtable->data, + hook->ops.dev, + FLOW_BLOCK_UNBIND); + goto err_unregister_net_hooks; + } + i++; } @@ -6011,9 +6036,7 @@ err_unregister_net_hooks: if (i-- <= 0) break; - nf_unregister_net_hook(net, &hook->ops); - flowtable->data.type->setup(&flowtable->data, hook->ops.dev, - FLOW_BLOCK_UNBIND); + nft_unregister_flowtable_hook(net, flowtable, hook); list_del_rcu(&hook->list); kfree_rcu(hook, rcu); } @@ -6120,7 +6143,7 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, return 0; err5: list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) { - nf_unregister_net_hook(net, &hook->ops); + nft_unregister_flowtable_hook(net, flowtable, hook); list_del_rcu(&hook->list); kfree_rcu(hook, rcu); } @@ -6465,7 +6488,7 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev, if (hook->ops.dev != dev) continue; - nf_unregister_net_hook(dev_net(dev), &hook->ops); + nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook); list_del_rcu(&hook->list); kfree_rcu(hook, rcu); break; diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index cdea3010c7a0..68f17a6921d8 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -159,9 +159,9 @@ static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow, const struct nft_base_chain *basechain, const struct nft_rule *rule, const struct nft_flow_rule *flow, + struct netlink_ext_ack *extack, enum flow_cls_command command) { - struct netlink_ext_ack extack; __be16 proto = ETH_P_ALL; memset(cls_flow, 0, sizeof(*cls_flow)); @@ -170,7 +170,7 @@ static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow, proto = 
flow->proto; nft_flow_offload_common_init(&cls_flow->common, proto, - basechain->ops.priority, &extack); + basechain->ops.priority, extack); cls_flow->command = command; cls_flow->cookie = (unsigned long) rule; if (flow) @@ -182,6 +182,7 @@ static int nft_flow_offload_rule(struct nft_chain *chain, struct nft_flow_rule *flow, enum flow_cls_command command) { + struct netlink_ext_ack extack = {}; struct flow_cls_offload cls_flow; struct nft_base_chain *basechain; @@ -189,7 +190,8 @@ static int nft_flow_offload_rule(struct nft_chain *chain, return -EOPNOTSUPP; basechain = nft_base_chain(chain); - nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, command); + nft_flow_cls_offload_setup(&cls_flow, basechain, rule, flow, &extack, + command); return nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &basechain->flow_block.cb_list); @@ -207,13 +209,15 @@ static int nft_flow_offload_unbind(struct flow_block_offload *bo, { struct flow_block_cb *block_cb, *next; struct flow_cls_offload cls_flow; + struct netlink_ext_ack extack; struct nft_chain *chain; struct nft_rule *rule; chain = &basechain->chain; list_for_each_entry(rule, &chain->rules, list) { + memset(&extack, 0, sizeof(extack)); nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL, - FLOW_CLS_DESTROY); + &extack, FLOW_CLS_DESTROY); nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list); } @@ -385,6 +389,55 @@ static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy, return nft_flow_block_chain(basechain, NULL, cmd); } +static void nft_flow_rule_offload_abort(struct net *net, + struct nft_trans *trans) +{ + int err = 0; + + list_for_each_entry_continue_reverse(trans, &net->nft.commit_list, list) { + if (trans->ctx.family != NFPROTO_NETDEV) + continue; + + switch (trans->msg_type) { + case NFT_MSG_NEWCHAIN: + if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) || + nft_trans_chain_update(trans)) + continue; + + err = nft_flow_offload_chain(trans->ctx.chain, NULL, + FLOW_BLOCK_UNBIND); + break; + case NFT_MSG_DELCHAIN: + if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + continue; + + err = nft_flow_offload_chain(trans->ctx.chain, NULL, + FLOW_BLOCK_BIND); + break; + case NFT_MSG_NEWRULE: + if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + continue; + + err = nft_flow_offload_rule(trans->ctx.chain, + nft_trans_rule(trans), + NULL, FLOW_CLS_DESTROY); + break; + case NFT_MSG_DELRULE: + if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + continue; + + err = nft_flow_offload_rule(trans->ctx.chain, + nft_trans_rule(trans), + nft_trans_flow_rule(trans), + FLOW_CLS_REPLACE); + break; + } + + if (WARN_ON_ONCE(err)) + break; + } +} + int nft_flow_rule_offload_commit(struct net *net) { struct nft_trans *trans; @@ -418,14 +471,14 @@ int nft_flow_rule_offload_commit(struct net *net) continue; if (trans->ctx.flags & NLM_F_REPLACE || - !(trans->ctx.flags & NLM_F_APPEND)) - return -EOPNOTSUPP; - + !(trans->ctx.flags & NLM_F_APPEND)) { + err = -EOPNOTSUPP; + break; + } err = nft_flow_offload_rule(trans->ctx.chain, nft_trans_rule(trans), nft_trans_flow_rule(trans), FLOW_CLS_REPLACE); - nft_flow_rule_destroy(nft_trans_flow_rule(trans)); break; case NFT_MSG_DELRULE: if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) @@ -433,13 +486,31 @@ int nft_flow_rule_offload_commit(struct net *net) err = nft_flow_offload_rule(trans->ctx.chain, nft_trans_rule(trans), - nft_trans_flow_rule(trans), - FLOW_CLS_DESTROY); + NULL, FLOW_CLS_DESTROY); break; } - if (err) - return err; + if (err) { + 
nft_flow_rule_offload_abort(net, trans); + break; + } + } + + list_for_each_entry(trans, &net->nft.commit_list, list) { + if (trans->ctx.family != NFPROTO_NETDEV) + continue; + + switch (trans->msg_type) { + case NFT_MSG_NEWRULE: + case NFT_MSG_DELRULE: + if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + continue; + + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); + break; + default: + break; + } } return err; diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index 0744b2bb46da..b8092069f868 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/netlink.h> #include <linux/netfilter.h> +#include <linux/if_arp.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables_offload.h> @@ -125,6 +126,11 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx, flow->match.dissector.used_keys |= BIT(reg->key); flow->match.dissector.offset[reg->key] = reg->base_offset; + if (reg->key == FLOW_DISSECTOR_KEY_META && + reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) && + nft_reg_load16(priv->data.data) != ARPHRD_ETHER) + return -EOPNOTSUPP; + nft_offload_update_dependency(ctx, &priv->data, priv->len); return 0; diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 317e3a9e8c5b..9740b554fdb3 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -33,19 +33,19 @@ static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state); -static u8 nft_meta_weekday(unsigned long secs) +static u8 nft_meta_weekday(time64_t secs) { unsigned int dse; u8 wday; secs -= NFT_META_SECS_PER_MINUTE * sys_tz.tz_minuteswest; - dse = secs / NFT_META_SECS_PER_DAY; + dse = div_u64(secs, NFT_META_SECS_PER_DAY); wday = (4 + dse) % NFT_META_DAYS_PER_WEEK; return wday; } -static u32 nft_meta_hour(unsigned long secs) +static u32 nft_meta_hour(time64_t secs) { struct tm tm; @@ -250,10 +250,10 @@ void nft_meta_get_eval(const struct nft_expr *expr, nft_reg_store64(dest, ktime_get_real_ns()); break; case NFT_META_TIME_DAY: - nft_reg_store8(dest, nft_meta_weekday(get_seconds())); + nft_reg_store8(dest, nft_meta_weekday(ktime_get_real_seconds())); break; case NFT_META_TIME_HOUR: - *dest = nft_meta_hour(get_seconds()); + *dest = nft_meta_hour(ktime_get_real_seconds()); break; default: WARN_ON(1); @@ -547,6 +547,14 @@ static int nft_meta_get_offload(struct nft_offload_ctx *ctx, sizeof(__u8), reg); nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT); break; + case NFT_META_IIF: + NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta, + ingress_ifindex, sizeof(__u32), reg); + break; + case NFT_META_IIFTYPE: + NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta, + ingress_iftype, sizeof(__u16), reg); + break; default: return -EOPNOTSUPP; } diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 5cb2d8908d2a..1993af3a2979 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -23,50 +23,58 @@ #include <linux/ip.h> #include <linux/ipv6.h> +static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off, + struct vlan_ethhdr *veth) +{ + if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN)) + return false; + + veth->h_vlan_proto = skb->vlan_proto; + veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb)); + veth->h_vlan_encapsulated_proto = skb->protocol; + + return true; +} + /* add vlan header into the user buffer for if tag was removed by offloads */ static bool nft_payload_copy_vlan(u32 *d, const struct 
sk_buff *skb, u8 offset, u8 len) { int mac_off = skb_mac_header(skb) - skb->data; - u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d; + u8 *vlanh, *dst_u8 = (u8 *) d; struct vlan_ethhdr veth; + u8 vlan_hlen = 0; + + if ((skb->protocol == htons(ETH_P_8021AD) || + skb->protocol == htons(ETH_P_8021Q)) && + offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN) + vlan_hlen += VLAN_HLEN; vlanh = (u8 *) &veth; - if (offset < ETH_HLEN) { - u8 ethlen = min_t(u8, len, ETH_HLEN - offset); + if (offset < VLAN_ETH_HLEN + vlan_hlen) { + u8 ethlen = len; - if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN)) + if (vlan_hlen && + skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0) + return false; + else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth)) return false; - veth.h_vlan_proto = skb->vlan_proto; + if (offset + len > VLAN_ETH_HLEN + vlan_hlen) + ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen; - memcpy(dst_u8, vlanh + offset, ethlen); + memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen); len -= ethlen; if (len == 0) return true; dst_u8 += ethlen; - offset = ETH_HLEN; - } else if (offset >= VLAN_ETH_HLEN) { - offset -= VLAN_HLEN; - goto skip; + offset = ETH_HLEN + vlan_hlen; + } else { + offset -= VLAN_HLEN + vlan_hlen; } - veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); - veth.h_vlan_encapsulated_proto = skb->protocol; - - vlanh += offset; - - vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset); - memcpy(dst_u8, vlanh, vlan_len); - - len -= vlan_len; - if (!len) - return true; - - dst_u8 += vlan_len; - skip: return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0; } @@ -174,6 +182,44 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx, NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs, dst, ETH_ALEN, reg); break; + case offsetof(struct ethhdr, h_proto): + if (priv->len != sizeof(__be16)) + return -EOPNOTSUPP; + + NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, + n_proto, sizeof(__be16), reg); + nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK); + break; + case offsetof(struct vlan_ethhdr, h_vlan_TCI): + if (priv->len != sizeof(__be16)) + return -EOPNOTSUPP; + + NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan, + vlan_tci, sizeof(__be16), reg); + break; + case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto): + if (priv->len != sizeof(__be16)) + return -EOPNOTSUPP; + + NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan, + vlan_tpid, sizeof(__be16), reg); + nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK); + break; + case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr): + if (priv->len != sizeof(__be16)) + return -EOPNOTSUPP; + + NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan, + vlan_tci, sizeof(__be16), reg); + break; + case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) + + sizeof(struct vlan_hdr): + if (priv->len != sizeof(__be16)) + return -EOPNOTSUPP; + + NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan, + vlan_tpid, sizeof(__be16), reg); + break; default: return -EOPNOTSUPP; } diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c index 8dbb4d48f2ed..67cb98489415 100644 --- a/net/netfilter/xt_time.c +++ b/net/netfilter/xt_time.c @@ -77,12 +77,12 @@ static inline bool is_leap(unsigned int y) * This is done in three separate functions so that the most expensive * calculations are done last, in case a "simple match" can be found earlier. 
*/ -static inline unsigned int localtime_1(struct xtm *r, time_t time) +static inline unsigned int localtime_1(struct xtm *r, time64_t time) { unsigned int v, w; /* Each day has 86400s, so finding the hour/minute is actually easy. */ - v = time % SECONDS_PER_DAY; + div_u64_rem(time, SECONDS_PER_DAY, &v); r->second = v % 60; w = v / 60; r->minute = w % 60; @@ -90,13 +90,13 @@ static inline unsigned int localtime_1(struct xtm *r, time_t time) return v; } -static inline void localtime_2(struct xtm *r, time_t time) +static inline void localtime_2(struct xtm *r, time64_t time) { /* * Here comes the rest (weekday, monthday). First, divide the SSTE * by seconds-per-day to get the number of _days_ since the epoch. */ - r->dse = time / 86400; + r->dse = div_u64(time, SECONDS_PER_DAY); /* * 1970-01-01 (w=0) was a Thursday (4). @@ -105,7 +105,7 @@ static inline void localtime_2(struct xtm *r, time_t time) r->weekday = (4 + r->dse - 1) % 7 + 1; } -static void localtime_3(struct xtm *r, time_t time) +static void localtime_3(struct xtm *r, time64_t time) { unsigned int year, i, w = r->dse; @@ -160,7 +160,7 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par) const struct xt_time_info *info = par->matchinfo; unsigned int packet_time; struct xtm current_time; - s64 stamp; + time64_t stamp; /* * We need real time here, but we can neither use skb->tstamp @@ -173,14 +173,14 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par) * 1. match before 13:00 * 2. match after 13:00 * - * If you match against processing time (get_seconds) it + * If you match against processing time (ktime_get_real_seconds) it * may happen that the same packet matches both rules if * it arrived at the right moment before 13:00, so it would be * better to check skb->tstamp and set it via __net_timestamp() * if needed. This however breaks outgoing packets tx timestamp, * and causes them to get delayed forever by fq packet scheduler. */ - stamp = get_seconds(); + stamp = ktime_get_real_seconds(); if (info->flags & XT_TIME_LOCAL_TZ) /* Adjust for local timezone */ @@ -193,6 +193,9 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par) * - 'now' is in the weekday mask * - 'now' is in the daytime range time_start..time_end * (and by default, libxt_time will set these so as to match) + * + * note: info->date_start/stop are unsigned 32-bit values that + * can hold values beyond y2038, but not after y2106. */ if (stamp < info->date_start || stamp > info->date_stop) diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig index 97bd3a2c5c98..4822d6f46947 100644 --- a/net/nfc/hci/Kconfig +++ b/net/nfc/hci/Kconfig @@ -1,12 +1,12 @@ # SPDX-License-Identifier: GPL-2.0-only config NFC_HCI - depends on NFC - tristate "NFC HCI implementation" - default n - help - Say Y here if you want to build support for a kernel NFC HCI - implementation. This is mostly needed for devices that only process - HCI frames, like for example the NXP pn544. + depends on NFC + tristate "NFC HCI implementation" + default n + help + Say Y here if you want to build support for a kernel NFC HCI + implementation. This is mostly needed for devices that only process + HCI frames, like for example the NXP pn544. 
config NFC_SHDLC depends on NFC_HCI diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 68d6af56b243..c13638aeef46 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -474,7 +474,6 @@ drop: } static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = { - [TCA_CT_UNSPEC] = { .strict_start_type = TCA_CT_UNSPEC + 1 }, [TCA_CT_ACTION] = { .type = NLA_U16 }, [TCA_CT_PARMS] = { .type = NLA_EXACT_LEN, .len = sizeof(struct tc_ct) }, [TCA_CT_ZONE] = { .type = NLA_U16 }, diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index 4d8c822b6aca..c7d5e12ee919 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -119,7 +119,6 @@ static int valid_label(const struct nlattr *attr, } static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = { - [TCA_MPLS_UNSPEC] = { .strict_start_type = TCA_MPLS_UNSPEC + 1 }, [TCA_MPLS_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)), [TCA_MPLS_PROTO] = { .type = NLA_U16 }, [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label), diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index d5eff6ac17a9..3ad718576304 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -43,7 +43,7 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, int err = -EINVAL; int rem; - if (!nla || !n) + if (!nla) return NULL; keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL); @@ -171,6 +171,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, } parm = nla_data(pattr); + if (!parm->nkeys) { + NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); + return -EINVAL; + } ksize = parm->nkeys * sizeof(struct tc_pedit_key); if (nla_len(pattr) < sizeof(*parm) + ksize) { NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid"); @@ -184,12 +188,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, index = parm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { - if (!parm->nkeys) { - tcf_idr_cleanup(tn, index); - NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); - ret = -EINVAL; - goto out_free; - } ret = tcf_idr_create(tn, index, est, a, &act_pedit_ops, bind, false, 0); if (ret) { diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index cb34e5d57aaa..6379f9568ab8 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -10,6 +10,8 @@ #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <net/geneve.h> +#include <net/vxlan.h> +#include <net/erspan.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/dst.h> @@ -53,7 +55,11 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a, static const struct nla_policy enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = { + [TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC] = { + .strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN }, [TCA_TUNNEL_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, + [TCA_TUNNEL_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, + [TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED }, }; static const struct nla_policy @@ -64,6 +70,19 @@ geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = { .len = 128 }, }; +static const struct nla_policy +vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = { + [TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 }, +}; + +static const struct nla_policy +erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = { + [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = 
NLA_U32 }, + [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 }, + [TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, +}; + static int tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len, struct netlink_ext_ack *extack) @@ -116,10 +135,89 @@ tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len, return opt_len; } +static int +tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1]; + int err; + + err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla, + vxlan_opt_policy, extack); + if (err < 0) + return err; + + if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp"); + return -EINVAL; + } + + if (dst) { + struct vxlan_metadata *md = dst; + + md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]); + } + + return sizeof(struct vxlan_metadata); +} + +static int +tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1]; + int err; + u8 ver; + + err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla, + erspan_opt_policy, extack); + if (err < 0) + return err; + + if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver"); + return -EINVAL; + } + + ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]); + if (ver == 1) { + if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index"); + return -EINVAL; + } + } else if (ver == 2) { + if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] || + !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid"); + return -EINVAL; + } + } else { + NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect"); + return -EINVAL; + } + + if (dst) { + struct erspan_metadata *md = dst; + + md->version = ver; + if (ver == 1) { + nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]; + md->u.index = nla_get_be32(nla); + } else { + nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR]; + md->u.md2.dir = nla_get_u8(nla); + nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]; + set_hwid(&md->u.md2, nla_get_u8(nla)); + } + } + + return sizeof(struct erspan_metadata); +} + static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst, int dst_len, struct netlink_ext_ack *extack) { - int err, rem, opt_len, len = nla_len(nla), opts_len = 0; + int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0; const struct nlattr *attr, *head = nla_data(nla); err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX, @@ -130,15 +228,48 @@ static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst, nla_for_each_attr(attr, head, len, rem) { switch (nla_type(attr)) { case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE: + if (type && type != TUNNEL_GENEVE_OPT) { + NL_SET_ERR_MSG(extack, "Duplicate type for geneve options"); + return -EINVAL; + } opt_len = tunnel_key_copy_geneve_opt(attr, dst, dst_len, extack); if (opt_len < 0) return opt_len; opts_len += opt_len; + if (opts_len > IP_TUNNEL_OPTS_MAX) { + NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size"); + return -EINVAL; + } if (dst) { dst_len -= opt_len; dst += opt_len; } + type = TUNNEL_GENEVE_OPT; + break; + case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN: + if (type) { + NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options"); 
+ return -EINVAL; + } + opt_len = tunnel_key_copy_vxlan_opt(attr, dst, + dst_len, extack); + if (opt_len < 0) + return opt_len; + opts_len += opt_len; + type = TUNNEL_VXLAN_OPT; + break; + case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN: + if (type) { + NL_SET_ERR_MSG(extack, "Duplicate type for erspan options"); + return -EINVAL; + } + opt_len = tunnel_key_copy_erspan_opt(attr, dst, + dst_len, extack); + if (opt_len < 0) + return opt_len; + opts_len += opt_len; + type = TUNNEL_ERSPAN_OPT; break; } } @@ -175,6 +306,22 @@ static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info, #else return -EAFNOSUPPORT; #endif + case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN: +#if IS_ENABLED(CONFIG_INET) + info->key.tun_flags |= TUNNEL_VXLAN_OPT; + return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info), + opts_len, extack); +#else + return -EAFNOSUPPORT; +#endif + case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN: +#if IS_ENABLED(CONFIG_INET) + info->key.tun_flags |= TUNNEL_ERSPAN_OPT; + return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info), + opts_len, extack); +#else + return -EAFNOSUPPORT; +#endif default: NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type"); return -EINVAL; @@ -451,6 +598,56 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, return 0; } +static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb, + const struct ip_tunnel_info *info) +{ + struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1); + struct nlattr *start; + + start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN); + if (!start) + return -EMSGSIZE; + + if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) { + nla_nest_cancel(skb, start); + return -EMSGSIZE; + } + + nla_nest_end(skb, start); + return 0; +} + +static int tunnel_key_erspan_opts_dump(struct sk_buff *skb, + const struct ip_tunnel_info *info) +{ + struct erspan_metadata *md = (struct erspan_metadata *)(info + 1); + struct nlattr *start; + + start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN); + if (!start) + return -EMSGSIZE; + + if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version)) + goto err; + + if (md->version == 1 && + nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index)) + goto err; + + if (md->version == 2 && + (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR, + md->u.md2.dir) || + nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID, + get_hwid(&md->u.md2)))) + goto err; + + nla_nest_end(skb, start); + return 0; +err: + nla_nest_cancel(skb, start); + return -EMSGSIZE; +} + static int tunnel_key_opts_dump(struct sk_buff *skb, const struct ip_tunnel_info *info) { @@ -468,6 +665,14 @@ static int tunnel_key_opts_dump(struct sk_buff *skb, err = tunnel_key_geneve_opts_dump(skb, info); if (err) goto err_out; + } else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) { + err = tunnel_key_vxlan_opts_dump(skb, info); + if (err) + goto err_out; + } else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) { + err = tunnel_key_erspan_opts_dump(skb, info); + if (err) + goto err_out; } else { err_out: nla_nest_cancel(skb, start); diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 74221e3351c3..c307ee1d6ca6 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -22,6 +22,8 @@ #include <net/ip.h> #include <net/flow_dissector.h> #include <net/geneve.h> +#include <net/vxlan.h> +#include <net/erspan.h> #include <net/dst.h> #include <net/dst_metadata.h> @@ -688,7 +690,11 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { static const struct 
nla_policy enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = { + [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = { + .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN }, [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, + [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, + [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED }, }; static const struct nla_policy @@ -699,6 +705,19 @@ geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { .len = 128 }, }; +static const struct nla_policy +vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = { + [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 }, +}; + +static const struct nla_policy +erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = { + [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 }, + [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, +}; + static void fl_set_key_val(struct nlattr **tb, void *val, int val_type, void *mask, int mask_type, int len) @@ -928,6 +947,105 @@ static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key, return sizeof(struct geneve_opt) + data_len; } +static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key, + int depth, int option_len, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1]; + struct vxlan_metadata *md; + int err; + + md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len]; + memset(md, 0xff, sizeof(*md)); + + if (!depth) + return sizeof(*md); + + if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) { + NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask"); + return -EINVAL; + } + + err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla, + vxlan_opt_policy, extack); + if (err < 0) + return err; + + if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp"); + return -EINVAL; + } + + if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) + md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]); + + return sizeof(*md); +} + +static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, + int depth, int option_len, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1]; + struct erspan_metadata *md; + int err; + + md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len]; + memset(md, 0xff, sizeof(*md)); + md->version = 1; + + if (!depth) + return sizeof(*md); + + if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) { + NL_SET_ERR_MSG(extack, "Non-erspan option type for mask"); + return -EINVAL; + } + + err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla, + erspan_opt_policy, extack); + if (err < 0) + return err; + + if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver"); + return -EINVAL; + } + + if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) + md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]); + + if (md->version == 1) { + if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { + NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index"); + return -EINVAL; + } + if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { + nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]; + md->u.index = nla_get_be32(nla); + } + } else if (md->version == 2) { + if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] || + 
!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) { + NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid"); + return -EINVAL; + } + if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) { + nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]; + md->u.md2.dir = nla_get_u8(nla); + } + if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) { + nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]; + set_hwid(&md->u.md2, nla_get_u8(nla)); + } + } else { + NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect"); + return -EINVAL; + } + + return sizeof(*md); +} + static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, struct fl_flow_key *mask, struct netlink_ext_ack *extack) @@ -958,6 +1076,11 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) { switch (nla_type(nla_opt_key)) { case TCA_FLOWER_KEY_ENC_OPTS_GENEVE: + if (key->enc_opts.dst_opt_type && + key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) { + NL_SET_ERR_MSG(extack, "Duplicate type for geneve options"); + return -EINVAL; + } option_len = 0; key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; option_len = fl_set_geneve_opt(nla_opt_key, key, @@ -986,6 +1109,72 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, if (msk_depth) nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); break; + case TCA_FLOWER_KEY_ENC_OPTS_VXLAN: + if (key->enc_opts.dst_opt_type) { + NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options"); + return -EINVAL; + } + option_len = 0; + key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; + option_len = fl_set_vxlan_opt(nla_opt_key, key, + key_depth, option_len, + extack); + if (option_len < 0) + return option_len; + + key->enc_opts.len += option_len; + /* At the same time we need to parse through the mask + * in order to verify exact and mask attribute lengths. + */ + mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; + option_len = fl_set_vxlan_opt(nla_opt_msk, mask, + msk_depth, option_len, + extack); + if (option_len < 0) + return option_len; + + mask->enc_opts.len += option_len; + if (key->enc_opts.len != mask->enc_opts.len) { + NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); + return -EINVAL; + } + + if (msk_depth) + nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); + break; + case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN: + if (key->enc_opts.dst_opt_type) { + NL_SET_ERR_MSG(extack, "Duplicate type for erspan options"); + return -EINVAL; + } + option_len = 0; + key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; + option_len = fl_set_erspan_opt(nla_opt_key, key, + key_depth, option_len, + extack); + if (option_len < 0) + return option_len; + + key->enc_opts.len += option_len; + /* At the same time we need to parse through the mask + * in order to verify exact and mask attribute lengths. 
+ */ + mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; + option_len = fl_set_erspan_opt(nla_opt_msk, mask, + msk_depth, option_len, + extack); + if (option_len < 0) + return option_len; + + mask->enc_opts.len += option_len; + if (key->enc_opts.len != mask->enc_opts.len) { + NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); + return -EINVAL; + } + + if (msk_depth) + nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); + break; default: NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); return -EINVAL; @@ -2135,6 +2324,61 @@ nla_put_failure: return -EMSGSIZE; } +static int fl_dump_key_vxlan_opt(struct sk_buff *skb, + struct flow_dissector_key_enc_opts *enc_opts) +{ + struct vxlan_metadata *md; + struct nlattr *nest; + + nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN); + if (!nest) + goto nla_put_failure; + + md = (struct vxlan_metadata *)&enc_opts->data[0]; + if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) + goto nla_put_failure; + + nla_nest_end(skb, nest); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + +static int fl_dump_key_erspan_opt(struct sk_buff *skb, + struct flow_dissector_key_enc_opts *enc_opts) +{ + struct erspan_metadata *md; + struct nlattr *nest; + + nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN); + if (!nest) + goto nla_put_failure; + + md = (struct erspan_metadata *)&enc_opts->data[0]; + if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version)) + goto nla_put_failure; + + if (md->version == 1 && + nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index)) + goto nla_put_failure; + + if (md->version == 2 && + (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, + md->u.md2.dir) || + nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, + get_hwid(&md->u.md2)))) + goto nla_put_failure; + + nla_nest_end(skb, nest); + return 0; + +nla_put_failure: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + static int fl_dump_key_ct(struct sk_buff *skb, struct flow_dissector_key_ct *key, struct flow_dissector_key_ct *mask) @@ -2188,6 +2432,16 @@ static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, if (err) goto nla_put_failure; break; + case TUNNEL_VXLAN_OPT: + err = fl_dump_key_vxlan_opt(skb, enc_opts); + if (err) + goto nla_put_failure; + break; + case TUNNEL_ERSPAN_OPT: + err = fl_dump_key_erspan_opt(skb, enc_opts); + if (err) + goto nla_put_failure; + break; default: goto nla_put_failure; } diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index df98a887eb89..b0b0dc46af61 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -22,6 +22,7 @@ #define QUEUE_THRESHOLD 16384 #define DQCOUNT_INVALID -1 +#define DTIME_INVALID 0xffffffffffffffff #define MAX_PROB 0xffffffffffffffff #define PIE_SCALE 8 @@ -34,6 +35,7 @@ struct pie_params { u32 beta; /* and are used for shift relative to 1 */ bool ecn; /* true if ecn is enabled */ bool bytemode; /* to scale drop early prob based on pkt size */ + u8 dq_rate_estimator; /* to calculate delay using Little's law */ }; /* variables used */ @@ -77,11 +79,34 @@ static void pie_params_init(struct pie_params *params) params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC); /* 15 ms */ params->ecn = false; params->bytemode = false; + params->dq_rate_estimator = false; +} + +/* private skb vars */ +struct pie_skb_cb { + psched_time_t enqueue_time; +}; + +static struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb) +{ + qdisc_cb_private_validate(skb, sizeof(struct pie_skb_cb)); + return (struct pie_skb_cb 
*)qdisc_skb_cb(skb)->data; +} + +static psched_time_t pie_get_enqueue_time(const struct sk_buff *skb) +{ + return get_pie_cb(skb)->enqueue_time; +} + +static void pie_set_enqueue_time(struct sk_buff *skb) +{ + get_pie_cb(skb)->enqueue_time = psched_get_time(); } static void pie_vars_init(struct pie_vars *vars) { vars->dq_count = DQCOUNT_INVALID; + vars->dq_tstamp = DTIME_INVALID; vars->accu_prob = 0; vars->avg_dq_rate = 0; /* default of 150 ms in pschedtime */ @@ -172,6 +197,10 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* we can enqueue the packet */ if (enqueue) { + /* Set enqueue time only when dq_rate_estimator is disabled. */ + if (!q->params.dq_rate_estimator) + pie_set_enqueue_time(skb); + q->stats.packets_in++; if (qdisc_qlen(sch) > q->stats.maxq) q->stats.maxq = qdisc_qlen(sch); @@ -194,6 +223,7 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = { [TCA_PIE_BETA] = {.type = NLA_U32}, [TCA_PIE_ECN] = {.type = NLA_U32}, [TCA_PIE_BYTEMODE] = {.type = NLA_U32}, + [TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32}, }; static int pie_change(struct Qdisc *sch, struct nlattr *opt, @@ -247,6 +277,10 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt, if (tb[TCA_PIE_BYTEMODE]) q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]); + if (tb[TCA_PIE_DQ_RATE_ESTIMATOR]) + q->params.dq_rate_estimator = + nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]); + /* Drop excess packets if new limit is lower */ qlen = sch->q.qlen; while (sch->q.qlen > sch->limit) { @@ -266,6 +300,28 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb) { struct pie_sched_data *q = qdisc_priv(sch); int qlen = sch->qstats.backlog; /* current queue size in bytes */ + psched_time_t now = psched_get_time(); + u32 dtime = 0; + + /* If dq_rate_estimator is disabled, calculate qdelay using the + * packet timestamp. + */ + if (!q->params.dq_rate_estimator) { + q->vars.qdelay = now - pie_get_enqueue_time(skb); + + if (q->vars.dq_tstamp != DTIME_INVALID) + dtime = now - q->vars.dq_tstamp; + + q->vars.dq_tstamp = now; + + if (qlen == 0) + q->vars.qdelay = 0; + + if (dtime == 0) + return; + + goto burst_allowance_reduction; + } /* If current queue is about 10 packets or more and dq_count is unset * we have enough packets to calculate the drain rate. 
Save @@ -289,10 +345,10 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb) q->vars.dq_count += skb->len; if (q->vars.dq_count >= QUEUE_THRESHOLD) { - psched_time_t now = psched_get_time(); - u32 dtime = now - q->vars.dq_tstamp; u32 count = q->vars.dq_count << PIE_SCALE; + dtime = now - q->vars.dq_tstamp; + if (dtime == 0) return; @@ -317,14 +373,19 @@ static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb) q->vars.dq_tstamp = psched_get_time(); } - if (q->vars.burst_time > 0) { - if (q->vars.burst_time > dtime) - q->vars.burst_time -= dtime; - else - q->vars.burst_time = 0; - } + goto burst_allowance_reduction; } } + + return; + +burst_allowance_reduction: + if (q->vars.burst_time > 0) { + if (q->vars.burst_time > dtime) + q->vars.burst_time -= dtime; + else + q->vars.burst_time = 0; + } } static void calculate_probability(struct Qdisc *sch) @@ -332,19 +393,25 @@ static void calculate_probability(struct Qdisc *sch) struct pie_sched_data *q = qdisc_priv(sch); u32 qlen = sch->qstats.backlog; /* queue size in bytes */ psched_time_t qdelay = 0; /* in pschedtime */ - psched_time_t qdelay_old = q->vars.qdelay; /* in pschedtime */ + psched_time_t qdelay_old = 0; /* in pschedtime */ s64 delta = 0; /* determines the change in probability */ u64 oldprob; u64 alpha, beta; u32 power; bool update_prob = true; - q->vars.qdelay_old = q->vars.qdelay; + if (q->params.dq_rate_estimator) { + qdelay_old = q->vars.qdelay; + q->vars.qdelay_old = q->vars.qdelay; - if (q->vars.avg_dq_rate > 0) - qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate; - else - qdelay = 0; + if (q->vars.avg_dq_rate > 0) + qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate; + else + qdelay = 0; + } else { + qdelay = q->vars.qdelay; + qdelay_old = q->vars.qdelay_old; + } /* If qdelay is zero and qlen is not, it means qlen is very small, less * than dequeue_rate, so we do not update probabilty in this round @@ -430,14 +497,18 @@ static void calculate_probability(struct Qdisc *sch) /* We restart the measurement cycle if the following conditions are met * 1. If the delay has been low for 2 consecutive Tupdate periods * 2. Calculated drop probability is zero - * 3. We have atleast one estimate for the avg_dq_rate ie., - * is a non-zero value + * 3. 
If average dq_rate_estimator is enabled, we have atleast one + * estimate for the avg_dq_rate ie., is a non-zero value */ if ((q->vars.qdelay < q->params.target / 2) && (q->vars.qdelay_old < q->params.target / 2) && q->vars.prob == 0 && - q->vars.avg_dq_rate > 0) + (!q->params.dq_rate_estimator || q->vars.avg_dq_rate > 0)) { pie_vars_init(&q->vars); + } + + if (!q->params.dq_rate_estimator) + q->vars.qdelay_old = qdelay; } static void pie_timer(struct timer_list *t) @@ -497,7 +568,9 @@ static int pie_dump(struct Qdisc *sch, struct sk_buff *skb) nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) || nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) || nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) || - nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode)) + nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode) || + nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR, + q->params.dq_rate_estimator)) goto nla_put_failure; return nla_nest_end(skb, opts); @@ -514,9 +587,6 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d) .prob = q->vars.prob, .delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) / NSEC_PER_USEC, - /* unscale and return dq_rate in bytes per sec */ - .avg_dq_rate = q->vars.avg_dq_rate * - (PSCHED_TICKS_PER_SEC) >> PIE_SCALE, .packets_in = q->stats.packets_in, .overlimit = q->stats.overlimit, .maxq = q->stats.maxq, @@ -524,6 +594,14 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d) .ecn_mark = q->stats.ecn_mark, }; + /* avg_dq_rate is only valid if dq_rate_estimator is enabled */ + st.dq_rate_estimating = q->params.dq_rate_estimator; + + /* unscale and return dq_rate in bytes per sec */ + if (q->params.dq_rate_estimator) + st.avg_dq_rate = q->vars.avg_dq_rate * + (PSCHED_TICKS_PER_SEC) >> PIE_SCALE; + return gnet_stats_copy_app(d, &st, sizeof(st)); } diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 7cd68628c637..c609373c8661 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -922,7 +922,7 @@ static int taprio_parse_mqprio_opt(struct net_device *dev, } /* Verify priority mapping uses valid tcs */ - for (i = 0; i < TC_BITMASK + 1; i++) { + for (i = 0; i <= TC_BITMASK; i++) { if (qopt->prio_tc_map[i] >= qopt->num_tc) { NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping"); return -EINVAL; @@ -1347,6 +1347,26 @@ out: return err; } +static int taprio_mqprio_cmp(const struct net_device *dev, + const struct tc_mqprio_qopt *mqprio) +{ + int i; + + if (!mqprio || mqprio->num_tc != dev->num_tc) + return -1; + + for (i = 0; i < mqprio->num_tc; i++) + if (dev->tc_to_txq[i].count != mqprio->count[i] || + dev->tc_to_txq[i].offset != mqprio->offset[i]) + return -1; + + for (i = 0; i <= TC_BITMASK; i++) + if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i]) + return -1; + + return 0; +} + static int taprio_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { @@ -1398,6 +1418,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, admin = rcu_dereference(q->admin_sched); rcu_read_unlock(); + /* no changes - no new mqprio settings */ + if (!taprio_mqprio_cmp(dev, mqprio)) + mqprio = NULL; + if (mqprio && (oper || admin)) { NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported"); err = -ENOTSUPP; @@ -1455,7 +1479,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, mqprio->offset[i]); /* Always use supplied priority mappings */ - for (i = 0; i < TC_BITMASK + 1; i++) + for (i = 0; i <= TC_BITMASK; i++) 
netdev_set_prio_tc_map(dev, i, mqprio->prio_tc_map[i]); } diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index f41096a759fa..55aeba681cf4 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -87,9 +87,9 @@ int tipc_bcast_get_mtu(struct net *net) return tipc_link_mss(tipc_bc_sndlink(net)); } -void tipc_bcast_disable_rcast(struct net *net) +void tipc_bcast_toggle_rcast(struct net *net, bool supp) { - tipc_bc_base(net)->rcast_support = false; + tipc_bc_base(net)->rcast_support = supp; } static void tipc_bcbase_calc_bc_threshold(struct net *net) diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index dadad953e2be..9e847d9617d3 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -85,7 +85,7 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl); void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id); void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id); int tipc_bcast_get_mtu(struct net *net); -void tipc_bcast_disable_rcast(struct net *net); +void tipc_bcast_toggle_rcast(struct net *net, bool supp); int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, struct tipc_mc_method *method, struct tipc_nlist *dests, u16 *cong_link_cnt); diff --git a/net/tipc/link.c b/net/tipc/link.c index fb72031228c9..24d4d10756d3 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -550,7 +550,7 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, /* Disable replicast if even a single peer doesn't support it */ if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST)) - tipc_bcast_disable_rcast(net); + tipc_bcast_toggle_rcast(net, false); return true; } diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 66a65c2cdb23..92d04dc2a44b 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -35,6 +35,7 @@ */ #include <net/sock.h> +#include <linux/list_sort.h> #include "core.h" #include "netlink.h" #include "name_table.h" @@ -66,6 +67,7 @@ struct service_range { /** * struct tipc_service - container for all published instances of a service type * @type: 32 bit 'type' value for service + * @publ_cnt: increasing counter for publications in this service * @ranges: rb tree containing all service ranges for this service * @service_list: links to adjacent name ranges in hash chain * @subscriptions: list of subscriptions for this service type @@ -74,6 +76,7 @@ struct service_range { */ struct tipc_service { u32 type; + u32 publ_cnt; struct rb_root ranges; struct hlist_node service_list; struct list_head subscriptions; @@ -109,6 +112,7 @@ static struct publication *tipc_publ_create(u32 type, u32 lower, u32 upper, INIT_LIST_HEAD(&publ->binding_node); INIT_LIST_HEAD(&publ->local_publ); INIT_LIST_HEAD(&publ->all_publ); + INIT_LIST_HEAD(&publ->list); return publ; } @@ -244,6 +248,8 @@ static struct publication *tipc_service_insert_publ(struct net *net, p = tipc_publ_create(type, lower, upper, scope, node, port, key); if (!p) goto err; + /* Suppose there shouldn't be a huge gap btw publs i.e. 
>INT_MAX */ + p->id = sc->publ_cnt++; if (in_own_node(net, node)) list_add(&p->local_publ, &sr->local_publ); list_add(&p->all_publ, &sr->all_publ); @@ -278,6 +284,20 @@ static struct publication *tipc_service_remove_publ(struct service_range *sr, } /** + * Code reused: time_after32() for the same purpose + */ +#define publication_after(pa, pb) time_after32((pa)->id, (pb)->id) +static int tipc_publ_sort(void *priv, struct list_head *a, + struct list_head *b) +{ + struct publication *pa, *pb; + + pa = container_of(a, struct publication, list); + pb = container_of(b, struct publication, list); + return publication_after(pa, pb); +} + +/** * tipc_service_subscribe - attach a subscription, and optionally * issue the prescribed number of events if there is any service * range overlapping with the requested range @@ -286,36 +306,51 @@ static void tipc_service_subscribe(struct tipc_service *service, struct tipc_subscription *sub) { struct tipc_subscr *sb = &sub->evt.s; + struct publication *p, *first, *tmp; + struct list_head publ_list; struct service_range *sr; struct tipc_name_seq ns; - struct publication *p; struct rb_node *n; - bool first; + u32 filter; ns.type = tipc_sub_read(sb, seq.type); ns.lower = tipc_sub_read(sb, seq.lower); ns.upper = tipc_sub_read(sb, seq.upper); + filter = tipc_sub_read(sb, filter); tipc_sub_get(sub); list_add(&sub->service_list, &service->subscriptions); - if (tipc_sub_read(sb, filter) & TIPC_SUB_NO_STATUS) + if (filter & TIPC_SUB_NO_STATUS) return; + INIT_LIST_HEAD(&publ_list); for (n = rb_first(&service->ranges); n; n = rb_next(n)) { sr = container_of(n, struct service_range, tree_node); if (sr->lower > ns.upper) break; if (!tipc_sub_check_overlap(&ns, sr->lower, sr->upper)) continue; - first = true; + first = NULL; list_for_each_entry(p, &sr->all_publ, all_publ) { - tipc_sub_report_overlap(sub, sr->lower, sr->upper, - TIPC_PUBLISHED, p->port, - p->node, p->scope, first); - first = false; + if (filter & TIPC_SUB_PORTS) + list_add_tail(&p->list, &publ_list); + else if (!first || publication_after(first, p)) + /* Pick this range's *first* publication */ + first = p; } + if (first) + list_add_tail(&first->list, &publ_list); + } + + /* Sort the publications before reporting */ + list_sort(NULL, &publ_list, tipc_publ_sort); + list_for_each_entry_safe(p, tmp, &publ_list, list) { + tipc_sub_report_overlap(sub, p->lower, p->upper, + TIPC_PUBLISHED, p->port, p->node, + p->scope, true); + list_del_init(&p->list); } } diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index f79066334cc8..728bc7016c38 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -58,6 +58,7 @@ struct tipc_group; * @node: network address of publishing socket's node * @port: publishing port * @key: publication key, unique across the cluster + * @id: publication id * @binding_node: all publications from the same node which bound this one * - Remote publications: in node->publ_list * Used by node/name distr to withdraw publications when node is lost @@ -69,6 +70,7 @@ struct tipc_group; * Used by closest_first and multicast receive lookup algorithms * @all_publ: all publications identical to this one, whatever node and scope * Used by round-robin lookup algorithm + * @list: to form a list of publications in temporal order * @rcu: RCU callback head used for deferred freeing */ struct publication { @@ -79,10 +81,12 @@ struct publication { u32 node; u32 port; u32 key; + u32 id; struct list_head binding_node; struct list_head binding_sock; struct list_head local_publ; struct list_head 
all_publ; + struct list_head list; struct rcu_head rcu; }; diff --git a/net/tipc/node.c b/net/tipc/node.c index aaf595613e6e..ab04e00cb95b 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -496,6 +496,9 @@ update: tn->capabilities &= temp_node->capabilities; } + tipc_bcast_toggle_rcast(net, + (tn->capabilities & TIPC_BCAST_RCAST)); + goto exit; } n = kzalloc(sizeof(*n), GFP_ATOMIC); @@ -557,6 +560,7 @@ update: list_for_each_entry_rcu(temp_node, &tn->node_list, list) { tn->capabilities &= temp_node->capabilities; } + tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST)); trace_tipc_node_create(n, true, " "); exit: spin_unlock_bh(&tn->node_list_lock); @@ -740,7 +744,8 @@ static bool tipc_node_cleanup(struct tipc_node *peer) list_for_each_entry_rcu(temp_node, &tn->node_list, list) { tn->capabilities &= temp_node->capabilities; } - + tipc_bcast_toggle_rcast(peer->net, + (tn->capabilities & TIPC_BCAST_RCAST)); spin_unlock_bh(&tn->node_list_lock); return deleted; } @@ -2198,6 +2203,7 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info) list_for_each_entry_rcu(temp_node, &tn->node_list, list) { tn->capabilities &= temp_node->capabilities; } + tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST)); err = 0; err_out: tipc_node_put(peer); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 5bb93dd5762b..bdca31ffe6da 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -861,6 +861,7 @@ static int __init tls_register(void) tls_sw_proto_ops = inet_stream_ops; tls_sw_proto_ops.splice_read = tls_sw_splice_read; + tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked, tls_device_init(); tcp_register_ulp(&tcp_tls_ulp_ops); diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 141da093ff04..da9f9ce51e7b 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1209,6 +1209,17 @@ sendpage_end: return copied ? 
copied : ret; } +int tls_sw_sendpage_locked(struct sock *sk, struct page *page, + int offset, size_t size, int flags) +{ + if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | + MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY | + MSG_NO_SHARED_FRAGS)) + return -ENOTSUPP; + + return tls_sw_do_sendpage(sk, page, offset, size, flags); +} + int tls_sw_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index cc8659838bf2..74db4cd637a7 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -412,6 +412,7 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) const struct vsock_transport *new_transport; struct sock *sk = sk_vsock(vsk); unsigned int remote_cid = vsk->remote_addr.svm_cid; + int ret; switch (sk->sk_type) { case SOCK_DGRAM: @@ -443,9 +444,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) if (!new_transport || !try_module_get(new_transport->module)) return -ENODEV; + ret = new_transport->init(vsk, psk); + if (ret) { + module_put(new_transport->module); + return ret; + } + vsk->transport = new_transport; - return vsk->transport->init(vsk, psk); + return 0; } EXPORT_SYMBOL_GPL(vsock_assign_transport); diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig index 51bb6018f3bf..17b8a7d4b71b 100644 --- a/net/xfrm/Kconfig +++ b/net/xfrm/Kconfig @@ -3,13 +3,13 @@ # XFRM configuration # config XFRM - bool - depends on INET - select GRO_CELLS - select SKB_EXTENSIONS + bool + depends on INET + select GRO_CELLS + select SKB_EXTENSIONS config XFRM_OFFLOAD - bool + bool config XFRM_ALGO tristate diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 8e32a4d29a38..1fc42ad8ff49 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -167,6 +167,7 @@ always += xdp_sample_pkts_kern.o always += ibumad_kern.o always += hbm_out_kern.o always += hbm_edt_kern.o +always += xdpsock_kern.o ifeq ($(ARCH), arm) # Strip all except -D__LINUX_ARM_ARCH__ option needed to handle linux diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c index e0fbab9bec83..829b68d87687 100644 --- a/samples/bpf/hbm.c +++ b/samples/bpf/hbm.c @@ -147,7 +147,7 @@ static int prog_load(char *prog) } if (ret) { - printf("ERROR: load_bpf_file failed for: %s\n", prog); + printf("ERROR: bpf_prog_load_xattr failed for: %s\n", prog); printf(" Output from verifier:\n%s\n------\n", bpf_log_buf); ret = -1; } else { diff --git a/samples/bpf/sockex1_kern.c b/samples/bpf/sockex1_kern.c index f96943f443ab..2408dbfb7a21 100644 --- a/samples/bpf/sockex1_kern.c +++ b/samples/bpf/sockex1_kern.c @@ -5,12 +5,12 @@ #include "bpf_helpers.h" #include "bpf_legacy.h" -struct bpf_map_def SEC("maps") my_map = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 256, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 256); +} my_map SEC(".maps"); SEC("socket1") int bpf_prog1(struct __sk_buff *skb) diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c index 5566fa7d92fa..a7bcd03bf529 100644 --- a/samples/bpf/sockex2_kern.c +++ b/samples/bpf/sockex2_kern.c @@ -190,12 +190,12 @@ struct pair { long bytes; }; -struct bpf_map_def SEC("maps") hash_map = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(__be32), - .value_size = sizeof(struct pair), - .max_entries = 1024, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __be32); + __type(value, struct pair); 
+ __uint(max_entries, 1024); +} hash_map SEC(".maps"); SEC("socket2") int bpf_prog2(struct __sk_buff *skb) diff --git a/samples/bpf/xdp1_kern.c b/samples/bpf/xdp1_kern.c index 219742106bfd..db6870aee42c 100644 --- a/samples/bpf/xdp1_kern.c +++ b/samples/bpf/xdp1_kern.c @@ -14,12 +14,12 @@ #include <linux/ipv6.h> #include "bpf_helpers.h" -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 256, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 256); +} rxcnt SEC(".maps"); static int parse_ipv4(void *data, u64 nh_off, void *data_end) { diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c index a8e5fa02e8a8..3e553eed95a7 100644 --- a/samples/bpf/xdp1_user.c +++ b/samples/bpf/xdp1_user.c @@ -139,7 +139,7 @@ int main(int argc, char **argv) map_fd = bpf_map__fd(map); if (!prog_fd) { - printf("load_bpf_file: %s\n", strerror(errno)); + printf("bpf_prog_load_xattr: %s\n", strerror(errno)); return 1; } diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c index e01288867d15..c74b52c6d945 100644 --- a/samples/bpf/xdp2_kern.c +++ b/samples/bpf/xdp2_kern.c @@ -14,12 +14,12 @@ #include <linux/ipv6.h> #include "bpf_helpers.h" -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 256, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 256); +} rxcnt SEC(".maps"); static void swap_src_dst_mac(void *data) { diff --git a/samples/bpf/xdp_adjust_tail_kern.c b/samples/bpf/xdp_adjust_tail_kern.c index c616508befb9..0f707e0fb375 100644 --- a/samples/bpf/xdp_adjust_tail_kern.c +++ b/samples/bpf/xdp_adjust_tail_kern.c @@ -28,12 +28,12 @@ /* volatile to prevent compiler optimizations */ static volatile __u32 max_pcktsz = MAX_PCKT_SIZE; -struct bpf_map_def SEC("maps") icmpcnt = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u64), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, __u64); + __uint(max_entries, 1); +} icmpcnt SEC(".maps"); static __always_inline void count_icmp(void) { diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c index 701a30f258b1..d013029aeaa2 100644 --- a/samples/bpf/xdp_fwd_kern.c +++ b/samples/bpf/xdp_fwd_kern.c @@ -23,13 +23,12 @@ #define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF) -/* For TX-traffic redirect requires net_device ifindex to be in this devmap */ -struct bpf_map_def SEC("maps") xdp_tx_ports = { - .type = BPF_MAP_TYPE_DEVMAP, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 64, -}; +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(int)); + __uint(max_entries, 64); +} xdp_tx_ports SEC(".maps"); /* from include/net/ip.h */ static __always_inline int ip_decrease_ttl(struct iphdr *iph) diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c index a306d1c75622..cfcc31e51197 100644 --- a/samples/bpf/xdp_redirect_cpu_kern.c +++ b/samples/bpf/xdp_redirect_cpu_kern.c @@ -18,12 +18,12 @@ #define MAX_CPUS 64 /* WARNING - sync with _user.c */ /* Special map type that can XDP_REDIRECT frames to another CPU */ -struct bpf_map_def SEC("maps") cpu_map = { - .type = BPF_MAP_TYPE_CPUMAP, - .key_size = sizeof(u32), - 
.value_size = sizeof(u32), - .max_entries = MAX_CPUS, -}; +struct { + __uint(type, BPF_MAP_TYPE_CPUMAP); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u32)); + __uint(max_entries, MAX_CPUS); +} cpu_map SEC(".maps"); /* Common stats data record to keep userspace more simple */ struct datarec { @@ -35,67 +35,67 @@ struct datarec { /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success * feedback. Redirect TX errors can be caught via a tracepoint. */ -struct bpf_map_def SEC("maps") rx_cnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, 1); +} rx_cnt SEC(".maps"); /* Used by trace point */ -struct bpf_map_def SEC("maps") redirect_err_cnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = 2, +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, 2); /* TODO: have entries for all possible errno's */ -}; +} redirect_err_cnt SEC(".maps"); /* Used by trace point */ -struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = MAX_CPUS, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, MAX_CPUS); +} cpumap_enqueue_cnt SEC(".maps"); /* Used by trace point */ -struct bpf_map_def SEC("maps") cpumap_kthread_cnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, 1); +} cpumap_kthread_cnt SEC(".maps"); /* Set of maps controlling available CPU, and for iterating through * selectable redirect CPUs. 
*/ -struct bpf_map_def SEC("maps") cpus_available = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(u32), - .max_entries = MAX_CPUS, -}; -struct bpf_map_def SEC("maps") cpus_count = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(u32), - .max_entries = 1, -}; -struct bpf_map_def SEC("maps") cpus_iterator = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(u32), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, u32); + __uint(max_entries, MAX_CPUS); +} cpus_available SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, u32); + __uint(max_entries, 1); +} cpus_count SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, u32); + __uint(max_entries, 1); +} cpus_iterator SEC(".maps"); /* Used by trace point */ -struct bpf_map_def SEC("maps") exception_cnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, 1); +} exception_cnt SEC(".maps"); /* Helper parse functions */ diff --git a/samples/bpf/xdp_redirect_kern.c b/samples/bpf/xdp_redirect_kern.c index 8abb151e385f..1f0b7d05abb2 100644 --- a/samples/bpf/xdp_redirect_kern.c +++ b/samples/bpf/xdp_redirect_kern.c @@ -19,22 +19,22 @@ #include <linux/ipv6.h> #include "bpf_helpers.h" -struct bpf_map_def SEC("maps") tx_port = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, int); + __uint(max_entries, 1); +} tx_port SEC(".maps"); /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success * feedback. Redirect TX errors can be caught via a tracepoint. */ -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 1); +} rxcnt SEC(".maps"); static void swap_src_dst_mac(void *data) { diff --git a/samples/bpf/xdp_redirect_map_kern.c b/samples/bpf/xdp_redirect_map_kern.c index 740a529ba84f..4631b484c432 100644 --- a/samples/bpf/xdp_redirect_map_kern.c +++ b/samples/bpf/xdp_redirect_map_kern.c @@ -19,22 +19,22 @@ #include <linux/ipv6.h> #include "bpf_helpers.h" -struct bpf_map_def SEC("maps") tx_port = { - .type = BPF_MAP_TYPE_DEVMAP, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 100, -}; +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(int)); + __uint(max_entries, 100); +} tx_port SEC(".maps"); /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success * feedback. Redirect TX errors can be caught via a tracepoint. 
*/ -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, 1); +} rxcnt SEC(".maps"); static void swap_src_dst_mac(void *data) { diff --git a/samples/bpf/xdp_router_ipv4_kern.c b/samples/bpf/xdp_router_ipv4_kern.c index 993f56bc7b9a..bf11efc8e949 100644 --- a/samples/bpf/xdp_router_ipv4_kern.c +++ b/samples/bpf/xdp_router_ipv4_kern.c @@ -42,44 +42,44 @@ struct direct_map { }; /* Map for trie implementation*/ -struct bpf_map_def SEC("maps") lpm_map = { - .type = BPF_MAP_TYPE_LPM_TRIE, - .key_size = 8, - .value_size = sizeof(struct trie_value), - .max_entries = 50, - .map_flags = BPF_F_NO_PREALLOC, -}; +struct { + __uint(type, BPF_MAP_TYPE_LPM_TRIE); + __uint(key_size, 8); + __uint(value_size, sizeof(struct trie_value)); + __uint(max_entries, 50); + __uint(map_flags, BPF_F_NO_PREALLOC); +} lpm_map SEC(".maps"); /* Map for counter*/ -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(u64), - .max_entries = 256, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, u64); + __uint(max_entries, 256); +} rxcnt SEC(".maps"); /* Map for ARP table*/ -struct bpf_map_def SEC("maps") arp_table = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(__be32), - .value_size = sizeof(__be64), - .max_entries = 50, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __be32); + __type(value, __be64); + __uint(max_entries, 50); +} arp_table SEC(".maps"); /* Map to keep the exact match entries in the route table*/ -struct bpf_map_def SEC("maps") exact_match = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(__be32), - .value_size = sizeof(struct direct_map), - .max_entries = 50, -}; - -struct bpf_map_def SEC("maps") tx_port = { - .type = BPF_MAP_TYPE_DEVMAP, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 100, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, __be32); + __type(value, struct direct_map); + __uint(max_entries, 50); +} exact_match SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(int)); + __uint(max_entries, 100); +} tx_port SEC(".maps"); /* Function to set source and destination mac of the packet */ static inline void set_src_dst_mac(void *data, void *src, void *dst) diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c index 222a83eed1cb..272d0f82a6b5 100644 --- a/samples/bpf/xdp_rxq_info_kern.c +++ b/samples/bpf/xdp_rxq_info_kern.c @@ -23,12 +23,13 @@ enum cfg_options_flags { READ_MEM = 0x1U, SWAP_MAC = 0x2U, }; -struct bpf_map_def SEC("maps") config_map = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(int), - .value_size = sizeof(struct config), - .max_entries = 1, -}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct config); + __uint(max_entries, 1); +} config_map SEC(".maps"); /* Common stats data record (shared with userspace) */ struct datarec { @@ -36,22 +37,22 @@ struct datarec { __u64 issue; }; -struct bpf_map_def SEC("maps") stats_global_map = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, 
struct datarec); + __uint(max_entries, 1); +} stats_global_map SEC(".maps"); #define MAX_RXQs 64 /* Stats per rx_queue_index (per CPU) */ -struct bpf_map_def SEC("maps") rx_queue_index_map = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(struct datarec), - .max_entries = MAX_RXQs + 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, u32); + __type(value, struct datarec); + __uint(max_entries, MAX_RXQs + 1); +} rx_queue_index_map SEC(".maps"); static __always_inline void swap_src_dst_mac(void *data) diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c index c7e4e45d824a..51e0d810e070 100644 --- a/samples/bpf/xdp_rxq_info_user.c +++ b/samples/bpf/xdp_rxq_info_user.c @@ -51,8 +51,8 @@ static const struct option long_options[] = { {"sec", required_argument, NULL, 's' }, {"no-separators", no_argument, NULL, 'z' }, {"action", required_argument, NULL, 'a' }, - {"readmem", no_argument, NULL, 'r' }, - {"swapmac", no_argument, NULL, 'm' }, + {"readmem", no_argument, NULL, 'r' }, + {"swapmac", no_argument, NULL, 'm' }, {"force", no_argument, NULL, 'F' }, {0, 0, NULL, 0 } }; @@ -499,7 +499,7 @@ int main(int argc, char **argv) map_fd = bpf_map__fd(map); if (!prog_fd) { - fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno)); + fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n", strerror(errno)); return EXIT_FAIL; } diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c index 3002714e3cd5..a5760e8bf2c4 100644 --- a/samples/bpf/xdp_sample_pkts_user.c +++ b/samples/bpf/xdp_sample_pkts_user.c @@ -150,7 +150,7 @@ int main(int argc, char **argv) return 1; if (!prog_fd) { - printf("load_bpf_file: %s\n", strerror(errno)); + printf("bpf_prog_load_xattr: %s\n", strerror(errno)); return 1; } diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c index 0f4f6e8c8611..6db450a5c1ca 100644 --- a/samples/bpf/xdp_tx_iptunnel_kern.c +++ b/samples/bpf/xdp_tx_iptunnel_kern.c @@ -19,19 +19,19 @@ #include "bpf_helpers.h" #include "xdp_tx_iptunnel_common.h" -struct bpf_map_def SEC("maps") rxcnt = { - .type = BPF_MAP_TYPE_PERCPU_ARRAY, - .key_size = sizeof(__u32), - .value_size = sizeof(__u64), - .max_entries = 256, -}; - -struct bpf_map_def SEC("maps") vip2tnl = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(struct vip), - .value_size = sizeof(struct iptnl_info), - .max_entries = MAX_IPTNL_ENTRIES, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, __u32); + __type(value, __u64); + __uint(max_entries, 256); +} rxcnt SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, struct vip); + __type(value, struct iptnl_info); + __uint(max_entries, MAX_IPTNL_ENTRIES); +} vip2tnl SEC(".maps"); static __always_inline void count_tx(u32 protocol) { diff --git a/samples/bpf/xdp_tx_iptunnel_user.c b/samples/bpf/xdp_tx_iptunnel_user.c index dfb68582e243..2fe4c7f5ffe5 100644 --- a/samples/bpf/xdp_tx_iptunnel_user.c +++ b/samples/bpf/xdp_tx_iptunnel_user.c @@ -268,7 +268,7 @@ int main(int argc, char **argv) return 1; if (!prog_fd) { - printf("load_bpf_file: %s\n", strerror(errno)); + printf("bpf_prog_load_xattr: %s\n", strerror(errno)); return 1; } diff --git a/samples/bpf/xdpsock.h b/samples/bpf/xdpsock.h new file mode 100644 index 000000000000..b7eca15c78cc --- /dev/null +++ b/samples/bpf/xdpsock.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright(c) 2019 Intel Corporation. 
+ */ + +#ifndef XDPSOCK_H_ +#define XDPSOCK_H_ + +#define MAX_SOCKS 4 + +#endif /* XDPSOCK_H */ diff --git a/samples/bpf/xdpsock_kern.c b/samples/bpf/xdpsock_kern.c new file mode 100644 index 000000000000..a06177c262cd --- /dev/null +++ b/samples/bpf/xdpsock_kern.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include "bpf_helpers.h" +#include "xdpsock.h" + +/* This XDP program is only needed for the XDP_SHARED_UMEM mode. + * If you do not use this mode, libbpf can supply an XDP program for you. + */ + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, MAX_SOCKS); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(int)); +} xsks_map SEC(".maps"); + +static unsigned int rr; + +SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx) +{ + rr = (rr + 1) & (MAX_SOCKS - 1); + + return bpf_redirect_map(&xsks_map, rr, XDP_DROP); +} diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index 405c4e091f8b..a15480010828 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -29,6 +29,7 @@ #include "libbpf.h" #include "xsk.h" +#include "xdpsock.h" #include <bpf/bpf.h> #ifndef SOL_XDP @@ -47,7 +48,6 @@ #define BATCH_SIZE 64 #define DEBUG_HEXDUMP 0 -#define MAX_SOCKS 8 typedef __u64 u64; typedef __u32 u32; @@ -75,7 +75,8 @@ static u32 opt_xdp_bind_flags; static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; static int opt_timeout = 1000; static bool opt_need_wakeup = true; -static __u32 prog_id; +static u32 opt_num_xsks = 1; +static u32 prog_id; struct xsk_umem_info { struct xsk_ring_prod fq; @@ -179,7 +180,7 @@ static void *poller(void *arg) static void remove_xdp_program(void) { - __u32 curr_prog_id = 0; + u32 curr_prog_id = 0; if (bpf_get_link_xdp_id(opt_ifindex, &curr_prog_id, opt_xdp_flags)) { printf("bpf_get_link_xdp_id failed\n"); @@ -196,11 +197,11 @@ static void remove_xdp_program(void) static void int_exit(int sig) { struct xsk_umem *umem = xsks[0]->umem->umem; - - (void)sig; + int i; dump_stats(); - xsk_socket__delete(xsks[0]->xsk); + for (i = 0; i < num_socks; i++) + xsk_socket__delete(xsks[i]->xsk); (void)xsk_umem__delete(umem); remove_xdp_program(); @@ -290,7 +291,6 @@ static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size) .frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM, .flags = opt_umem_flags }; - int ret; umem = calloc(1, sizeof(*umem)); @@ -299,7 +299,6 @@ static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size) ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq, &cfg); - if (ret) exit_with_error(-ret); @@ -307,13 +306,29 @@ static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size) return umem; } -static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem) +static void xsk_populate_fill_ring(struct xsk_umem_info *umem) +{ + int ret, i; + u32 idx; + + ret = xsk_ring_prod__reserve(&umem->fq, + XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx); + if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS) + exit_with_error(-ret); + for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++) + *xsk_ring_prod__fill_addr(&umem->fq, idx++) = + i * opt_xsk_frame_size; + xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS); +} + +static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem, + bool rx, bool tx) { struct xsk_socket_config cfg; struct xsk_socket_info *xsk; + struct xsk_ring_cons *rxr; + struct xsk_ring_prod *txr; int ret; - u32 idx; - int i; xsk = calloc(1, sizeof(*xsk)); if (!xsk) @@ 
-322,11 +337,17 @@ static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem) xsk->umem = umem; cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; - cfg.libbpf_flags = 0; + if (opt_num_xsks > 1) + cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD; + else + cfg.libbpf_flags = 0; cfg.xdp_flags = opt_xdp_flags; cfg.bind_flags = opt_xdp_bind_flags; + + rxr = rx ? &xsk->rx : NULL; + txr = tx ? &xsk->tx : NULL; ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem, - &xsk->rx, &xsk->tx, &cfg); + rxr, txr, &cfg); if (ret) exit_with_error(-ret); @@ -334,17 +355,6 @@ static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem) if (ret) exit_with_error(-ret); - ret = xsk_ring_prod__reserve(&xsk->umem->fq, - XSK_RING_PROD__DEFAULT_NUM_DESCS, - &idx); - if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS) - exit_with_error(-ret); - for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++) - *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx++) = - i * opt_xsk_frame_size; - xsk_ring_prod__submit(&xsk->umem->fq, - XSK_RING_PROD__DEFAULT_NUM_DESCS); - return xsk; } @@ -363,6 +373,8 @@ static struct option long_options[] = { {"frame-size", required_argument, 0, 'f'}, {"no-need-wakeup", no_argument, 0, 'm'}, {"unaligned", no_argument, 0, 'u'}, + {"shared-umem", no_argument, 0, 'M'}, + {"force", no_argument, 0, 'F'}, {0, 0, 0, 0} }; @@ -382,10 +394,11 @@ static void usage(const char *prog) " -n, --interval=n Specify statistics update interval (default 1 sec).\n" " -z, --zero-copy Force zero-copy mode.\n" " -c, --copy Force copy mode.\n" - " -f, --frame-size=n Set the frame size (must be a power of two, default is %d).\n" " -m, --no-need-wakeup Turn off use of driver need wakeup flag.\n" " -f, --frame-size=n Set the frame size (must be a power of two in aligned mode, default is %d).\n" " -u, --unaligned Enable unaligned chunk placement\n" + " -M, --shared-umem Enable XDP_SHARED_UMEM\n" + " -F, --force Force loading the XDP prog\n" "\n"; fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE); exit(EXIT_FAILURE); @@ -398,7 +411,7 @@ static void parse_command_line(int argc, char **argv) opterr = 0; for (;;) { - c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:mu", + c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:muM", long_options, &option_index); if (c == -1) break; @@ -448,11 +461,14 @@ static void parse_command_line(int argc, char **argv) break; case 'f': opt_xsk_frame_size = atoi(optarg); + break; case 'm': opt_need_wakeup = false; opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP; break; - + case 'M': + opt_num_xsks = MAX_SOCKS; + break; default: usage(basename(argv[0])); } @@ -586,11 +602,9 @@ static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds) static void rx_drop_all(void) { - struct pollfd fds[MAX_SOCKS + 1]; + struct pollfd fds[MAX_SOCKS] = {}; int i, ret; - memset(fds, 0, sizeof(fds)); - for (i = 0; i < num_socks; i++) { fds[i].fd = xsk_socket__fd(xsks[i]->xsk); fds[i].events = POLLIN; @@ -633,11 +647,10 @@ static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb) static void tx_only_all(void) { - struct pollfd fds[MAX_SOCKS]; + struct pollfd fds[MAX_SOCKS] = {}; u32 frame_nb[MAX_SOCKS] = {}; int i, ret; - memset(fds, 0, sizeof(fds)); for (i = 0; i < num_socks; i++) { fds[0].fd = xsk_socket__fd(xsks[i]->xsk); fds[0].events = POLLOUT; @@ -706,11 +719,9 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds) static void l2fwd_all(void) { - struct pollfd fds[MAX_SOCKS]; + struct pollfd 
fds[MAX_SOCKS] = {}; int i, ret; - memset(fds, 0, sizeof(fds)); - for (i = 0; i < num_socks; i++) { fds[i].fd = xsk_socket__fd(xsks[i]->xsk); fds[i].events = POLLOUT | POLLIN; @@ -728,13 +739,66 @@ static void l2fwd_all(void) } } +static void load_xdp_program(char **argv, struct bpf_object **obj) +{ + struct bpf_prog_load_attr prog_load_attr = { + .prog_type = BPF_PROG_TYPE_XDP, + }; + char xdp_filename[256]; + int prog_fd; + + snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]); + prog_load_attr.file = xdp_filename; + + if (bpf_prog_load_xattr(&prog_load_attr, obj, &prog_fd)) + exit(EXIT_FAILURE); + if (prog_fd < 0) { + fprintf(stderr, "ERROR: no program found: %s\n", + strerror(prog_fd)); + exit(EXIT_FAILURE); + } + + if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) { + fprintf(stderr, "ERROR: link set xdp fd failed\n"); + exit(EXIT_FAILURE); + } +} + +static void enter_xsks_into_map(struct bpf_object *obj) +{ + struct bpf_map *map; + int i, xsks_map; + + map = bpf_object__find_map_by_name(obj, "xsks_map"); + xsks_map = bpf_map__fd(map); + if (xsks_map < 0) { + fprintf(stderr, "ERROR: no xsks map found: %s\n", + strerror(xsks_map)); + exit(EXIT_FAILURE); + } + + for (i = 0; i < num_socks; i++) { + int fd = xsk_socket__fd(xsks[i]->xsk); + int key, ret; + + key = i; + ret = bpf_map_update_elem(xsks_map, &key, &fd, 0); + if (ret) { + fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i); + exit(EXIT_FAILURE); + } + } +} + int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; + bool rx = false, tx = false; struct xsk_umem_info *umem; + struct bpf_object *obj; pthread_t pt; + int i, ret; void *bufs; - int ret; parse_command_line(argc, argv); @@ -744,6 +808,9 @@ int main(int argc, char **argv) exit(EXIT_FAILURE); } + if (opt_num_xsks > 1) + load_xdp_program(argv, &obj); + /* Reserve memory for the umem. Use hugepages if unaligned chunk mode */ bufs = mmap(NULL, NUM_FRAMES * opt_xsk_frame_size, PROT_READ | PROT_WRITE, @@ -752,16 +819,24 @@ int main(int argc, char **argv) printf("ERROR: mmap failed\n"); exit(EXIT_FAILURE); } - /* Create sockets... */ - umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size); - xsks[num_socks++] = xsk_configure_socket(umem); - if (opt_bench == BENCH_TXONLY) { - int i; + /* Create sockets... 
*/ + umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size); + if (opt_bench == BENCH_RXDROP || opt_bench == BENCH_L2FWD) { + rx = true; + xsk_populate_fill_ring(umem); + } + if (opt_bench == BENCH_L2FWD || opt_bench == BENCH_TXONLY) + tx = true; + for (i = 0; i < opt_num_xsks; i++) + xsks[num_socks++] = xsk_configure_socket(umem, rx, tx); + if (opt_bench == BENCH_TXONLY) for (i = 0; i < NUM_FRAMES; i++) - (void)gen_eth_frame(umem, i * opt_xsk_frame_size); - } + gen_eth_frame(umem, i * opt_xsk_frame_size); + + if (opt_num_xsks > 1 && opt_bench != BENCH_TXONLY) + enter_xsks_into_map(obj); signal(SIGINT, int_exit); signal(SIGTERM, int_exit); diff --git a/tools/bpf/bpf_exp.y b/tools/bpf/bpf_exp.y index 56ba1de50784..8d48e896be50 100644 --- a/tools/bpf/bpf_exp.y +++ b/tools/bpf/bpf_exp.y @@ -545,6 +545,16 @@ static void bpf_reduce_k_jumps(void) } } +static uint8_t bpf_encode_jt_jf_offset(int off, int i) +{ + int delta = off - i - 1; + + if (delta < 0 || delta > 255) + fprintf(stderr, "warning: insn #%d jumps to insn #%d, " + "which is out of range\n", i, off); + return (uint8_t) delta; +} + static void bpf_reduce_jt_jumps(void) { int i; @@ -552,7 +562,7 @@ static void bpf_reduce_jt_jumps(void) for (i = 0; i < curr_instr; i++) { if (labels_jt[i]) { int off = bpf_find_insns_offset(labels_jt[i]); - out[i].jt = (uint8_t) (off - i -1); + out[i].jt = bpf_encode_jt_jf_offset(off, i); } } } @@ -564,7 +574,7 @@ static void bpf_reduce_jf_jumps(void) for (i = 0; i < curr_instr; i++) { if (labels_jf[i]) { int off = bpf_find_insns_offset(labels_jf[i]); - out[i].jf = (uint8_t) (off - i - 1); + out[i].jf = bpf_encode_jt_jf_offset(off, i); } } } diff --git a/tools/gpio/Build b/tools/gpio/Build index 620c1937d957..4141f35837db 100644 --- a/tools/gpio/Build +++ b/tools/gpio/Build @@ -1,3 +1,4 @@ +gpio-utils-y += gpio-utils.o lsgpio-y += lsgpio.o gpio-utils.o gpio-hammer-y += gpio-hammer.o gpio-utils.o gpio-event-mon-y += gpio-event-mon.o gpio-utils.o diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile index 1178d302757e..6080de58861f 100644 --- a/tools/gpio/Makefile +++ b/tools/gpio/Makefile @@ -35,11 +35,15 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h prepare: $(OUTPUT)include/linux/gpio.h +GPIO_UTILS_IN := $(output)gpio-utils-in.o +$(GPIO_UTILS_IN): prepare FORCE + $(Q)$(MAKE) $(build)=gpio-utils + # # lsgpio # LSGPIO_IN := $(OUTPUT)lsgpio-in.o -$(LSGPIO_IN): prepare FORCE +$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o $(Q)$(MAKE) $(build)=lsgpio $(OUTPUT)lsgpio: $(LSGPIO_IN) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ @@ -48,7 +52,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN) # gpio-hammer # GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o -$(GPIO_HAMMER_IN): prepare FORCE +$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o $(Q)$(MAKE) $(build)=gpio-hammer $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ @@ -57,7 +61,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN) # gpio-event-mon # GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o -$(GPIO_EVENT_MON_IN): prepare FORCE +$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o $(Q)$(MAKE) $(build)=gpio-event-mon $(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index df6809a76404..dbbcf0b02970 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -201,6 +201,8 @@ enum bpf_attach_type { BPF_CGROUP_GETSOCKOPT, BPF_CGROUP_SETSOCKOPT, 
BPF_TRACE_RAW_TP, + BPF_TRACE_FENTRY, + BPF_TRACE_FEXIT, __MAX_BPF_ATTACH_TYPE }; @@ -346,6 +348,9 @@ enum bpf_attach_type { /* Clone map from listener for newly accepted socket */ #define BPF_F_CLONE (1U << 9) +/* Enable memory-mapping BPF map */ +#define BPF_F_MMAPABLE (1U << 10) + /* flags for BPF_PROG_QUERY */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) @@ -423,6 +428,7 @@ union bpf_attr { __aligned_u64 line_info; /* line info */ __u32 line_info_cnt; /* number of bpf_line_info records */ __u32 attach_btf_id; /* in-kernel BTF type id to attach to */ + __u32 attach_prog_fd; /* 0 to attach to vmlinux */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index ca0d635b1d5e..98596e15390f 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -189,7 +189,7 @@ static void * alloc_zero_tailing_info(const void *orecord, __u32 cnt, __u32 actual_rec_size, __u32 expected_rec_size) { - __u64 info_len = actual_rec_size * cnt; + __u64 info_len = (__u64)actual_rec_size * cnt; void *info, *nrecord; int i; @@ -228,10 +228,13 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, memset(&attr, 0, sizeof(attr)); attr.prog_type = load_attr->prog_type; attr.expected_attach_type = load_attr->expected_attach_type; - if (attr.prog_type == BPF_PROG_TYPE_TRACING) + if (attr.prog_type == BPF_PROG_TYPE_TRACING) { attr.attach_btf_id = load_attr->attach_btf_id; - else + attr.attach_prog_fd = load_attr->attach_prog_fd; + } else { attr.prog_ifindex = load_attr->prog_ifindex; + attr.kern_version = load_attr->kern_version; + } attr.insn_cnt = (__u32)load_attr->insns_cnt; attr.insns = ptr_to_u64(load_attr->insns); attr.license = ptr_to_u64(load_attr->license); @@ -245,7 +248,6 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, attr.log_size = 0; } - attr.kern_version = load_attr->kern_version; attr.prog_btf_fd = load_attr->prog_btf_fd; attr.func_info_rec_size = load_attr->func_info_rec_size; attr.func_info_cnt = load_attr->func_info_cnt; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 1c53bc5b4b3c..3c791fa8e68e 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -77,7 +77,10 @@ struct bpf_load_program_attr { const struct bpf_insn *insns; size_t insns_cnt; const char *license; - __u32 kern_version; + union { + __u32 kern_version; + __u32 attach_prog_fd; + }; union { __u32 prog_ifindex; __u32 attach_btf_id; diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h index a273df3784f4..7009dc90e012 100644 --- a/tools/lib/bpf/bpf_core_read.h +++ b/tools/lib/bpf/bpf_core_read.h @@ -12,9 +12,76 @@ */ enum bpf_field_info_kind { BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */ + BPF_FIELD_BYTE_SIZE = 1, BPF_FIELD_EXISTS = 2, /* field existence in target kernel */ + BPF_FIELD_SIGNED = 3, + BPF_FIELD_LSHIFT_U64 = 4, + BPF_FIELD_RSHIFT_U64 = 5, }; +#define __CORE_RELO(src, field, info) \ + __builtin_preserve_field_info((src)->field, BPF_FIELD_##info) + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ + bpf_probe_read((void *)dst, \ + __CORE_RELO(src, fld, BYTE_SIZE), \ + (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET)) +#else +/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so + * for big-endian we need to adjust destination pointer accordingly, based on + * field byte size + */ +#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \ + bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \ + 
__CORE_RELO(src, fld, BYTE_SIZE), \ + (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET)) +#endif + +/* + * Extract bitfield, identified by s->field, and return its value as u64. + * All this is done in relocatable manner, so bitfield changes such as + * signedness, bit size, offset changes, this will be handled automatically. + * This version of macro is using bpf_probe_read() to read underlying integer + * storage. Macro functions as an expression and its return type is + * bpf_probe_read()'s return value: 0, on success, <0 on error. + */ +#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \ + unsigned long long val = 0; \ + \ + __CORE_BITFIELD_PROBE_READ(&val, s, field); \ + val <<= __CORE_RELO(s, field, LSHIFT_U64); \ + if (__CORE_RELO(s, field, SIGNED)) \ + val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \ + else \ + val = val >> __CORE_RELO(s, field, RSHIFT_U64); \ + val; \ +}) + +/* + * Extract bitfield, identified by s->field, and return its value as u64. + * This version of macro is using direct memory reads and should be used from + * BPF program types that support such functionality (e.g., typed raw + * tracepoints). + */ +#define BPF_CORE_READ_BITFIELD(s, field) ({ \ + const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \ + unsigned long long val; \ + \ + switch (__CORE_RELO(s, field, BYTE_SIZE)) { \ + case 1: val = *(const unsigned char *)p; \ + case 2: val = *(const unsigned short *)p; \ + case 4: val = *(const unsigned int *)p; \ + case 8: val = *(const unsigned long long *)p; \ + } \ + val <<= __CORE_RELO(s, field, LSHIFT_U64); \ + if (__CORE_RELO(s, field, SIGNED)) \ + val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \ + else \ + val = val >> __CORE_RELO(s, field, RSHIFT_U64); \ + val; \ +}) + /* * Convenience macro to check that field actually exists in target kernel's. * Returns: @@ -25,6 +92,13 @@ enum bpf_field_info_kind { __builtin_preserve_field_info(field, BPF_FIELD_EXISTS) /* + * Convenience macro to get byte size of a field. Works for integers, + * struct/unions, pointers, arrays, and enums. + */ +#define bpf_core_field_size(field) \ + __builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE) + +/* * bpf_core_read() abstracts away bpf_probe_read() call and captures offset * relocation for source address using __builtin_preserve_access_index() * built-in, provided by Clang. diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index 0c7d28292898..c63ab1add126 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -44,4 +44,17 @@ enum libbpf_pin_type { LIBBPF_PIN_BY_NAME, }; +/* The following types should be used by BPF_PROG_TYPE_TRACING program to + * access kernel function arguments. BPF trampoline and raw tracepoints + * typecast arguments to 'unsigned long long'. 
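A hedged sketch of how the two bitfield macros above are used from BPF C code; the struct, field names and values are made up for illustration, and bpf_core_read.h is assumed to be included:

struct sample_flags {			/* illustrative layout only */
	unsigned int is_set:1;
	unsigned int level:7;
};

static long read_sample_bits(struct sample_flags *f)
{
	/* direct, relocatable read: byte offset, size, shifts and
	 * signedness are fixed up against the target kernel's BTF at
	 * load time
	 */
	unsigned long long level = BPF_CORE_READ_BITFIELD(f, level);

	/* same extraction, but going through bpf_probe_read(), for
	 * program types without direct typed memory access
	 */
	unsigned long long is_set = BPF_CORE_READ_BITFIELD_PROBED(f, is_set);

	return is_set ? (long)level : -1;
}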
+ */ +typedef int __attribute__((aligned(8))) ks32; +typedef char __attribute__((aligned(8))) ks8; +typedef short __attribute__((aligned(8))) ks16; +typedef long long __attribute__((aligned(8))) ks64; +typedef unsigned int __attribute__((aligned(8))) ku32; +typedef unsigned char __attribute__((aligned(8))) ku8; +typedef unsigned short __attribute__((aligned(8))) ku16; +typedef unsigned long long __attribute__((aligned(8))) ku64; + #endif diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c index 8c67561c93b0..3ed1a27b5f7c 100644 --- a/tools/lib/bpf/bpf_prog_linfo.c +++ b/tools/lib/bpf/bpf_prog_linfo.c @@ -101,6 +101,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info) { struct bpf_prog_linfo *prog_linfo; __u32 nr_linfo, nr_jited_func; + __u64 data_sz; nr_linfo = info->nr_line_info; @@ -122,11 +123,11 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info) /* Copy xlated line_info */ prog_linfo->nr_linfo = nr_linfo; prog_linfo->rec_size = info->line_info_rec_size; - prog_linfo->raw_linfo = malloc(nr_linfo * prog_linfo->rec_size); + data_sz = (__u64)nr_linfo * prog_linfo->rec_size; + prog_linfo->raw_linfo = malloc(data_sz); if (!prog_linfo->raw_linfo) goto err_free; - memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, - nr_linfo * prog_linfo->rec_size); + memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, data_sz); nr_jited_func = info->nr_jited_ksyms; if (!nr_jited_func || @@ -142,13 +143,12 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info) /* Copy jited_line_info */ prog_linfo->nr_jited_func = nr_jited_func; prog_linfo->jited_rec_size = info->jited_line_info_rec_size; - prog_linfo->raw_jited_linfo = malloc(nr_linfo * - prog_linfo->jited_rec_size); + data_sz = (__u64)nr_linfo * prog_linfo->jited_rec_size; + prog_linfo->raw_jited_linfo = malloc(data_sz); if (!prog_linfo->raw_jited_linfo) goto err_free; memcpy(prog_linfo->raw_jited_linfo, - (void *)(long)info->jited_line_info, - nr_linfo * prog_linfo->jited_rec_size); + (void *)(long)info->jited_line_info, data_sz); /* Number of jited_line_info per jited func */ prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func * diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index d72e9a79dce1..88efa2bb7137 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -269,10 +269,9 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) t = btf__type_by_id(btf, type_id); } +done: if (size < 0) return -EINVAL; - -done: if (nelems && size > UINT32_MAX / nelems) return -E2BIG; @@ -317,6 +316,28 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name) return -ENOENT; } +__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, + __u32 kind) +{ + __u32 i; + + if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) + return 0; + + for (i = 1; i <= btf->nr_types; i++) { + const struct btf_type *t = btf->types[i]; + const char *name; + + if (btf_kind(t) != kind) + continue; + name = btf__name_by_offset(btf, t->name_off); + if (name && !strcmp(type_name, name)) + return i; + } + + return -ENOENT; +} + void btf__free(struct btf *btf) { if (!btf) diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index b18994116a44..d9ac73a02cde 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -72,6 +72,8 @@ LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); LIBBPF_API int btf__load(struct btf *btf); LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, 
const char *type_name); +LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf, + const char *type_name, __u32 kind); LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf); LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7aa2a2a22cef..a7d183f7ac72 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -142,6 +142,8 @@ struct bpf_capabilities { __u32 btf_func:1; /* BTF_KIND_VAR and BTF_KIND_DATASEC support */ __u32 btf_datasec:1; + /* BPF_F_MMAPABLE is supported for arrays */ + __u32 array_mmap:1; }; /* @@ -189,6 +191,7 @@ struct bpf_program { enum bpf_attach_type expected_attach_type; __u32 attach_btf_id; + __u32 attach_prog_fd; void *func_info; __u32 func_info_rec_size; __u32 func_info_cnt; @@ -229,6 +232,7 @@ struct bpf_map { enum libbpf_map_type libbpf_type; char *pin_path; bool pinned; + bool reused; }; struct bpf_secdata { @@ -855,8 +859,6 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, pr_warn("failed to alloc map name\n"); return -ENOMEM; } - pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n", - map_name, map->sec_idx, map->sec_offset); def = &map->def; def->type = BPF_MAP_TYPE_ARRAY; @@ -864,6 +866,12 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, def->value_size = data->d_size; def->max_entries = 1; def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0; + if (obj->caps.array_mmap) + def->map_flags |= BPF_F_MMAPABLE; + + pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", + map_name, map->sec_idx, map->sec_offset, def->map_flags); + if (data_buff) { *data_buff = malloc(data->d_size); if (!*data_buff) { @@ -956,13 +964,13 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path, nr_maps, data->d_size); - map_def_sz = data->d_size / nr_maps; - if (!data->d_size || (data->d_size % nr_maps) != 0) { + if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) { pr_warn("unable to determine map definition size " "section %s, %d maps in %zd bytes\n", obj->path, nr_maps, data->d_size); return -EINVAL; } + map_def_sz = data->d_size / nr_maps; /* Fill obj->maps using data in "maps" section. 
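The array_mmap capability added above lets libbpf tag its internal .data/.rodata/.bss array maps with BPF_F_MMAPABLE when the kernel supports it (probed by bpf_object__probe_array_mmap() further down). The flag can also be used directly from user space; a minimal sketch, with an arbitrary map name and sizes and error handling trimmed, using the same bpf_create_map_xattr() call the probe relies on:

#include <sys/mman.h>
#include <bpf/bpf.h>

int make_mmapable_array(void)
{
	struct bpf_create_map_attr attr = {
		.name = "mmap_array",		/* arbitrary */
		.map_type = BPF_MAP_TYPE_ARRAY,
		.map_flags = BPF_F_MMAPABLE,
		.key_size = sizeof(int),
		.value_size = sizeof(long),
		.max_entries = 512,
	};
	long *vals;
	int fd;

	fd = bpf_create_map_xattr(&attr);
	if (fd < 0)
		return fd;

	/* values of a mmapable ARRAY map can be read and written
	 * directly, without BPF_MAP_LOOKUP/UPDATE_ELEM syscalls
	 */
	vals = mmap(NULL, 512 * sizeof(long), PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
	if (vals == MAP_FAILED)
		return -1;

	vals[0] = 42;	/* immediately visible to the BPF program */
	return fd;
}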
*/ for (i = 0; i < nr_syms; i++) { @@ -1862,9 +1870,13 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, pr_warn("incorrect bpf_call opcode\n"); return -LIBBPF_ERRNO__RELOC; } + if (sym.st_value % 8) { + pr_warn("bad call relo offset: %lu\n", sym.st_value); + return -LIBBPF_ERRNO__RELOC; + } prog->reloc_desc[i].type = RELO_CALL; prog->reloc_desc[i].insn_idx = insn_idx; - prog->reloc_desc[i].text_off = sym.st_value; + prog->reloc_desc[i].text_off = sym.st_value / 8; obj->has_pseudo_calls = true; continue; } @@ -1995,6 +2007,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) map->def.map_flags = info.map_flags; map->btf_key_type_id = info.btf_key_type_id; map->btf_value_type_id = info.btf_value_type_id; + map->reused = true; return 0; @@ -2158,6 +2171,27 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj) return 0; } +static int bpf_object__probe_array_mmap(struct bpf_object *obj) +{ + struct bpf_create_map_attr attr = { + .map_type = BPF_MAP_TYPE_ARRAY, + .map_flags = BPF_F_MMAPABLE, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 1, + }; + int fd; + + fd = bpf_create_map_xattr(&attr); + if (fd >= 0) { + obj->caps.array_mmap = 1; + close(fd); + return 1; + } + + return 0; +} + static int bpf_object__probe_caps(struct bpf_object *obj) { @@ -2166,6 +2200,7 @@ bpf_object__probe_caps(struct bpf_object *obj) bpf_object__probe_global_data, bpf_object__probe_btf_func, bpf_object__probe_btf_datasec, + bpf_object__probe_array_mmap, }; int i, ret; @@ -2470,8 +2505,8 @@ struct bpf_core_spec { int raw_spec[BPF_CORE_SPEC_MAX_LEN]; /* raw spec length */ int raw_len; - /* field byte offset represented by spec */ - __u32 offset; + /* field bit offset represented by spec */ + __u32 bit_offset; }; static bool str_is_empty(const char *s) @@ -2482,8 +2517,8 @@ static bool str_is_empty(const char *s) /* * Turn bpf_field_reloc into a low- and high-level spec representation, * validating correctness along the way, as well as calculating resulting - * field offset (in bytes), specified by accessor string. Low-level spec - * captures every single level of nestedness, including traversing anonymous + * field bit offset, specified by accessor string. Low-level spec captures + * every single level of nestedness, including traversing anonymous * struct/union members. High-level one only captures semantically meaningful * "turning points": named fields and array indicies. 
* E.g., for this case: @@ -2555,7 +2590,7 @@ static int bpf_core_spec_parse(const struct btf *btf, sz = btf__resolve_size(btf, id); if (sz < 0) return sz; - spec->offset = access_idx * sz; + spec->bit_offset = access_idx * sz * 8; for (i = 1; i < spec->raw_len; i++) { t = skip_mods_and_typedefs(btf, id, &id); @@ -2566,17 +2601,13 @@ static int bpf_core_spec_parse(const struct btf *btf, if (btf_is_composite(t)) { const struct btf_member *m; - __u32 offset; + __u32 bit_offset; if (access_idx >= btf_vlen(t)) return -EINVAL; - if (btf_member_bitfield_size(t, access_idx)) - return -EINVAL; - offset = btf_member_bit_offset(t, access_idx); - if (offset % 8) - return -EINVAL; - spec->offset += offset / 8; + bit_offset = btf_member_bit_offset(t, access_idx); + spec->bit_offset += bit_offset; m = btf_members(t) + access_idx; if (m->name_off) { @@ -2605,7 +2636,7 @@ static int bpf_core_spec_parse(const struct btf *btf, sz = btf__resolve_size(btf, id); if (sz < 0) return sz; - spec->offset += access_idx * sz; + spec->bit_offset += access_idx * sz * 8; } else { pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n", type_id, spec_str, i, id, btf_kind(t)); @@ -2706,12 +2737,14 @@ err_out: } /* Check two types for compatibility, skipping const/volatile/restrict and - * typedefs, to ensure we are relocating offset to the compatible entities: + * typedefs, to ensure we are relocating compatible entities: * - any two STRUCTs/UNIONs are compatible and can be mixed; - * - any two FWDs are compatible; + * - any two FWDs are compatible, if their names match (modulo flavor suffix); * - any two PTRs are always compatible; + * - for ENUMs, names should be the same (ignoring flavor suffix) or at + * least one of enums should be anonymous; * - for ENUMs, check sizes, names are ignored; - * - for INT, size and bitness should match, signedness is ignored; + * - for INT, size and signedness are ignored; * - for ARRAY, dimensionality is ignored, element types are checked for * compatibility recursively; * - everything else shouldn't be ever a target of relocation. @@ -2737,16 +2770,29 @@ recur: return 0; switch (btf_kind(local_type)) { - case BTF_KIND_FWD: case BTF_KIND_PTR: return 1; - case BTF_KIND_ENUM: - return local_type->size == targ_type->size; + case BTF_KIND_FWD: + case BTF_KIND_ENUM: { + const char *local_name, *targ_name; + size_t local_len, targ_len; + + local_name = btf__name_by_offset(local_btf, + local_type->name_off); + targ_name = btf__name_by_offset(targ_btf, targ_type->name_off); + local_len = bpf_core_essential_name_len(local_name); + targ_len = bpf_core_essential_name_len(targ_name); + /* one of them is anonymous or both w/ same flavor-less names */ + return local_len == 0 || targ_len == 0 || + (local_len == targ_len && + strncmp(local_name, targ_name, local_len) == 0); + } case BTF_KIND_INT: + /* just reject deprecated bitfield-like integers; all other + * integers are by default compatible between each other + */ return btf_int_offset(local_type) == 0 && - btf_int_offset(targ_type) == 0 && - local_type->size == targ_type->size && - btf_int_bits(local_type) == btf_int_bits(targ_type); + btf_int_offset(targ_type) == 0; case BTF_KIND_ARRAY: local_id = btf_array(local_type)->type; targ_id = btf_array(targ_type)->type; @@ -2762,7 +2808,7 @@ recur: * Given single high-level named field accessor in local type, find * corresponding high-level accessor for a target type. Along the way, * maintain low-level spec for target as well. Also keep updating target - * offset. 
+ * bit offset. * * Searching is performed through recursive exhaustive enumeration of all * fields of a struct/union. If there are any anonymous (embedded) @@ -2801,21 +2847,16 @@ static int bpf_core_match_member(const struct btf *local_btf, n = btf_vlen(targ_type); m = btf_members(targ_type); for (i = 0; i < n; i++, m++) { - __u32 offset; + __u32 bit_offset; - /* bitfield relocations not supported */ - if (btf_member_bitfield_size(targ_type, i)) - continue; - offset = btf_member_bit_offset(targ_type, i); - if (offset % 8) - continue; + bit_offset = btf_member_bit_offset(targ_type, i); /* too deep struct/union/array nesting */ if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) return -E2BIG; /* speculate this member will be the good one */ - spec->offset += offset / 8; + spec->bit_offset += bit_offset; spec->raw_spec[spec->raw_len++] = i; targ_name = btf__name_by_offset(targ_btf, m->name_off); @@ -2844,7 +2885,7 @@ static int bpf_core_match_member(const struct btf *local_btf, return found; } /* member turned out not to be what we looked for */ - spec->offset -= offset / 8; + spec->bit_offset -= bit_offset; spec->raw_len--; } @@ -2853,7 +2894,7 @@ static int bpf_core_match_member(const struct btf *local_btf, /* * Try to match local spec to a target type and, if successful, produce full - * target spec (high-level, low-level + offset). + * target spec (high-level, low-level + bit offset). */ static int bpf_core_spec_match(struct bpf_core_spec *local_spec, const struct btf *targ_btf, __u32 targ_id, @@ -2916,13 +2957,120 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec, sz = btf__resolve_size(targ_btf, targ_id); if (sz < 0) return sz; - targ_spec->offset += local_acc->idx * sz; + targ_spec->bit_offset += local_acc->idx * sz * 8; } } return 1; } +static int bpf_core_calc_field_relo(const struct bpf_program *prog, + const struct bpf_field_reloc *relo, + const struct bpf_core_spec *spec, + __u32 *val, bool *validate) +{ + const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1]; + const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id); + __u32 byte_off, byte_sz, bit_off, bit_sz; + const struct btf_member *m; + const struct btf_type *mt; + bool bitfield; + __s64 sz; + + /* a[n] accessor needs special handling */ + if (!acc->name) { + if (relo->kind == BPF_FIELD_BYTE_OFFSET) { + *val = spec->bit_offset / 8; + } else if (relo->kind == BPF_FIELD_BYTE_SIZE) { + sz = btf__resolve_size(spec->btf, acc->type_id); + if (sz < 0) + return -EINVAL; + *val = sz; + } else { + pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n", + bpf_program__title(prog, false), + relo->kind, relo->insn_off / 8); + return -EINVAL; + } + if (validate) + *validate = true; + return 0; + } + + m = btf_members(t) + acc->idx; + mt = skip_mods_and_typedefs(spec->btf, m->type, NULL); + bit_off = spec->bit_offset; + bit_sz = btf_member_bitfield_size(t, acc->idx); + + bitfield = bit_sz > 0; + if (bitfield) { + byte_sz = mt->size; + byte_off = bit_off / 8 / byte_sz * byte_sz; + /* figure out smallest int size necessary for bitfield load */ + while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) { + if (byte_sz >= 8) { + /* bitfield can't be read with 64-bit read */ + pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n", + bpf_program__title(prog, false), + relo->kind, relo->insn_off / 8); + return -E2BIG; + } + byte_sz *= 2; + byte_off = bit_off / 8 / byte_sz * byte_sz; + } + } else { + sz = btf__resolve_size(spec->btf, m->type); + if (sz < 0) + return -EINVAL; 
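	/* Worked example with illustrative numbers (little-endian): for a
	 * field "unsigned int x:5" at struct-relative bit offset 18 with a
	 * 4-byte underlying int, the bitfield branch above computes
	 * byte_off = 0 and byte_sz = 4, and the switch below reports
	 * BYTE_OFFSET = 0, BYTE_SIZE = 4, LSHIFT_U64 = 64 - (18 + 5) = 41
	 * and RSHIFT_U64 = 64 - 5 = 59: the 4-byte load shifted left by 41
	 * leaves the five field bits at the top of the u64, and the right
	 * shift by 59 (arithmetic if SIGNED) extracts the value.
	 */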
+ byte_sz = sz; + byte_off = spec->bit_offset / 8; + bit_sz = byte_sz * 8; + } + + /* for bitfields, all the relocatable aspects are ambiguous and we + * might disagree with compiler, so turn off validation of expected + * value, except for signedness + */ + if (validate) + *validate = !bitfield; + + switch (relo->kind) { + case BPF_FIELD_BYTE_OFFSET: + *val = byte_off; + break; + case BPF_FIELD_BYTE_SIZE: + *val = byte_sz; + break; + case BPF_FIELD_SIGNED: + /* enums will be assumed unsigned */ + *val = btf_is_enum(mt) || + (btf_int_encoding(mt) & BTF_INT_SIGNED); + if (validate) + *validate = true; /* signedness is never ambiguous */ + break; + case BPF_FIELD_LSHIFT_U64: +#if __BYTE_ORDER == __LITTLE_ENDIAN + *val = 64 - (bit_off + bit_sz - byte_off * 8); +#else + *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); +#endif + break; + case BPF_FIELD_RSHIFT_U64: + *val = 64 - bit_sz; + if (validate) + *validate = true; /* right shift is never ambiguous */ + break; + case BPF_FIELD_EXISTS: + default: + pr_warn("prog '%s': unknown relo %d at insn #%d\n", + bpf_program__title(prog, false), + relo->kind, relo->insn_off / 8); + return -EINVAL; + } + + return 0; +} + /* * Patch relocatable BPF instruction. * @@ -2942,36 +3090,31 @@ static int bpf_core_reloc_insn(struct bpf_program *prog, const struct bpf_core_spec *local_spec, const struct bpf_core_spec *targ_spec) { + bool failed = false, validate = true; __u32 orig_val, new_val; struct bpf_insn *insn; - int insn_idx; + int insn_idx, err; __u8 class; if (relo->insn_off % sizeof(struct bpf_insn)) return -EINVAL; insn_idx = relo->insn_off / sizeof(struct bpf_insn); - switch (relo->kind) { - case BPF_FIELD_BYTE_OFFSET: - orig_val = local_spec->offset; - if (targ_spec) { - new_val = targ_spec->offset; - } else { - pr_warn("prog '%s': patching insn #%d w/ failed reloc, imm %d -> %d\n", - bpf_program__title(prog, false), insn_idx, - orig_val, -1); - new_val = (__u32)-1; - } - break; - case BPF_FIELD_EXISTS: + if (relo->kind == BPF_FIELD_EXISTS) { orig_val = 1; /* can't generate EXISTS relo w/o local field */ new_val = targ_spec ? 1 : 0; - break; - default: - pr_warn("prog '%s': unknown relo %d at insn #%d'\n", - bpf_program__title(prog, false), - relo->kind, insn_idx); - return -EINVAL; + } else if (!targ_spec) { + failed = true; + new_val = (__u32)-1; + } else { + err = bpf_core_calc_field_relo(prog, relo, local_spec, + &orig_val, &validate); + if (err) + return err; + err = bpf_core_calc_field_relo(prog, relo, targ_spec, + &new_val, NULL); + if (err) + return err; } insn = &prog->insns[insn_idx]; @@ -2980,12 +3123,17 @@ static int bpf_core_reloc_insn(struct bpf_program *prog, if (class == BPF_ALU || class == BPF_ALU64) { if (BPF_SRC(insn->code) != BPF_K) return -EINVAL; - if (insn->imm != orig_val) + if (!failed && validate && insn->imm != orig_val) { + pr_warn("prog '%s': unexpected insn #%d value: got %u, exp %u -> %u\n", + bpf_program__title(prog, false), insn_idx, + insn->imm, orig_val, new_val); return -EINVAL; + } + orig_val = insn->imm; insn->imm = new_val; - pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n", - bpf_program__title(prog, false), - insn_idx, orig_val, new_val); + pr_debug("prog '%s': patched insn #%d (ALU/ALU64)%s imm %u -> %u\n", + bpf_program__title(prog, false), insn_idx, + failed ? 
" w/ failed reloc" : "", orig_val, new_val); } else { pr_warn("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n", bpf_program__title(prog, false), @@ -3103,7 +3251,8 @@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec) libbpf_print(level, "%d%s", spec->raw_spec[i], i == spec->raw_len - 1 ? " => " : ":"); - libbpf_print(level, "%u @ &x", spec->offset); + libbpf_print(level, "%u.%u @ &x", + spec->bit_offset / 8, spec->bit_offset % 8); for (i = 0; i < spec->len; i++) { if (spec->spec[i].name) @@ -3217,7 +3366,8 @@ static int bpf_core_reloc_field(struct bpf_program *prog, return -EINVAL; } - pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx); + pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx, + relo->kind); bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec); libbpf_print(LIBBPF_DEBUG, "\n"); @@ -3257,13 +3407,13 @@ static int bpf_core_reloc_field(struct bpf_program *prog, if (j == 0) { targ_spec = cand_spec; - } else if (cand_spec.offset != targ_spec.offset) { + } else if (cand_spec.bit_offset != targ_spec.bit_offset) { /* if there are many candidates, they should all - * resolve to the same offset + * resolve to the same bit offset */ pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n", - prog_name, relo_idx, cand_spec.offset, - targ_spec.offset); + prog_name, relo_idx, cand_spec.bit_offset, + targ_spec.bit_offset); return -EINVAL; } @@ -3408,6 +3558,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, pr_warn("oom in prog realloc\n"); return -ENOMEM; } + prog->insns = new_insn; if (obj->btf_ext) { err = bpf_program_reloc_btf_ext(prog, obj, @@ -3419,7 +3570,6 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, memcpy(new_insn + prog->insns_cnt, text->insns, text->insns_cnt * sizeof(*insn)); - prog->insns = new_insn; prog->main_prog_cnt = prog->insns_cnt; prog->insns_cnt = new_cnt; pr_debug("added %zd insn from %s to prog %s\n", @@ -3427,7 +3577,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, prog->section_name); } insn = &prog->insns[relo->insn_idx]; - insn->imm += prog->main_prog_cnt - relo->insn_idx; + insn->imm += relo->text_off + prog->main_prog_cnt - relo->insn_idx; return 0; } @@ -3566,8 +3716,13 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.insns = insns; load_attr.insns_cnt = insns_cnt; load_attr.license = license; - load_attr.kern_version = kern_version; - load_attr.prog_ifindex = prog->prog_ifindex; + if (prog->type == BPF_PROG_TYPE_TRACING) { + load_attr.attach_prog_fd = prog->attach_prog_fd; + load_attr.attach_btf_id = prog->attach_btf_id; + } else { + load_attr.kern_version = kern_version; + load_attr.prog_ifindex = prog->prog_ifindex; + } /* if .BTF.ext was loaded, kernel supports associated BTF for prog */ if (prog->obj->btf_ext) btf_fd = bpf_object__btf_fd(prog->obj); @@ -3582,7 +3737,6 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.line_info_cnt = prog->line_info_cnt; load_attr.log_level = prog->log_level; load_attr.prog_flags = prog->prog_flags; - load_attr.attach_btf_id = prog->attach_btf_id; retry_load: log_buf = malloc(log_buf_size); @@ -3604,7 +3758,7 @@ retry_load: free(log_buf); goto retry_load; } - ret = -LIBBPF_ERRNO__LOAD; + ret = -errno; cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); pr_warn("load bpf program failed: %s\n", cp); @@ -3617,23 +3771,18 @@ retry_load: pr_warn("Program 
too large (%zu insns), at most %d insns\n", load_attr.insns_cnt, BPF_MAXINSNS); ret = -LIBBPF_ERRNO__PROG2BIG; - } else { + } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) { /* Wrong program type? */ - if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) { - int fd; - - load_attr.prog_type = BPF_PROG_TYPE_KPROBE; - load_attr.expected_attach_type = 0; - fd = bpf_load_program_xattr(&load_attr, NULL, 0); - if (fd >= 0) { - close(fd); - ret = -LIBBPF_ERRNO__PROGTYPE; - goto out; - } - } + int fd; - if (log_buf) - ret = -LIBBPF_ERRNO__KVER; + load_attr.prog_type = BPF_PROG_TYPE_KPROBE; + load_attr.expected_attach_type = 0; + fd = bpf_load_program_xattr(&load_attr, NULL, 0); + if (fd >= 0) { + close(fd); + ret = -LIBBPF_ERRNO__PROGTYPE; + goto out; + } } out: @@ -3744,8 +3893,9 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level) return 0; } -static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id); - +static int libbpf_find_attach_btf_id(const char *name, + enum bpf_attach_type attach_type, + __u32 attach_prog_fd); static struct bpf_object * __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, struct bpf_object_open_opts *opts) @@ -3756,6 +3906,7 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, const char *obj_name; char tmp_name[64]; bool relaxed_maps; + __u32 attach_prog_fd; int err; if (elf_version(EV_CURRENT) == EV_NONE) { @@ -3786,6 +3937,7 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false); relaxed_maps = OPTS_GET(opts, relaxed_maps, false); pin_root_path = OPTS_GET(opts, pin_root_path, NULL); + attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); CHECK_ERR(bpf_object__elf_init(obj), err, out); CHECK_ERR(bpf_object__check_endianness(obj), err, out); @@ -3798,7 +3950,6 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, bpf_object__for_each_program(prog, obj) { enum bpf_prog_type prog_type; enum bpf_attach_type attach_type; - __u32 btf_id; err = libbpf_prog_type_by_name(prog->section_name, &prog_type, &attach_type); @@ -3811,10 +3962,13 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, bpf_program__set_type(prog, prog_type); bpf_program__set_expected_attach_type(prog, attach_type); if (prog_type == BPF_PROG_TYPE_TRACING) { - err = libbpf_attach_btf_id_by_name(prog->section_name, &btf_id); - if (err) + err = libbpf_find_attach_btf_id(prog->section_name, + attach_type, + attach_prog_fd); + if (err <= 0) goto out; - prog->attach_btf_id = btf_id; + prog->attach_btf_id = err; + prog->attach_prog_fd = attach_prog_fd; } } @@ -3911,7 +4065,7 @@ int bpf_object__unload(struct bpf_object *obj) int bpf_object__load_xattr(struct bpf_object_load_attr *attr) { struct bpf_object *obj; - int err; + int err, i; if (!attr) return -EINVAL; @@ -3932,6 +4086,11 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) return 0; out: + /* unpin any maps that were auto-pinned during load */ + for (i = 0; i < obj->nr_maps; i++) + if (obj->maps[i].pinned && !obj->maps[i].reused) + bpf_map__unpin(&obj->maps[i], NULL); + bpf_object__unload(obj); pr_warn("failed to load object '%s'\n", obj->path); return err; @@ -4665,6 +4824,11 @@ int bpf_program__fd(const struct bpf_program *prog) return bpf_program__nth_fd(prog, 0); } +size_t bpf_program__size(const struct bpf_program *prog) +{ + return prog->insns_cnt * sizeof(struct bpf_insn); +} + int bpf_program__set_prep(struct 
bpf_program *prog, int nr_instances, bpf_program_prep_t prep) { @@ -4813,6 +4977,10 @@ static const struct { BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT), BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_TRACING, BPF_TRACE_RAW_TP), + BPF_PROG_BTF("fentry/", BPF_PROG_TYPE_TRACING, + BPF_TRACE_FENTRY), + BPF_PROG_BTF("fexit/", BPF_PROG_TYPE_TRACING, + BPF_TRACE_FEXIT), BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), @@ -4930,43 +5098,94 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, } #define BTF_PREFIX "btf_trace_" -static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id) +int libbpf_find_vmlinux_btf_id(const char *name, + enum bpf_attach_type attach_type) { struct btf *btf = bpf_core_find_kernel_btf(); - char raw_tp_btf_name[128] = BTF_PREFIX; - char *dst = raw_tp_btf_name + sizeof(BTF_PREFIX) - 1; - int ret, i, err = -EINVAL; + char raw_tp_btf[128] = BTF_PREFIX; + char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1; + const char *btf_name; + int err = -EINVAL; + u32 kind; if (IS_ERR(btf)) { pr_warn("vmlinux BTF is not found\n"); return -EINVAL; } - if (!name) + if (attach_type == BPF_TRACE_RAW_TP) { + /* prepend "btf_trace_" prefix per kernel convention */ + strncat(dst, name, sizeof(raw_tp_btf) - sizeof(BTF_PREFIX)); + btf_name = raw_tp_btf; + kind = BTF_KIND_TYPEDEF; + } else { + btf_name = name; + kind = BTF_KIND_FUNC; + } + err = btf__find_by_name_kind(btf, btf_name, kind); + btf__free(btf); + return err; +} + +static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd) +{ + struct bpf_prog_info_linear *info_linear; + struct bpf_prog_info *info; + struct btf *btf = NULL; + int err = -EINVAL; + + info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0); + if (IS_ERR_OR_NULL(info_linear)) { + pr_warn("failed get_prog_info_linear for FD %d\n", + attach_prog_fd); + return -EINVAL; + } + info = &info_linear->info; + if (!info->btf_id) { + pr_warn("The target program doesn't have BTF\n"); goto out; + } + if (btf__get_from_id(info->btf_id, &btf)) { + pr_warn("Failed to get BTF of the program\n"); + goto out; + } + err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); + btf__free(btf); + if (err <= 0) { + pr_warn("%s is not found in prog's BTF\n", name); + goto out; + } +out: + free(info_linear); + return err; +} + +static int libbpf_find_attach_btf_id(const char *name, + enum bpf_attach_type attach_type, + __u32 attach_prog_fd) +{ + int i, err; + + if (!name) + return -EINVAL; for (i = 0; i < ARRAY_SIZE(section_names); i++) { if (!section_names[i].is_attach_btf) continue; if (strncmp(name, section_names[i].sec, section_names[i].len)) continue; - /* prepend "btf_trace_" prefix per kernel convention */ - strncat(dst, name + section_names[i].len, - sizeof(raw_tp_btf_name) - sizeof(BTF_PREFIX)); - ret = btf__find_by_name(btf, raw_tp_btf_name); - if (ret <= 0) { - pr_warn("%s is not found in vmlinux BTF\n", dst); - goto out; - } - *btf_id = ret; - err = 0; - goto out; + if (attach_prog_fd) + err = libbpf_find_prog_btf_id(name + section_names[i].len, + attach_prog_fd); + else + err = libbpf_find_vmlinux_btf_id(name + section_names[i].len, + attach_type); + if (err <= 0) + pr_warn("%s is not found in vmlinux BTF\n", name); + return err; } pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name); - err = -ESRCH; -out: - btf__free(btf); - return err; + return -ESRCH; } int libbpf_attach_type_by_name(const char *name, @@ 
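The new "fentry/" and "fexit/" section prefixes map to BPF_PROG_TYPE_TRACING with BPF_TRACE_FENTRY/BPF_TRACE_FEXIT, and attach_btf_id is resolved either from vmlinux BTF or, when attach_prog_fd is set, from the target program's BTF. A hedged BPF-side sketch follows; the traced function (ksys_write) and its argument layout are assumptions for illustration, and the aligned ks32/ku64-style typedefs are the ones added to bpf_helpers.h above:

#include <linux/bpf.h>
#include "bpf_helpers.h"

/* arguments arrive as 64-bit slots, mirrored by the aligned typedefs */
struct write_args {
	ku32 fd;
	ku64 buf;
	ku64 count;
};

static volatile ku64 large_writes;

SEC("fentry/ksys_write")
int trace_write_entry(struct write_args *ctx)
{
	if (ctx->count > 4096)
		large_writes++;
	return 0;
}

char _license[] SEC("license") = "GPL";

User space loads the object as usual and attaches with bpf_program__attach_trace() (added just below); to trace another BPF program rather than a kernel symbol, the target is passed as attach_prog_fd in bpf_object_open_opts and libbpf_find_prog_btf_id() resolves attach_btf_id from that program's BTF.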
-5594,6 +5813,37 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog, return (struct bpf_link *)link; } +struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog) +{ + char errmsg[STRERR_BUFSIZE]; + struct bpf_link_fd *link; + int prog_fd, pfd; + + prog_fd = bpf_program__fd(prog); + if (prog_fd < 0) { + pr_warn("program '%s': can't attach before loaded\n", + bpf_program__title(prog, false)); + return ERR_PTR(-EINVAL); + } + + link = malloc(sizeof(*link)); + if (!link) + return ERR_PTR(-ENOMEM); + link->link.destroy = &bpf_link__destroy_fd; + + pfd = bpf_raw_tracepoint_open(NULL, prog_fd); + if (pfd < 0) { + pfd = -errno; + free(link); + pr_warn("program '%s': failed to attach to trace: %s\n", + bpf_program__title(prog, false), + libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); + return ERR_PTR(pfd); + } + link->fd = pfd; + return (struct bpf_link *)link; +} + enum bpf_perf_event_ret bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, void **copy_mem, size_t *copy_size, diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 6ddc0419337b..0dbf4bfba0c4 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -108,8 +108,9 @@ struct bpf_object_open_opts { * auto-pinned to that path on load; defaults to "/sys/fs/bpf". */ const char *pin_root_path; + __u32 attach_prog_fd; }; -#define bpf_object_open_opts__last_field pin_root_path +#define bpf_object_open_opts__last_field attach_prog_fd LIBBPF_API struct bpf_object *bpf_object__open(const char *path); LIBBPF_API struct bpf_object * @@ -188,6 +189,8 @@ libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, enum bpf_attach_type *expected_attach_type); LIBBPF_API int libbpf_attach_type_by_name(const char *name, enum bpf_attach_type *attach_type); +LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name, + enum bpf_attach_type attach_type); /* Accessors of bpf_program */ struct bpf_program; @@ -214,6 +217,9 @@ LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog, LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy); +/* returns program size in bytes */ +LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog); + LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_version); LIBBPF_API int bpf_program__fd(const struct bpf_program *prog); @@ -248,6 +254,8 @@ LIBBPF_API struct bpf_link * bpf_program__attach_raw_tracepoint(struct bpf_program *prog, const char *tp_name); +LIBBPF_API struct bpf_link * +bpf_program__attach_trace(struct bpf_program *prog); struct bpf_insn; /* @@ -427,8 +435,18 @@ LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type, struct bpf_object **pobj, int *prog_fd); +struct xdp_link_info { + __u32 prog_id; + __u32 drv_prog_id; + __u32 hw_prog_id; + __u32 skb_prog_id; + __u8 attach_mode; +}; + LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags); LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags); +LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, + size_t info_size, __u32 flags); struct perf_buffer; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 86173cbb159d..8ddc2c40e482 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -193,13 +193,18 @@ LIBBPF_0.0.5 { LIBBPF_0.0.6 { global: + bpf_get_link_xdp_info; bpf_map__get_pin_path; 
bpf_map__is_pinned; bpf_map__set_pin_path; bpf_object__open_file; bpf_object__open_mem; + bpf_program__attach_trace; bpf_program__get_expected_attach_type; bpf_program__get_type; bpf_program__is_tracing; bpf_program__set_tracing; + bpf_program__size; + btf__find_by_name_kind; + libbpf_find_vmlinux_btf_id; } LIBBPF_0.0.5; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index bd6f48ea407b..97ac17a64a58 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -158,7 +158,11 @@ struct bpf_line_info_min { */ enum bpf_field_info_kind { BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */ + BPF_FIELD_BYTE_SIZE = 1, BPF_FIELD_EXISTS = 2, /* field existence in target kernel */ + BPF_FIELD_SIGNED = 3, + BPF_FIELD_LSHIFT_U64 = 4, + BPF_FIELD_RSHIFT_U64 = 5, }; /* The minimum bpf_field_reloc checked by the loader diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c index ce3ec81b71c0..5065c1aa1061 100644 --- a/tools/lib/bpf/netlink.c +++ b/tools/lib/bpf/netlink.c @@ -12,6 +12,7 @@ #include "bpf.h" #include "libbpf.h" +#include "libbpf_internal.h" #include "nlattr.h" #ifndef SOL_NETLINK @@ -24,7 +25,7 @@ typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t, struct xdp_id_md { int ifindex; __u32 flags; - __u32 id; + struct xdp_link_info info; }; int libbpf_netlink_open(__u32 *nl_pid) @@ -43,7 +44,7 @@ int libbpf_netlink_open(__u32 *nl_pid) if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one)) < 0) { - fprintf(stderr, "Netlink error reporting not supported\n"); + pr_warn("Netlink error reporting not supported\n"); } if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { @@ -202,26 +203,11 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh, return dump_link_nlmsg(cookie, ifi, tb); } -static unsigned char get_xdp_id_attr(unsigned char mode, __u32 flags) -{ - if (mode != XDP_ATTACHED_MULTI) - return IFLA_XDP_PROG_ID; - if (flags & XDP_FLAGS_DRV_MODE) - return IFLA_XDP_DRV_PROG_ID; - if (flags & XDP_FLAGS_HW_MODE) - return IFLA_XDP_HW_PROG_ID; - if (flags & XDP_FLAGS_SKB_MODE) - return IFLA_XDP_SKB_PROG_ID; - - return IFLA_XDP_UNSPEC; -} - -static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb) +static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb) { struct nlattr *xdp_tb[IFLA_XDP_MAX + 1]; struct xdp_id_md *xdp_id = cookie; struct ifinfomsg *ifinfo = msg; - unsigned char mode, xdp_attr; int ret; if (xdp_id->ifindex && xdp_id->ifindex != ifinfo->ifi_index) @@ -237,27 +223,40 @@ static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb) if (!xdp_tb[IFLA_XDP_ATTACHED]) return 0; - mode = libbpf_nla_getattr_u8(xdp_tb[IFLA_XDP_ATTACHED]); - if (mode == XDP_ATTACHED_NONE) - return 0; + xdp_id->info.attach_mode = libbpf_nla_getattr_u8( + xdp_tb[IFLA_XDP_ATTACHED]); - xdp_attr = get_xdp_id_attr(mode, xdp_id->flags); - if (!xdp_attr || !xdp_tb[xdp_attr]) + if (xdp_id->info.attach_mode == XDP_ATTACHED_NONE) return 0; - xdp_id->id = libbpf_nla_getattr_u32(xdp_tb[xdp_attr]); + if (xdp_tb[IFLA_XDP_PROG_ID]) + xdp_id->info.prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_PROG_ID]); + + if (xdp_tb[IFLA_XDP_SKB_PROG_ID]) + xdp_id->info.skb_prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_SKB_PROG_ID]); + + if (xdp_tb[IFLA_XDP_DRV_PROG_ID]) + xdp_id->info.drv_prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_DRV_PROG_ID]); + + if (xdp_tb[IFLA_XDP_HW_PROG_ID]) + xdp_id->info.hw_prog_id = libbpf_nla_getattr_u32( + xdp_tb[IFLA_XDP_HW_PROG_ID]); return 0; } -int 
bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) +int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, + size_t info_size, __u32 flags) { struct xdp_id_md xdp_id = {}; int sock, ret; __u32 nl_pid; __u32 mask; - if (flags & ~XDP_FLAGS_MASK) + if (flags & ~XDP_FLAGS_MASK || !info_size) return -EINVAL; /* Check whether the single {HW,DRV,SKB} mode is set */ @@ -273,14 +272,44 @@ int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) xdp_id.ifindex = ifindex; xdp_id.flags = flags; - ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_id, &xdp_id); - if (!ret) - *prog_id = xdp_id.id; + ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_info, &xdp_id); + if (!ret) { + size_t sz = min(info_size, sizeof(xdp_id.info)); + + memcpy(info, &xdp_id.info, sz); + memset((void *) info + sz, 0, info_size - sz); + } close(sock); return ret; } +static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags) +{ + if (info->attach_mode != XDP_ATTACHED_MULTI) + return info->prog_id; + if (flags & XDP_FLAGS_DRV_MODE) + return info->drv_prog_id; + if (flags & XDP_FLAGS_HW_MODE) + return info->hw_prog_id; + if (flags & XDP_FLAGS_SKB_MODE) + return info->skb_prog_id; + + return 0; +} + +int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) +{ + struct xdp_link_info info; + int ret; + + ret = bpf_get_link_xdp_info(ifindex, &info, sizeof(info), flags); + if (!ret) + *prog_id = get_xdp_id(&info, flags); + + return ret; +} + int libbpf_nl_get_link(int sock, unsigned int nl_pid, libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie) { diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c index 1e69c0c8d413..8db44bbfc66d 100644 --- a/tools/lib/bpf/nlattr.c +++ b/tools/lib/bpf/nlattr.c @@ -8,6 +8,7 @@ #include <errno.h> #include "nlattr.h" +#include "libbpf_internal.h" #include <linux/rtnetlink.h> #include <string.h> #include <stdio.h> @@ -121,8 +122,8 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, } if (tb[type]) - fprintf(stderr, "Attribute of type %#x found multiple times in message, " - "previous attribute is being ignored.\n", type); + pr_warn("Attribute of type %#x found multiple times in message, " + "previous attribute is being ignored.\n", type); tb[type] = nla; } @@ -181,15 +182,14 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh) if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen, extack_policy) != 0) { - fprintf(stderr, - "Failed to parse extended error attributes\n"); + pr_warn("Failed to parse extended error attributes\n"); return 0; } if (tb[NLMSGERR_ATTR_MSG]) errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]); - fprintf(stderr, "Kernel error message: %s\n", errmsg); + pr_warn("Kernel error message: %s\n", errmsg); return 0; } diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 74d84f36a5b2..8e0ffa800a71 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -431,13 +431,18 @@ static int xsk_get_max_queues(struct xsk_socket *xsk) goto out; } - if (err || channels.max_combined == 0) + if (err) { /* If the device says it has no channels, then all traffic * is sent to a single stream, so max queues = 1. */ ret = 1; - else - ret = channels.max_combined; + } else { + /* Take the max of rx, tx, combined. Drivers return + * the number of channels in different ways. 
+ */ + ret = max(channels.max_rx, channels.max_tx); + ret = max(ret, (int)channels.max_combined); + } out: close(fd); @@ -553,6 +558,8 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk) } } else { xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id); + if (xsk->prog_fd < 0) + return -errno; err = xsk_lookup_bpf_maps(xsk); if (err) { close(xsk->prog_fd); @@ -560,7 +567,8 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk) } } - err = xsk_set_bpf_maps(xsk); + if (xsk->rx) + err = xsk_set_bpf_maps(xsk); if (err) { xsk_delete_bpf_maps(xsk); close(xsk->prog_fd); @@ -581,18 +589,24 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, struct xsk_socket *xsk; int err; - if (!umem || !xsk_ptr || !rx || !tx) + if (!umem || !xsk_ptr || !(rx || tx)) return -EFAULT; - if (umem->refcount) { - pr_warn("Error: shared umems not supported by libbpf.\n"); - return -EBUSY; - } - xsk = calloc(1, sizeof(*xsk)); if (!xsk) return -ENOMEM; + err = xsk_set_xdp_socket_config(&xsk->config, usr_config); + if (err) + goto out_xsk_alloc; + + if (umem->refcount && + !(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) { + pr_warn("Error: shared umems not supported by libbpf supplied XDP program.\n"); + err = -EBUSY; + goto out_xsk_alloc; + } + if (umem->refcount++ > 0) { xsk->fd = socket(AF_XDP, SOCK_RAW, 0); if (xsk->fd < 0) { @@ -614,10 +628,6 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, memcpy(xsk->ifname, ifname, IFNAMSIZ - 1); xsk->ifname[IFNAMSIZ - 1] = '\0'; - err = xsk_set_xdp_socket_config(&xsk->config, usr_config); - if (err) - goto out_socket; - if (rx) { err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING, &xsk->config.rx_size, @@ -685,7 +695,12 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, sxdp.sxdp_family = PF_XDP; sxdp.sxdp_ifindex = xsk->ifindex; sxdp.sxdp_queue_id = xsk->queue_id; - sxdp.sxdp_flags = xsk->config.bind_flags; + if (umem->refcount > 1) { + sxdp.sxdp_flags = XDP_SHARED_UMEM; + sxdp.sxdp_shared_umem_fd = umem->fd; + } else { + sxdp.sxdp_flags = xsk->config.bind_flags; + } err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp)); if (err) { diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index b334a6db15c1..4fe4aec0367c 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -30,7 +30,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \ test_cgroup_storage test_select_reuseport \ test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \ - test_cgroup_attach xdping test_progs-no_alu32 + test_cgroup_attach test_progs-no_alu32 # Also test bpf-gcc, if present ifneq ($(BPF_GCC),) @@ -38,7 +38,8 @@ TEST_GEN_PROGS += test_progs-bpf_gcc endif TEST_GEN_FILES = -TEST_FILES = +TEST_FILES = test_lwt_ip_encap.o \ + test_tc_edt.o # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ @@ -70,7 +71,7 @@ TEST_PROGS_EXTENDED := with_addr.sh \ # Compile but not part of 'make run_tests' TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \ flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \ - test_lirc_mode2_user + test_lirc_mode2_user xdping TEST_CUSTOM_PROGS = urandom_read @@ -162,6 +163,12 @@ define CLANG_BPF_BUILD_RULE -c $1 -o - || echo "BPF obj compilation failed") | \ $(LLC) -march=bpf -mcpu=probe $4 -filetype=obj -o $2 endef +# Similar to 
CLANG_BPF_BUILD_RULE, but with disabled alu32 +define CLANG_NOALU32_BPF_BUILD_RULE + ($(CLANG) $3 -O2 -target bpf -emit-llvm \ + -c $1 -o - || echo "BPF obj compilation failed") | \ + $(LLC) -march=bpf -mcpu=v2 $4 -filetype=obj -o $2 +endef # Similar to CLANG_BPF_BUILD_RULE, but using native Clang and bpf LLC define CLANG_NATIVE_BPF_BUILD_RULE ($(CLANG) $3 -O2 -emit-llvm \ @@ -274,6 +281,7 @@ TRUNNER_BPF_LDFLAGS := -mattr=+alu32 $(eval $(call DEFINE_TEST_RUNNER,test_progs)) # Define test_progs-no_alu32 test runner. +TRUNNER_BPF_BUILD_RULE := CLANG_NOALU32_BPF_BUILD_RULE TRUNNER_BPF_LDFLAGS := $(eval $(call DEFINE_TEST_RUNNER,test_progs,no_alu32)) diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 09dfa75fe948..ec9e2fdd6b89 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> #include "progs/core_reloc_types.h" +#include <sys/mman.h> #define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name) @@ -174,21 +175,82 @@ .fails = true, \ } -#define EXISTENCE_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \ - .a = 42, \ -} - #define EXISTENCE_CASE_COMMON(name) \ .case_name = #name, \ .bpf_obj_file = "test_core_reloc_existence.o", \ .btf_src_file = "btf__core_reloc_" #name ".o", \ - .relaxed_core_relocs = true \ + .relaxed_core_relocs = true #define EXISTENCE_ERR_CASE(name) { \ EXISTENCE_CASE_COMMON(name), \ .fails = true, \ } +#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \ + .case_name = test_name_prefix#name, \ + .bpf_obj_file = objfile, \ + .btf_src_file = "btf__core_reloc_" #name ".o" + +#define BITFIELDS_CASE(name, ...) 
{ \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \ + "direct:", name), \ + .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \ + __VA_ARGS__, \ + .output_len = sizeof(struct core_reloc_bitfields_output), \ +}, { \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \ + "probed:", name), \ + .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \ + .input_len = sizeof(struct core_reloc_##name), \ + .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \ + __VA_ARGS__, \ + .output_len = sizeof(struct core_reloc_bitfields_output), \ + .direct_raw_tp = true, \ +} + + +#define BITFIELDS_ERR_CASE(name) { \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \ + "probed:", name), \ + .fails = true, \ +}, { \ + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \ + "direct:", name), \ + .direct_raw_tp = true, \ + .fails = true, \ +} + +#define SIZE_CASE_COMMON(name) \ + .case_name = #name, \ + .bpf_obj_file = "test_core_reloc_size.o", \ + .btf_src_file = "btf__core_reloc_" #name ".o", \ + .relaxed_core_relocs = true + +#define SIZE_OUTPUT_DATA(type) \ + STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \ + .int_sz = sizeof(((type *)0)->int_field), \ + .struct_sz = sizeof(((type *)0)->struct_field), \ + .union_sz = sizeof(((type *)0)->union_field), \ + .arr_sz = sizeof(((type *)0)->arr_field), \ + .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \ + .ptr_sz = sizeof(((type *)0)->ptr_field), \ + .enum_sz = sizeof(((type *)0)->enum_field), \ + } + +#define SIZE_CASE(name) { \ + SIZE_CASE_COMMON(name), \ + .input_len = 0, \ + .output = SIZE_OUTPUT_DATA(struct core_reloc_##name), \ + .output_len = sizeof(struct core_reloc_size_output), \ +} + +#define SIZE_ERR_CASE(name) { \ + SIZE_CASE_COMMON(name), \ + .fails = true, \ +} + struct core_reloc_test_case { const char *case_name; const char *bpf_obj_file; @@ -199,6 +261,7 @@ struct core_reloc_test_case { int output_len; bool fails; bool relaxed_core_relocs; + bool direct_raw_tp; }; static struct core_reloc_test_case test_cases[] = { @@ -275,12 +338,6 @@ static struct core_reloc_test_case test_cases[] = { INTS_CASE(ints___bool), INTS_CASE(ints___reverse_sign), - INTS_ERR_CASE(ints___err_bitfield), - INTS_ERR_CASE(ints___err_wrong_sz_8), - INTS_ERR_CASE(ints___err_wrong_sz_16), - INTS_ERR_CASE(ints___err_wrong_sz_32), - INTS_ERR_CASE(ints___err_wrong_sz_64), - /* validate edge cases of capturing relocations */ { .case_name = "misc", @@ -352,6 +409,44 @@ static struct core_reloc_test_case test_cases[] = { EXISTENCE_ERR_CASE(existence__err_arr_kind), EXISTENCE_ERR_CASE(existence__err_arr_value_type), EXISTENCE_ERR_CASE(existence__err_struct_type), + + /* bitfield relocation checks */ + BITFIELDS_CASE(bitfields, { + .ub1 = 1, + .ub2 = 2, + .ub7 = 96, + .sb4 = -7, + .sb20 = -0x76543, + .u32 = 0x80000000, + .s32 = -0x76543210, + }), + BITFIELDS_CASE(bitfields___bit_sz_change, { + .ub1 = 6, + .ub2 = 0xABCDE, + .ub7 = 1, + .sb4 = -1, + .sb20 = -0x17654321, + .u32 = 0xBEEF, + .s32 = -0x3FEDCBA987654321, + }), + BITFIELDS_CASE(bitfields___bitfield_vs_int, { + .ub1 = 0xFEDCBA9876543210, + .ub2 = 0xA6, + .ub7 = -0x7EDCBA987654321, + .sb4 = -0x6123456789ABCDE, + .sb20 = 0xD00D, + .u32 = -0x76543, + .s32 = 0x0ADEADBEEFBADB0B, + }), + BITFIELDS_CASE(bitfields___just_big_enough, { + .ub1 = 0xF, + .ub2 = 0x0812345678FEDCBA, + }), + BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield), + + /* size 
relocation checks */ + SIZE_CASE(size), + SIZE_CASE(size___diff_sz), }; struct data { @@ -359,18 +454,25 @@ struct data { char out[256]; }; +static size_t roundup_page(size_t sz) +{ + long page_size = sysconf(_SC_PAGE_SIZE); + return (sz + page_size - 1) / page_size * page_size; +} + void test_core_reloc(void) { - const char *probe_name = "raw_tracepoint/sys_enter"; + const size_t mmap_sz = roundup_page(sizeof(struct data)); struct bpf_object_load_attr load_attr = {}; struct core_reloc_test_case *test_case; + const char *tp_name, *probe_name; int err, duration = 0, i, equal; struct bpf_link *link = NULL; struct bpf_map *data_map; struct bpf_program *prog; struct bpf_object *obj; - const int zero = 0; - struct data data; + struct data *data; + void *mmap_data = NULL; for (i = 0; i < ARRAY_SIZE(test_cases); i++) { test_case = &test_cases[i]; @@ -382,11 +484,19 @@ void test_core_reloc(void) ); obj = bpf_object__open_file(test_case->bpf_obj_file, &opts); - if (CHECK(IS_ERR_OR_NULL(obj), "obj_open", - "failed to open '%s': %ld\n", + if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n", test_case->bpf_obj_file, PTR_ERR(obj))) continue; + /* for typed raw tracepoints, NULL should be specified */ + if (test_case->direct_raw_tp) { + probe_name = "tp_btf/sys_enter"; + tp_name = NULL; + } else { + probe_name = "raw_tracepoint/sys_enter"; + tp_name = "sys_enter"; + } + prog = bpf_object__find_program_by_title(obj, probe_name); if (CHECK(!prog, "find_probe", "prog '%s' not found\n", probe_name)) @@ -407,7 +517,7 @@ void test_core_reloc(void) goto cleanup; } - link = bpf_program__attach_raw_tracepoint(prog, "sys_enter"); + link = bpf_program__attach_raw_tracepoint(prog, tp_name); if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link))) goto cleanup; @@ -416,24 +526,22 @@ void test_core_reloc(void) if (CHECK(!data_map, "find_data_map", "data map not found\n")) goto cleanup; - memset(&data, 0, sizeof(data)); - memcpy(data.in, test_case->input, test_case->input_len); - - err = bpf_map_update_elem(bpf_map__fd(data_map), - &zero, &data, 0); - if (CHECK(err, "update_data_map", - "failed to update .data map: %d\n", err)) + mmap_data = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, + MAP_SHARED, bpf_map__fd(data_map), 0); + if (CHECK(mmap_data == MAP_FAILED, "mmap", + ".bss mmap failed: %d", errno)) { + mmap_data = NULL; goto cleanup; + } + data = mmap_data; + + memset(mmap_data, 0, sizeof(*data)); + memcpy(data->in, test_case->input, test_case->input_len); /* trigger test run */ usleep(1); - err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &data); - if (CHECK(err, "get_result", - "failed to get output data: %d\n", err)) - goto cleanup; - - equal = memcmp(data.out, test_case->output, + equal = memcmp(data->out, test_case->output, test_case->output_len) == 0; if (CHECK(!equal, "check_result", "input/output data don't match\n")) { @@ -445,12 +553,16 @@ void test_core_reloc(void) } for (j = 0; j < test_case->output_len; j++) { printf("output byte #%d: EXP 0x%02hhx GOT 0x%02hhx\n", - j, test_case->output[j], data.out[j]); + j, test_case->output[j], data->out[j]); } goto cleanup; } cleanup: + if (mmap_data) { + CHECK_FAIL(munmap(mmap_data, mmap_sz)); + mmap_data = NULL; + } if (!IS_ERR_OR_NULL(link)) { bpf_link__destroy(link); link = NULL; diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c new file mode 100644 index 000000000000..40bcff2cc274 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c @@ 
-0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +void test_fentry_fexit(void) +{ + struct bpf_prog_load_attr attr_fentry = { + .file = "./fentry_test.o", + }; + struct bpf_prog_load_attr attr_fexit = { + .file = "./fexit_test.o", + }; + + struct bpf_object *obj_fentry = NULL, *obj_fexit = NULL, *pkt_obj; + struct bpf_map *data_map_fentry, *data_map_fexit; + char fentry_name[] = "fentry/bpf_fentry_testX"; + char fexit_name[] = "fexit/bpf_fentry_testX"; + int err, pkt_fd, kfree_skb_fd, i; + struct bpf_link *link[12] = {}; + struct bpf_program *prog[12]; + __u32 duration, retval; + const int zero = 0; + u64 result[12]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + err = bpf_prog_load_xattr(&attr_fentry, &obj_fentry, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + err = bpf_prog_load_xattr(&attr_fexit, &obj_fexit, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + + for (i = 0; i < 6; i++) { + fentry_name[sizeof(fentry_name) - 2] = '1' + i; + prog[i] = bpf_object__find_program_by_title(obj_fentry, fentry_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fentry_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map_fentry = bpf_object__find_map_by_name(obj_fentry, "fentry_t.bss"); + if (CHECK(!data_map_fentry, "find_data_map", "data map not found\n")) + goto close_prog; + + for (i = 6; i < 12; i++) { + fexit_name[sizeof(fexit_name) - 2] = '1' + i - 6; + prog[i] = bpf_object__find_program_by_title(obj_fexit, fexit_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fexit_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map_fexit = bpf_object__find_map_by_name(obj_fexit, "fexit_te.bss"); + if (CHECK(!data_map_fexit, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map_fentry), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + err = bpf_map_lookup_elem(bpf_map__fd(data_map_fexit), &zero, result + 6); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < 12; i++) + if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", + i % 6 + 1, result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < 12; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + bpf_object__close(obj_fentry); + bpf_object__close(obj_fexit); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c new file mode 100644 index 000000000000..9fb103193878 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +void 
test_fentry_test(void) +{ + struct bpf_prog_load_attr attr = { + .file = "./fentry_test.o", + }; + + char prog_name[] = "fentry/bpf_fentry_testX"; + struct bpf_object *obj = NULL, *pkt_obj; + int err, pkt_fd, kfree_skb_fd, i; + struct bpf_link *link[6] = {}; + struct bpf_program *prog[6]; + __u32 duration, retval; + struct bpf_map *data_map; + const int zero = 0; + u64 result[6]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + + for (i = 0; i < 6; i++) { + prog_name[sizeof(prog_name) - 2] = '1' + i; + prog[i] = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map = bpf_object__find_map_by_name(obj, "fentry_t.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < 6; i++) + if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", + i + 1, result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < 6; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c new file mode 100644 index 000000000000..15c7378362dd --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +#define PROG_CNT 3 + +void test_fexit_bpf2bpf(void) +{ + const char *prog_name[PROG_CNT] = { + "fexit/test_pkt_access", + "fexit/test_pkt_access_subprog1", + "fexit/test_pkt_access_subprog2", + }; + struct bpf_object *obj = NULL, *pkt_obj; + int err, pkt_fd, i; + struct bpf_link *link[PROG_CNT] = {}; + struct bpf_program *prog[PROG_CNT]; + __u32 duration, retval; + struct bpf_map *data_map; + const int zero = 0; + u64 result[PROG_CNT]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_UNSPEC, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .attach_prog_fd = pkt_fd, + ); + + obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts); + if (CHECK(IS_ERR_OR_NULL(obj), "obj_open", + "failed to open fexit_bpf2bpf: %ld\n", + PTR_ERR(obj))) + goto close_prog; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto close_prog; + + for (i = 0; i < PROG_CNT; i++) { + prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i])) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to 
link\n")) + goto close_prog; + } + data_map = bpf_object__find_map_by_name(obj, "fexit_bp.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < PROG_CNT; i++) + if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n", + result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < PROG_CNT; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + if (!IS_ERR_OR_NULL(obj)) + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c new file mode 100644 index 000000000000..3b9dbf7433f0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +/* x86-64 fits 55 JITed and 43 interpreted progs into half page */ +#define CNT 40 + +void test_fexit_stress(void) +{ + char test_skb[128] = {}; + int fexit_fd[CNT] = {}; + int link_fd[CNT] = {}; + __u32 duration = 0; + char error[4096]; + __u32 prog_ret; + int err, i, filter_fd; + + const struct bpf_insn trace_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr load_attr = { + .prog_type = BPF_PROG_TYPE_TRACING, + .license = "GPL", + .insns = trace_program, + .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn), + .expected_attach_type = BPF_TRACE_FEXIT, + }; + + const struct bpf_insn skb_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr skb_load_attr = { + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .license = "GPL", + .insns = skb_program, + .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn), + }; + + err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1", + load_attr.expected_attach_type); + if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err)) + goto out; + load_attr.attach_btf_id = err; + + for (i = 0; i < CNT; i++) { + fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error)); + if (CHECK(fexit_fd[i] < 0, "fexit loaded", + "failed: %d errno %d\n", fexit_fd[i], errno)) + goto out; + link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]); + if (CHECK(link_fd[i] < 0, "fexit attach failed", + "prog %d failed: %d err %d\n", i, link_fd[i], errno)) + goto out; + } + + filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error)); + if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", + filter_fd, errno)) + goto out; + + err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, + 0, &prog_ret, 0); + close(filter_fd); + CHECK_FAIL(err); +out: + for (i = 0; i < CNT; i++) { + if (link_fd[i]) + close(link_fd[i]); + if (fexit_fd[i]) + close(fexit_fd[i]); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c new file mode 100644 index 000000000000..f99013222c74 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 
Facebook */ +#include <test_progs.h> + +void test_fexit_test(void) +{ + struct bpf_prog_load_attr attr = { + .file = "./fexit_test.o", + }; + + char prog_name[] = "fexit/bpf_fentry_testX"; + struct bpf_object *obj = NULL, *pkt_obj; + int err, pkt_fd, kfree_skb_fd, i; + struct bpf_link *link[6] = {}; + struct bpf_program *prog[6]; + __u32 duration, retval; + struct bpf_map *data_map; + const int zero = 0; + u64 result[6]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + + for (i = 0; i < 6; i++) { + prog_name[sizeof(prog_name) - 2] = '1' + i; + prog[i] = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map = bpf_object__find_map_by_name(obj, "fexit_te.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < 6; i++) + if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", + i + 1, result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < 6; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index 430b50de1583..7507c8f689bc 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -1,15 +1,38 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +struct meta { + int ifindex; + __u32 cb32_0; + __u8 cb8_0; +}; + +static union { + __u32 cb32[5]; + __u8 cb8[20]; +} cb = { + .cb32[0] = 0x81828384, +}; + static void on_sample(void *ctx, int cpu, void *data, __u32 size) { - int ifindex = *(int *)data, duration = 0; - struct ipv6_packet *pkt_v6 = data + 4; + struct meta *meta = (struct meta *)data; + struct ipv6_packet *pkt_v6 = data + sizeof(*meta); + int duration = 0; - if (ifindex != 1) + if (CHECK(size != 72 + sizeof(*meta), "check_size", "size %u != %zu\n", + size, 72 + sizeof(*meta))) + return; + if (CHECK(meta->ifindex != 1, "check_meta_ifindex", + "meta->ifindex = %d\n", meta->ifindex)) /* spurious kfree_skb not on loopback device */ return; - if (CHECK(size != 76, "check_size", "size %u != 76\n", size)) + if (CHECK(meta->cb8_0 != cb.cb8[0], "check_cb8_0", "cb8_0 %x != %x\n", + meta->cb8_0, cb.cb8[0])) + return; + if (CHECK(meta->cb32_0 != cb.cb32[0], "check_cb32_0", + "cb32_0 %x != %x\n", + meta->cb32_0, cb.cb32[0])) return; if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth", "h_proto %x\n", pkt_v6->eth.h_proto)) @@ -26,21 +49,31 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) void test_kfree_skb(void) { + struct __sk_buff skb = {}; + struct bpf_prog_test_run_attr 
tattr = { + .data_in = &pkt_v6, + .data_size_in = sizeof(pkt_v6), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + }; struct bpf_prog_load_attr attr = { .file = "./kfree_skb.o", }; + struct bpf_link *link = NULL, *link_fentry = NULL, *link_fexit = NULL; + struct bpf_map *perf_buf_map, *global_data; + struct bpf_program *prog, *fentry, *fexit; struct bpf_object *obj, *obj2 = NULL; struct perf_buffer_opts pb_opts = {}; struct perf_buffer *pb = NULL; - struct bpf_link *link = NULL; - struct bpf_map *perf_buf_map; - struct bpf_program *prog; - __u32 duration, retval; - int err, pkt_fd, kfree_skb_fd; + int err, kfree_skb_fd; bool passed = false; + __u32 duration = 0; + const int zero = 0; + bool test_ok[2]; - err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &pkt_fd); + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &tattr.prog_fd); if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) return; @@ -51,9 +84,28 @@ void test_kfree_skb(void) prog = bpf_object__find_program_by_title(obj2, "tp_btf/kfree_skb"); if (CHECK(!prog, "find_prog", "prog kfree_skb not found\n")) goto close_prog; + fentry = bpf_object__find_program_by_title(obj2, "fentry/eth_type_trans"); + if (CHECK(!fentry, "find_prog", "prog eth_type_trans not found\n")) + goto close_prog; + fexit = bpf_object__find_program_by_title(obj2, "fexit/eth_type_trans"); + if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n")) + goto close_prog; + + global_data = bpf_object__find_map_by_name(obj2, "kfree_sk.bss"); + if (CHECK(!global_data, "find global data", "not found\n")) + goto close_prog; + link = bpf_program__attach_raw_tracepoint(prog, NULL); if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link))) goto close_prog; + link_fentry = bpf_program__attach_trace(fentry); + if (CHECK(IS_ERR(link_fentry), "attach fentry", "err %ld\n", + PTR_ERR(link_fentry))) + goto close_prog; + link_fexit = bpf_program__attach_trace(fexit); + if (CHECK(IS_ERR(link_fexit), "attach fexit", "err %ld\n", + PTR_ERR(link_fexit))) + goto close_prog; perf_buf_map = bpf_object__find_map_by_name(obj2, "perf_buf_map"); if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) @@ -66,24 +118,37 @@ void test_kfree_skb(void) if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb))) goto close_prog; - err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), - NULL, NULL, &retval, &duration); - CHECK(err || retval, "ipv6", + memcpy(skb.cb, &cb, sizeof(cb)); + err = bpf_prog_test_run_xattr(&tattr); + duration = tattr.duration; + CHECK(err || tattr.retval, "ipv6", "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); + err, errno, tattr.retval, duration); /* read perf buffer */ err = perf_buffer__poll(pb, 100); if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err)) goto close_prog; + /* make sure kfree_skb program was triggered * and it sent expected skb into ring buffer */ CHECK_FAIL(!passed); + + err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + CHECK_FAIL(!test_ok[0] || !test_ok[1]); close_prog: perf_buffer__free(pb); if (!IS_ERR_OR_NULL(link)) bpf_link__destroy(link); + if (!IS_ERR_OR_NULL(link_fentry)) + bpf_link__destroy(link_fentry); + if (!IS_ERR_OR_NULL(link_fexit)) + bpf_link__destroy(link_fexit); bpf_object__close(obj); bpf_object__close(obj2); } diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c 
b/tools/testing/selftests/bpf/prog_tests/mmap.c new file mode 100644 index 000000000000..051a6d48762c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/mmap.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <sys/mman.h> + +struct map_data { + __u64 val[512 * 4]; +}; + +struct bss_data { + __u64 in_val; + __u64 out_val; +}; + +static size_t roundup_page(size_t sz) +{ + long page_size = sysconf(_SC_PAGE_SIZE); + return (sz + page_size - 1) / page_size * page_size; +} + +void test_mmap(void) +{ + const char *file = "test_mmap.o"; + const char *probe_name = "raw_tracepoint/sys_enter"; + const char *tp_name = "sys_enter"; + const size_t bss_sz = roundup_page(sizeof(struct bss_data)); + const size_t map_sz = roundup_page(sizeof(struct map_data)); + const int zero = 0, one = 1, two = 2, far = 1500; + const long page_size = sysconf(_SC_PAGE_SIZE); + int err, duration = 0, i, data_map_fd; + struct bpf_program *prog; + struct bpf_object *obj; + struct bpf_link *link = NULL; + struct bpf_map *data_map, *bss_map; + void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2; + volatile struct bss_data *bss_data; + volatile struct map_data *map_data; + __u64 val = 0; + + obj = bpf_object__open_file("test_mmap.o", NULL); + if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n", + file, PTR_ERR(obj))) + return; + prog = bpf_object__find_program_by_title(obj, probe_name); + if (CHECK(!prog, "find_probe", "prog '%s' not found\n", probe_name)) + goto cleanup; + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "failed to load prog '%s': %d\n", + probe_name, err)) + goto cleanup; + + bss_map = bpf_object__find_map_by_name(obj, "test_mma.bss"); + if (CHECK(!bss_map, "find_bss_map", ".bss map not found\n")) + goto cleanup; + data_map = bpf_object__find_map_by_name(obj, "data_map"); + if (CHECK(!data_map, "find_data_map", "data_map map not found\n")) + goto cleanup; + data_map_fd = bpf_map__fd(data_map); + + bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + bpf_map__fd(bss_map), 0); + if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap", + ".bss mmap failed: %d\n", errno)) { + bss_mmaped = NULL; + goto cleanup; + } + /* map as R/W first */ + map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + data_map_fd, 0); + if (CHECK(map_mmaped == MAP_FAILED, "data_mmap", + "data_map mmap failed: %d\n", errno)) { + map_mmaped = NULL; + goto cleanup; + } + + bss_data = bss_mmaped; + map_data = map_mmaped; + + CHECK_FAIL(bss_data->in_val); + CHECK_FAIL(bss_data->out_val); + CHECK_FAIL(map_data->val[0]); + CHECK_FAIL(map_data->val[1]); + CHECK_FAIL(map_data->val[2]); + CHECK_FAIL(map_data->val[far]); + + link = bpf_program__attach_raw_tracepoint(prog, tp_name); + if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link))) + goto cleanup; + + bss_data->in_val = 123; + val = 111; + CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0)); + + usleep(1); + + CHECK_FAIL(bss_data->in_val != 123); + CHECK_FAIL(bss_data->out_val != 123); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 123); + CHECK_FAIL(map_data->val[far] != 3 * 123); + + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val)); + CHECK_FAIL(val != 111); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val)); + CHECK_FAIL(val != 222); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val)); + CHECK_FAIL(val != 123); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val)); + 
CHECK_FAIL(val != 3 * 123); + + /* data_map freeze should fail due to R/W mmap() */ + err = bpf_map_freeze(data_map_fd); + if (CHECK(!err || errno != EBUSY, "no_freeze", + "data_map freeze succeeded: err=%d, errno=%d\n", err, errno)) + goto cleanup; + + /* unmap R/W mapping */ + err = munmap(map_mmaped, map_sz); + map_mmaped = NULL; + if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno)) + goto cleanup; + + /* re-map as R/O now */ + map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0); + if (CHECK(map_mmaped == MAP_FAILED, "data_mmap", + "data_map R/O mmap failed: %d\n", errno)) { + map_mmaped = NULL; + goto cleanup; + } + map_data = map_mmaped; + + /* map/unmap in a loop to test ref counting */ + for (i = 0; i < 10; i++) { + int flags = i % 2 ? PROT_READ : PROT_WRITE; + void *p; + + p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0); + if (CHECK_FAIL(p == MAP_FAILED)) + goto cleanup; + err = munmap(p, map_sz); + if (CHECK_FAIL(err)) + goto cleanup; + } + + /* data_map freeze should now succeed due to no R/W mapping */ + err = bpf_map_freeze(data_map_fd); + if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n", + err, errno)) + goto cleanup; + + /* mapping as R/W now should fail */ + tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + data_map_fd, 0); + if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) { + munmap(tmp1, map_sz); + goto cleanup; + } + + bss_data->in_val = 321; + usleep(1); + CHECK_FAIL(bss_data->in_val != 321); + CHECK_FAIL(bss_data->out_val != 321); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 321); + CHECK_FAIL(map_data->val[far] != 3 * 321); + + /* check some more advanced mmap() manipulations */ + + /* map all but last page: pages 1-3 mapped */ + tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED, + data_map_fd, 0); + if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno)) + goto cleanup; + + /* unmap second page: pages 1, 3 mapped */ + err = munmap(tmp1 + page_size, page_size); + if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) { + munmap(tmp1, map_sz); + goto cleanup; + } + + /* map page 2 back */ + tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ, + MAP_SHARED | MAP_FIXED, data_map_fd, 0); + if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) { + munmap(tmp1, page_size); + munmap(tmp1 + 2*page_size, page_size); + goto cleanup; + } + CHECK(tmp1 + page_size != tmp2, "adv_mmap4", + "tmp1: %p, tmp2: %p\n", tmp1, tmp2); + + /* re-map all 4 pages */ + tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, + data_map_fd, 0); + if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) { + munmap(tmp1, 3 * page_size); /* unmap page 1 */ + goto cleanup; + } + CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2); + + map_data = tmp2; + CHECK_FAIL(bss_data->in_val != 321); + CHECK_FAIL(bss_data->out_val != 321); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 321); + CHECK_FAIL(map_data->val[far] != 3 * 321); + + munmap(tmp2, 4 * page_size); +cleanup: + if (bss_mmaped) + CHECK_FAIL(munmap(bss_mmaped, bss_sz)); + if (map_mmaped) + CHECK_FAIL(munmap(map_mmaped, map_sz)); + if (!IS_ERR_OR_NULL(link)) + bpf_link__destroy(link); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c index 525388971e08..041952524c55 100644 
--- a/tools/testing/selftests/bpf/prog_tests/pinning.c +++ b/tools/testing/selftests/bpf/prog_tests/pinning.c @@ -163,12 +163,15 @@ void test_pinning(void) goto out; } - /* swap pin paths of the two maps */ + /* set pin paths so that nopinmap2 will attempt to reuse the map at + * pinpath (which will fail), but not before pinmap has already been + * reused + */ bpf_object__for_each_map(map, obj) { if (!strcmp(bpf_map__name(map), "nopinmap")) + err = bpf_map__set_pin_path(map, nopinpath2); + else if (!strcmp(bpf_map__name(map), "nopinmap2")) err = bpf_map__set_pin_path(map, pinpath); - else if (!strcmp(bpf_map__name(map), "pinmap")) - err = bpf_map__set_pin_path(map, NULL); else continue; @@ -181,6 +184,17 @@ void test_pinning(void) if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno)) goto out; + /* nopinmap2 should have been pinned and cleaned up again */ + err = stat(nopinpath2, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath2", + "err %d errno %d\n", err, errno)) + goto out; + + /* pinmap should still be there */ + err = stat(pinpath, &statbuf); + if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno)) + goto out; + bpf_object__close(obj); /* test auto-pinning at custom path with open opt */ diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c new file mode 100644 index 000000000000..f5a7c832d0f2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_arrays___err_wrong_val_type x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c deleted file mode 100644 index 795a5b729176..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_arrays___err_wrong_val_type1 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c deleted file mode 100644 index 3af74b837c4d..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_arrays___err_wrong_val_type2 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c new file mode 100644 index 000000000000..cff6f1836cc5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_bitfields x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c new file mode 100644 index 000000000000..a1cd157d5451 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_bitfields___bit_sz_change x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c 
b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c new file mode 100644 index 000000000000..3f2c7b07c456 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_bitfields___bitfield_vs_int x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c new file mode 100644 index 000000000000..f9746d6be399 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_bitfields___err_too_big_bitfield x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c new file mode 100644 index 000000000000..e7c75a6953dd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_bitfields___just_big_enough x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c deleted file mode 100644 index 50369e8320a0..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_bitfield x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c deleted file mode 100644 index 823bac13d641..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_16 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c deleted file mode 100644 index b44f3be18535..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_32 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c deleted file mode 100644 index 9a3dd2099c0f..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_64 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c deleted file mode 100644 index 9f11ef5f6e88..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_8 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c new file mode 100644 index 000000000000..3c80903da5a4 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_size x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c new file mode 100644 index 000000000000..6dbd14436b52 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_size___diff_sz x) {} diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h index f5939d9d5c61..9311489e14b2 100644 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h @@ -386,14 +386,7 @@ struct core_reloc_arrays___err_non_array { struct core_reloc_arrays_substruct d[1][2]; }; -struct core_reloc_arrays___err_wrong_val_type1 { - char a[5]; /* char instead of int */ - char b[2][3][4]; - struct core_reloc_arrays_substruct c[3]; - struct core_reloc_arrays_substruct d[1][2]; -}; - -struct core_reloc_arrays___err_wrong_val_type2 { +struct core_reloc_arrays___err_wrong_val_type { int a[5]; char b[2][3][4]; int c[3]; /* value is not a struct */ @@ -589,67 +582,6 @@ struct core_reloc_ints___bool { int64_t s64_field; }; -struct core_reloc_ints___err_bitfield { - uint8_t u8_field; - int8_t s8_field; - uint16_t u16_field; - int16_t s16_field; - uint32_t u32_field: 32; /* bitfields are not supported */ - int32_t s32_field; - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_8 { - uint16_t u8_field; /* not 8-bit anymore */ - int16_t s8_field; /* not 8-bit anymore */ - - uint16_t u16_field; - int16_t s16_field; - uint32_t u32_field; - int32_t s32_field; - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_16 { - uint8_t u8_field; - int8_t s8_field; - - uint32_t u16_field; /* not 16-bit anymore */ - int32_t s16_field; /* not 16-bit anymore */ - - uint32_t u32_field; - int32_t s32_field; - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_32 { - uint8_t u8_field; - int8_t s8_field; - uint16_t u16_field; - int16_t s16_field; - - uint64_t u32_field; /* not 32-bit anymore */ - int64_t s32_field; /* not 32-bit anymore */ - - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_64 { - uint8_t u8_field; - int8_t s8_field; - uint16_t u16_field; - int16_t s16_field; - uint32_t u32_field; - int32_t s32_field; - - uint32_t u64_field; /* not 64-bit anymore */ - int32_t s64_field; /* not 64-bit anymore */ -}; - /* * MISC */ @@ -730,3 +662,106 @@ struct core_reloc_existence___err_wrong_arr_value_type { struct core_reloc_existence___err_wrong_struct_type { int s; }; + +/* + * BITFIELDS + */ +/* bitfield read results, all as plain integers */ +struct core_reloc_bitfields_output { + int64_t ub1; + int64_t ub2; + int64_t ub7; + int64_t sb4; + int64_t sb20; + int64_t u32; + int64_t s32; +}; + +struct core_reloc_bitfields { + /* unsigned bitfields */ + uint8_t ub1: 1; + uint8_t ub2: 2; + uint32_t ub7: 7; + /* signed bitfields */ + int8_t sb4: 4; + int32_t sb20: 20; + /* non-bitfields */ + uint32_t u32; + int32_t s32; +}; + +/* different bit sizes (both up and down) */ +struct core_reloc_bitfields___bit_sz_change { + /* unsigned bitfields */ + uint16_t ub1: 3; /* 1 -> 3 */ + uint32_t ub2: 20; /* 2 -> 20 */ + uint8_t ub7: 1; /* 7 -> 1 */ + /* signed bitfields 
*/ + int8_t sb4: 1; /* 4 -> 1 */ + int32_t sb20: 30; /* 20 -> 30 */ + /* non-bitfields */ + uint16_t u32; /* 32 -> 16 */ + int64_t s32; /* 32 -> 64 */ +}; + +/* turn bitfield into non-bitfield and vice versa */ +struct core_reloc_bitfields___bitfield_vs_int { + uint64_t ub1; /* 3 -> 64 non-bitfield */ + uint8_t ub2; /* 20 -> 8 non-bitfield */ + int64_t ub7; /* 7 -> 64 non-bitfield signed */ + int64_t sb4; /* 4 -> 64 non-bitfield signed */ + uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */ + int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */ + uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */ +}; + +struct core_reloc_bitfields___just_big_enough { + uint64_t ub1: 4; + uint64_t ub2: 60; /* packed tightly */ + uint32_t ub7; + uint32_t sb4; + uint32_t sb20; + uint32_t u32; + uint32_t s32; +} __attribute__((packed)) ; + +struct core_reloc_bitfields___err_too_big_bitfield { + uint64_t ub1: 4; + uint64_t ub2: 61; /* packed tightly */ + uint32_t ub7; + uint32_t sb4; + uint32_t sb20; + uint32_t u32; + uint32_t s32; +} __attribute__((packed)) ; + +/* + * SIZE + */ +struct core_reloc_size_output { + int int_sz; + int struct_sz; + int union_sz; + int arr_sz; + int arr_elem_sz; + int ptr_sz; + int enum_sz; +}; + +struct core_reloc_size { + int int_field; + struct { int x; } struct_field; + union { int x; } union_field; + int arr_field[4]; + void *ptr_field; + enum { VALUE = 123 } enum_field; +}; + +struct core_reloc_size___diff_sz { + uint64_t int_field; + struct { int x; int y; int z; } struct_field; + union { int x; char bla[123]; } union_field; + char arr_field[10]; + void *ptr_field; + enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field; +}; diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c new file mode 100644 index 000000000000..545788bf8d50 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fentry_test.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <linux/bpf.h> +#include "bpf_helpers.h" + +char _license[] SEC("license") = "GPL"; + +struct test1 { + ks32 a; +}; +static volatile __u64 test1_result; +SEC("fentry/bpf_fentry_test1") +int test1(struct test1 *ctx) +{ + test1_result = ctx->a == 1; + return 0; +} + +struct test2 { + ks32 a; + ku64 b; +}; +static volatile __u64 test2_result; +SEC("fentry/bpf_fentry_test2") +int test2(struct test2 *ctx) +{ + test2_result = ctx->a == 2 && ctx->b == 3; + return 0; +} + +struct test3 { + ks8 a; + ks32 b; + ku64 c; +}; +static volatile __u64 test3_result; +SEC("fentry/bpf_fentry_test3") +int test3(struct test3 *ctx) +{ + test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6; + return 0; +} + +struct test4 { + void *a; + ks8 b; + ks32 c; + ku64 d; +}; +static volatile __u64 test4_result; +SEC("fentry/bpf_fentry_test4") +int test4(struct test4 *ctx) +{ + test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 && + ctx->d == 10; + return 0; +} + +struct test5 { + ku64 a; + void *b; + ks16 c; + ks32 d; + ku64 e; +}; +static volatile __u64 test5_result; +SEC("fentry/bpf_fentry_test5") +int test5(struct test5 *ctx) +{ + test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 && + ctx->d == 14 && ctx->e == 15; + return 0; +} + +struct test6 { + ku64 a; + void *b; + ks16 c; + ks32 d; + void *e; + ks64 f; +}; +static volatile __u64 test6_result; +SEC("fentry/bpf_fentry_test6") +int test6(struct test6 *ctx) +{ + test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 && + ctx->d == 19 && ctx->e == (void 
*)20 && ctx->f == 21; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c new file mode 100644 index 000000000000..981f0474da5a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <linux/bpf.h> +#include "bpf_helpers.h" + +struct sk_buff { + unsigned int len; +}; + +struct args { + struct sk_buff *skb; + ks32 ret; +}; +static volatile __u64 test_result; +SEC("fexit/test_pkt_access") +int test_main(struct args *ctx) +{ + struct sk_buff *skb = ctx->skb; + int len; + + __builtin_preserve_access_index(({ + len = skb->len; + })); + if (len != 74 || ctx->ret != 0) + return 0; + test_result = 1; + return 0; +} + +struct args_subprog1 { + struct sk_buff *skb; + ks32 ret; +}; +static volatile __u64 test_result_subprog1; +SEC("fexit/test_pkt_access_subprog1") +int test_subprog1(struct args_subprog1 *ctx) +{ + struct sk_buff *skb = ctx->skb; + int len; + + __builtin_preserve_access_index(({ + len = skb->len; + })); + if (len != 74 || ctx->ret != 148) + return 0; + test_result_subprog1 = 1; + return 0; +} + +/* Though test_pkt_access_subprog2() is defined in C as: + * static __attribute__ ((noinline)) + * int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb) + * { + * return skb->len * val; + * } + * llvm optimizations remove 'int val' argument and generate BPF assembly: + * r0 = *(u32 *)(r1 + 0) + * w0 <<= 1 + * exit + * In such case the verifier falls back to conservative and + * tracing program can access arguments and return value as u64 + * instead of accurate types. + */ +struct args_subprog2 { + ku64 args[5]; + ku64 ret; +}; +static volatile __u64 test_result_subprog2; +SEC("fexit/test_pkt_access_subprog2") +int test_subprog2(struct args_subprog2 *ctx) +{ + struct sk_buff *skb = (void *)ctx->args[0]; + __u64 ret; + int len; + + bpf_probe_read_kernel(&len, sizeof(len), + __builtin_preserve_access_index(&skb->len)); + + ret = ctx->ret; + /* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32 + * which randomizes upper 32 bits after BPF_ALU32 insns. + * Hence after 'w0 <<= 1' upper bits of $rax are random. + * That is expected and correct. Trim them. 
+ */ + ret = (__u32) ret; + if (len != 74 || ret != 148) + return 0; + test_result_subprog2 = 1; + return 0; +} +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c new file mode 100644 index 000000000000..8b98b1a51784 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fexit_test.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <linux/bpf.h> +#include "bpf_helpers.h" + +char _license[] SEC("license") = "GPL"; + +struct test1 { + ks32 a; + ks32 ret; +}; +static volatile __u64 test1_result; +SEC("fexit/bpf_fentry_test1") +int test1(struct test1 *ctx) +{ + test1_result = ctx->a == 1 && ctx->ret == 2; + return 0; +} + +struct test2 { + ks32 a; + ku64 b; + ks32 ret; +}; +static volatile __u64 test2_result; +SEC("fexit/bpf_fentry_test2") +int test2(struct test2 *ctx) +{ + test2_result = ctx->a == 2 && ctx->b == 3 && ctx->ret == 5; + return 0; +} + +struct test3 { + ks8 a; + ks32 b; + ku64 c; + ks32 ret; +}; +static volatile __u64 test3_result; +SEC("fexit/bpf_fentry_test3") +int test3(struct test3 *ctx) +{ + test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6 && + ctx->ret == 15; + return 0; +} + +struct test4 { + void *a; + ks8 b; + ks32 c; + ku64 d; + ks32 ret; +}; +static volatile __u64 test4_result; +SEC("fexit/bpf_fentry_test4") +int test4(struct test4 *ctx) +{ + test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 && + ctx->d == 10 && ctx->ret == 34; + return 0; +} + +struct test5 { + ku64 a; + void *b; + ks16 c; + ks32 d; + ku64 e; + ks32 ret; +}; +static volatile __u64 test5_result; +SEC("fexit/bpf_fentry_test5") +int test5(struct test5 *ctx) +{ + test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 && + ctx->d == 14 && ctx->e == 15 && ctx->ret == 65; + return 0; +} + +struct test6 { + ku64 a; + void *b; + ks16 c; + ks32 d; + void *e; + ks64 f; + ks32 ret; +}; +static volatile __u64 test6_result; +SEC("fexit/bpf_fentry_test6") +int test6(struct test6 *ctx) +{ + test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 && + ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21 && + ctx->ret == 111; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c index 489319ea1d6a..dcc9feac8338 100644 --- a/tools/testing/selftests/bpf/progs/kfree_skb.c +++ b/tools/testing/selftests/bpf/progs/kfree_skb.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook #include <linux/bpf.h> +#include <stdbool.h> #include "bpf_helpers.h" #include "bpf_endian.h" @@ -43,6 +44,7 @@ struct sk_buff { refcount_t users; unsigned char *data; char __pkt_type_offset[0]; + char cb[48]; }; /* copy arguments from @@ -57,28 +59,41 @@ struct trace_kfree_skb { void *location; }; +struct meta { + int ifindex; + __u32 cb32_0; + __u8 cb8_0; +}; + SEC("tp_btf/kfree_skb") int trace_kfree_skb(struct trace_kfree_skb *ctx) { struct sk_buff *skb = ctx->skb; struct net_device *dev; - int ifindex; struct callback_head *ptr; void *func; int users; unsigned char *data; unsigned short pkt_data; + struct meta meta = {}; char pkt_type; + __u32 *cb32; + __u8 *cb8; __builtin_preserve_access_index(({ users = skb->users.refs.counter; data = skb->data; dev = skb->dev; - ifindex = dev->ifindex; ptr = dev->ifalias->rcuhead.next; func = ptr->func; + cb8 = (__u8 *)&skb->cb; + cb32 = (__u32 *)&skb->cb; })); + meta.ifindex = _(dev->ifindex); + meta.cb8_0 = cb8[8]; + 
meta.cb32_0 = cb32[2]; + bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset)); pkt_type &= 7; @@ -90,14 +105,66 @@ int trace_kfree_skb(struct trace_kfree_skb *ctx) _(skb->len), users, pkt_type); bpf_printk("skb->queue_mapping %d\n", _(skb->queue_mapping)); bpf_printk("dev->ifindex %d data %llx pkt_data %x\n", - ifindex, data, pkt_data); + meta.ifindex, data, pkt_data); + bpf_printk("cb8_0:%x cb32_0:%x\n", meta.cb8_0, meta.cb32_0); - if (users != 1 || pkt_data != bpf_htons(0x86dd) || ifindex != 1) + if (users != 1 || pkt_data != bpf_htons(0x86dd) || meta.ifindex != 1) /* raw tp ignores return value */ return 0; /* send first 72 byte of the packet to user space */ bpf_skb_output(skb, &perf_buf_map, (72ull << 32) | BPF_F_CURRENT_CPU, - &ifindex, sizeof(ifindex)); + &meta, sizeof(meta)); + return 0; +} + +static volatile struct { + bool fentry_test_ok; + bool fexit_test_ok; +} result; + +struct eth_type_trans_args { + struct sk_buff *skb; + struct net_device *dev; + unsigned short protocol; /* return value available to fexit progs */ +}; + +SEC("fentry/eth_type_trans") +int fentry_eth_type_trans(struct eth_type_trans_args *ctx) +{ + struct sk_buff *skb = ctx->skb; + struct net_device *dev = ctx->dev; + int len, ifindex; + + __builtin_preserve_access_index(({ + len = skb->len; + ifindex = dev->ifindex; + })); + + /* fentry sees full packet including L2 header */ + if (len != 74 || ifindex != 1) + return 0; + result.fentry_test_ok = true; + return 0; +} + +SEC("fexit/eth_type_trans") +int fexit_eth_type_trans(struct eth_type_trans_args *ctx) +{ + struct sk_buff *skb = ctx->skb; + struct net_device *dev = ctx->dev; + int len, ifindex; + + __builtin_preserve_access_index(({ + len = skb->len; + ifindex = dev->ifindex; + })); + + /* fexit sees packet without L2 header that eth_type_trans should have + * consumed. 
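+	 * The test packet is 74 bytes on the wire, so stripping the 14-byte
+	 * Ethernet header leaves 60 bytes; 0x86dd is ETH_P_IPV6.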
+ */ + if (len != 60 || ctx->protocol != bpf_htons(0x86dd) || ifindex != 1) + return 0; + result.fexit_test_ok = true; return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c index 763c51447c19..62ad7e22105e 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c @@ -26,7 +26,7 @@ struct dummy_tracepoint_args { }; __attribute__((noinline)) -static int test_long_fname_2(struct dummy_tracepoint_args *arg) +int test_long_fname_2(struct dummy_tracepoint_args *arg) { struct ipv_counts *counts; int key = 0; @@ -44,7 +44,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg) } __attribute__((noinline)) -static int test_long_fname_1(struct dummy_tracepoint_args *arg) +int test_long_fname_1(struct dummy_tracepoint_args *arg) { return test_long_fname_2(arg); } diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c index 96f9e8451029..fb8d91a1dbe0 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c @@ -34,7 +34,7 @@ struct dummy_tracepoint_args { }; __attribute__((noinline)) -static int test_long_fname_2(struct dummy_tracepoint_args *arg) +int test_long_fname_2(struct dummy_tracepoint_args *arg) { struct ipv_counts *counts; int key = 0; @@ -57,7 +57,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg) } __attribute__((noinline)) -static int test_long_fname_1(struct dummy_tracepoint_args *arg) +int test_long_fname_1(struct dummy_tracepoint_args *arg) { return test_long_fname_2(arg); } diff --git a/tools/testing/selftests/bpf/progs/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c index 434188c37774..3f4422044759 100644 --- a/tools/testing/selftests/bpf/progs/test_btf_nokv.c +++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c @@ -23,7 +23,7 @@ struct dummy_tracepoint_args { }; __attribute__((noinline)) -static int test_long_fname_2(struct dummy_tracepoint_args *arg) +int test_long_fname_2(struct dummy_tracepoint_args *arg) { struct ipv_counts *counts; int key = 0; @@ -41,7 +41,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg) } __attribute__((noinline)) -static int test_long_fname_1(struct dummy_tracepoint_args *arg) +int test_long_fname_1(struct dummy_tracepoint_args *arg) { return test_long_fname_2(arg); } diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c new file mode 100644 index 000000000000..738b34b72655 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook + +#include <linux/bpf.h> +#include <stdint.h> +#include "bpf_helpers.h" +#include "bpf_core_read.h" + +char _license[] SEC("license") = "GPL"; + +static volatile struct data { + char in[256]; + char out[256]; +} data; + +struct core_reloc_bitfields { + /* unsigned bitfields */ + uint8_t ub1: 1; + uint8_t ub2: 2; + uint32_t ub7: 7; + /* signed bitfields */ + int8_t sb4: 4; + int32_t sb20: 20; + /* non-bitfields */ + uint32_t u32; + int32_t s32; +}; + +/* bitfield read results, all as plain integers */ +struct core_reloc_bitfields_output { + int64_t ub1; + int64_t ub2; + int64_t ub7; + int64_t sb4; + int64_t sb20; + int64_t u32; + int64_t s32; +}; + +struct pt_regs; + +struct trace_sys_enter 
{ + struct pt_regs *regs; + long id; +}; + +SEC("tp_btf/sys_enter") +int test_core_bitfields_direct(void *ctx) +{ + struct core_reloc_bitfields *in = (void *)&data.in; + struct core_reloc_bitfields_output *out = (void *)&data.out; + + out->ub1 = BPF_CORE_READ_BITFIELD(in, ub1); + out->ub2 = BPF_CORE_READ_BITFIELD(in, ub2); + out->ub7 = BPF_CORE_READ_BITFIELD(in, ub7); + out->sb4 = BPF_CORE_READ_BITFIELD(in, sb4); + out->sb20 = BPF_CORE_READ_BITFIELD(in, sb20); + out->u32 = BPF_CORE_READ_BITFIELD(in, u32); + out->s32 = BPF_CORE_READ_BITFIELD(in, s32); + + return 0; +} + diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c new file mode 100644 index 000000000000..e466e3ab7de4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook + +#include <linux/bpf.h> +#include <stdint.h> +#include "bpf_helpers.h" +#include "bpf_core_read.h" + +char _license[] SEC("license") = "GPL"; + +static volatile struct data { + char in[256]; + char out[256]; +} data; + +struct core_reloc_bitfields { + /* unsigned bitfields */ + uint8_t ub1: 1; + uint8_t ub2: 2; + uint32_t ub7: 7; + /* signed bitfields */ + int8_t sb4: 4; + int32_t sb20: 20; + /* non-bitfields */ + uint32_t u32; + int32_t s32; +}; + +/* bitfield read results, all as plain integers */ +struct core_reloc_bitfields_output { + int64_t ub1; + int64_t ub2; + int64_t ub7; + int64_t sb4; + int64_t sb20; + int64_t u32; + int64_t s32; +}; + +SEC("raw_tracepoint/sys_enter") +int test_core_bitfields(void *ctx) +{ + struct core_reloc_bitfields *in = (void *)&data.in; + struct core_reloc_bitfields_output *out = (void *)&data.out; + uint64_t res; + + out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1); + out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2); + out->ub7 = BPF_CORE_READ_BITFIELD_PROBED(in, ub7); + out->sb4 = BPF_CORE_READ_BITFIELD_PROBED(in, sb4); + out->sb20 = BPF_CORE_READ_BITFIELD_PROBED(in, sb20); + out->u32 = BPF_CORE_READ_BITFIELD_PROBED(in, u32); + out->s32 = BPF_CORE_READ_BITFIELD_PROBED(in, s32); + + return 0; +} + diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c new file mode 100644 index 000000000000..9a92998d9107 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook + +#include <linux/bpf.h> +#include <stdint.h> +#include "bpf_helpers.h" +#include "bpf_core_read.h" + +char _license[] SEC("license") = "GPL"; + +static volatile struct data { + char in[256]; + char out[256]; +} data; + +struct core_reloc_size_output { + int int_sz; + int struct_sz; + int union_sz; + int arr_sz; + int arr_elem_sz; + int ptr_sz; + int enum_sz; +}; + +struct core_reloc_size { + int int_field; + struct { int x; } struct_field; + union { int x; } union_field; + int arr_field[4]; + void *ptr_field; + enum { VALUE = 123 } enum_field; +}; + +SEC("raw_tracepoint/sys_enter") +int test_core_size(void *ctx) +{ + struct core_reloc_size *in = (void *)&data.in; + struct core_reloc_size_output *out = (void *)&data.out; + + out->int_sz = bpf_core_field_size(in->int_field); + out->struct_sz = bpf_core_field_size(in->struct_field); + out->union_sz = bpf_core_field_size(in->union_field); + out->arr_sz = bpf_core_field_size(in->arr_field); + out->arr_elem_sz = 
bpf_core_field_size(in->arr_field[0]); + out->ptr_sz = bpf_core_field_size(in->ptr_field); + out->enum_sz = bpf_core_field_size(in->enum_field); + + return 0; +} + diff --git a/tools/testing/selftests/bpf/progs/test_mmap.c b/tools/testing/selftests/bpf/progs/test_mmap.c new file mode 100644 index 000000000000..0d2ec9fbcf61 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_mmap.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Facebook + +#include <linux/bpf.h> +#include <stdint.h> +#include "bpf_helpers.h" + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 512 * 4); /* at least 4 pages of data */ + __uint(map_flags, BPF_F_MMAPABLE); + __type(key, __u32); + __type(value, __u64); +} data_map SEC(".maps"); + +static volatile __u64 in_val; +static volatile __u64 out_val; + +SEC("raw_tracepoint/sys_enter") +int test_mmap(void *ctx) +{ + int zero = 0, one = 1, two = 2, far = 1500; + __u64 val, *p; + + out_val = in_val; + + /* data_map[2] = in_val; */ + bpf_map_update_elem(&data_map, &two, (const void *)&in_val, 0); + + /* data_map[1] = data_map[0] * 2; */ + p = bpf_map_lookup_elem(&data_map, &zero); + if (p) { + val = (*p) * 2; + bpf_map_update_elem(&data_map, &one, &val, 0); + } + + /* data_map[far] = in_val * 3; */ + val = in_val * 3; + bpf_map_update_elem(&data_map, &far, &val, 0); + + return 0; +} + diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c index f69a4a50d056..f20e7e00373f 100644 --- a/tools/testing/selftests/bpf/progs/test_pinning.c +++ b/tools/testing/selftests/bpf/progs/test_pinning.c @@ -21,7 +21,7 @@ struct { } nopinmap SEC(".maps"); struct { - __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(type, BPF_MAP_TYPE_HASH); __uint(max_entries, 1); __type(key, __u32); __type(value, __u64); diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c index 7cf42d14103f..3a7b4b607ed3 100644 --- a/tools/testing/selftests/bpf/progs/test_pkt_access.c +++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c @@ -17,8 +17,38 @@ #define barrier() __asm__ __volatile__("": : :"memory") int _version SEC("version") = 1; -SEC("test1") -int process(struct __sk_buff *skb) +/* llvm will optimize both subprograms into exactly the same BPF assembly + * + * Disassembly of section .text: + * + * 0000000000000000 test_pkt_access_subprog1: + * ; return skb->len * 2; + * 0: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0) + * 1: 64 00 00 00 01 00 00 00 w0 <<= 1 + * 2: 95 00 00 00 00 00 00 00 exit + * + * 0000000000000018 test_pkt_access_subprog2: + * ; return skb->len * val; + * 3: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0) + * 4: 64 00 00 00 01 00 00 00 w0 <<= 1 + * 5: 95 00 00 00 00 00 00 00 exit + * + * Which makes it an interesting test for BTF-enabled verifier. 
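+ * In particular, a tracing program attached to test_pkt_access_subprog2()
+ * has to treat its arguments and return value as raw u64s, which is what
+ * fexit_bpf2bpf.c does.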
+ */ +static __attribute__ ((noinline)) +int test_pkt_access_subprog1(volatile struct __sk_buff *skb) +{ + return skb->len * 2; +} + +static __attribute__ ((noinline)) +int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb) +{ + return skb->len * val; +} + +SEC("classifier/test_pkt_access") +int test_pkt_access(struct __sk_buff *skb) { void *data_end = (void *)(long)skb->data_end; void *data = (void *)(long)skb->data; @@ -48,6 +78,10 @@ int process(struct __sk_buff *skb) tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len); } + if (test_pkt_access_subprog1(skb) != skb->len * 2) + return TC_ACT_SHOT; + if (test_pkt_access_subprog2(2, skb) != skb->len * 2) + return TC_ACT_SHOT; if (tcp) { if (((void *)(tcp) + 20) > data_end || proto != 6) return TC_ACT_SHOT; diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c index c4d104428643..69880c1e7700 100644 --- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c +++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c @@ -132,8 +132,10 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb, *pad_off = 0; // we can only go as far as ~10 TLVs due to the BPF max stack size + // workaround: define induction variable "i" as "long" instead + // of "int" to prevent alu32 sub-register spilling. #pragma clang loop unroll(disable) - for (int i = 0; i < 100; i++) { + for (long i = 0; i < 100; i++) { struct sr6_tlv_t tlv; if (cur_off == *tlv_off) diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c index 608a06871572..d22e438198cf 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c @@ -44,7 +44,10 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx) unsigned long tcp_mem[TCP_MEM_LOOPS] = {}; char value[MAX_VALUE_STR_LEN]; unsigned char i, off = 0; - int ret; + /* a workaround to prevent compiler from generating + * codes verifier cannot handle yet. + */ + volatile int ret; if (ctx->write) return 0; diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh index ff0d31d38061..7c76b841b17b 100755 --- a/tools/testing/selftests/bpf/test_tc_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh @@ -62,6 +62,10 @@ cleanup() { if [[ -f "${infile}" ]]; then rm "${infile}" fi + + if [[ -n $server_pid ]]; then + kill $server_pid 2> /dev/null + fi } server_listen() { @@ -77,6 +81,7 @@ client_connect() { verify_data() { wait "${server_pid}" + server_pid= # sha1sum returns two fields [sha1] [filepath] # convert to bash array and access first elem insum=($(sha1sum ${infile})) diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh index 2b5f4f7cc905..7b2acba82a49 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh @@ -8,6 +8,11 @@ source $lib_dir/lib.sh source $lib_dir/tc_common.sh source $lib_dir/devlink_lib.sh +if [ "$DEVLINK_VIDDID" != "15b3:cf6c" ]; then + echo "SKIP: test is tailored for Mellanox Spectrum-2" + exit 1 +fi + current_test="" cleanup() @@ -16,11 +21,13 @@ cleanup() if [ ! -z $current_test ]; then ${current_test}_cleanup fi + # Need to reload in order to avoid router abort. 
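+	# The scale tests may try to offload more routes than the device can
+	# hold, so reload to start the next test from a clean state.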
+ devlink_reload } trap cleanup EXIT -ALL_TESTS="tc_flower mirror_gre" +ALL_TESTS="router tc_flower mirror_gre" for current_test in ${TESTS:-$ALL_TESTS}; do source ${current_test}_scale.sh @@ -34,6 +41,7 @@ for current_test in ${TESTS:-$ALL_TESTS}; do setup_wait $num_netifs ${current_test}_test "$target" "$should_fail" ${current_test}_cleanup + devlink_reload if [[ "$should_fail" -eq 0 ]]; then log_test "'$current_test' $target" else diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh new file mode 100644 index 000000000000..1897e163e3ab --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/router_scale.sh @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../router_scale.sh + +router_get_target() +{ + local should_fail=$1 + local target + + target=$(devlink_resource_size_get kvd) + + if [[ $should_fail -eq 0 ]]; then + target=$((target * 85 / 100)) + else + target=$((target + 1)) + fi + + echo $target +} diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh new file mode 100755 index 000000000000..eb8e2a23bbb4 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ethtool.sh @@ -0,0 +1,318 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS=" + same_speeds_autoneg_off + different_speeds_autoneg_off + combination_of_neg_on_and_off + advertise_subset_of_speeds + check_highest_speed_is_chosen + different_speeds_autoneg_on +" +NUM_NETIFS=2 +source lib.sh +source ethtool_lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/24 +} + +h1_destroy() +{ + simple_if_fini $h1 192.0.2.1/24 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/24 +} + +h2_destroy() +{ + simple_if_fini $h2 192.0.2.2/24 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + h2=${NETIFS[p2]} + + h1_create + h2_create +} + +cleanup() +{ + pre_cleanup + + h2_destroy + h1_destroy +} + +different_speeds_get() +{ + local dev1=$1; shift + local dev2=$1; shift + local with_mode=$1; shift + local adver=$1; shift + + local -a speeds_arr + + speeds_arr=($(common_speeds_get $dev1 $dev2 $with_mode $adver)) + if [[ ${#speeds_arr[@]} < 2 ]]; then + check_err 1 "cannot check different speeds. There are not enough speeds" + fi + + echo ${speeds_arr[0]} ${speeds_arr[1]} +} + +same_speeds_autoneg_off() +{ + # Check that when each of the reported speeds is forced, the links come + # up and are operational. + local -a speeds_arr=($(common_speeds_get $h1 $h2 0 0)) + + for speed in "${speeds_arr[@]}"; do + RET=0 + ethtool_set $h1 speed $speed autoneg off + ethtool_set $h2 speed $speed autoneg off + + setup_wait_dev_with_timeout $h1 + setup_wait_dev_with_timeout $h2 + ping_do $h1 192.0.2.2 + check_err $? "speed $speed autoneg off" + log_test "force of same speed autoneg off" + log_info "speed = $speed" + done + + ethtool -s $h2 autoneg on + ethtool -s $h1 autoneg on +} + +different_speeds_autoneg_off() +{ + # Test that when we force different speeds, links are not up and ping + # fails. + RET=0 + + local -a speeds_arr=($(different_speeds_get $h1 $h2 0 0)) + local speed1=${speeds_arr[0]} + local speed2=${speeds_arr[1]} + + ethtool_set $h1 speed $speed1 autoneg off + ethtool_set $h2 speed $speed2 autoneg off + + setup_wait_dev_with_timeout $h1 + setup_wait_dev_with_timeout $h2 + ping_do $h1 192.0.2.2 + check_fail $? 
"ping with different speeds" + + log_test "force of different speeds autoneg off" + + ethtool -s $h2 autoneg on + ethtool -s $h1 autoneg on +} + +combination_of_neg_on_and_off() +{ + # Test that when one device is forced to a speed supported by both + # endpoints and the other device is configured to autoneg on, the links + # are up and ping passes. + local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1)) + + for speed in "${speeds_arr[@]}"; do + RET=0 + ethtool_set $h1 speed $speed autoneg off + + setup_wait_dev_with_timeout $h1 + setup_wait_dev_with_timeout $h2 + ping_do $h1 192.0.2.2 + check_err $? "h1-speed=$speed autoneg off, h2 autoneg on" + log_test "one side with autoneg off and another with autoneg on" + log_info "force speed = $speed" + done + + ethtool -s $h1 autoneg on +} + +hex_speed_value_get() +{ + local speed=$1; shift + + local shift_size=${speed_values[$speed]} + speed=$((0x1 << $"shift_size")) + printf "%#x" "$speed" +} + +subset_of_common_speeds_get() +{ + local dev1=$1; shift + local dev2=$1; shift + local adver=$1; shift + + local -a speeds_arr=($(common_speeds_get $dev1 $dev2 0 $adver)) + local speed_to_advertise=0 + local speed_to_remove=${speeds_arr[0]} + speed_to_remove+='base' + + local -a speeds_mode_arr=($(common_speeds_get $dev1 $dev2 1 $adver)) + + for speed in ${speeds_mode_arr[@]}; do + if [[ $speed != $speed_to_remove* ]]; then + speed=$(hex_speed_value_get $speed) + speed_to_advertise=$(($speed_to_advertise | \ + $speed)) + fi + + done + + # Convert to hex. + printf "%#x" "$speed_to_advertise" +} + +speed_to_advertise_get() +{ + # The function returns the hex number that is composed by OR-ing all + # the modes corresponding to the provided speed. + local speed_without_mode=$1; shift + local supported_speeds=("$@"); shift + local speed_to_advertise=0 + + speed_without_mode+='base' + + for speed in ${supported_speeds[@]}; do + if [[ $speed == $speed_without_mode* ]]; then + speed=$(hex_speed_value_get $speed) + speed_to_advertise=$(($speed_to_advertise | \ + $speed)) + fi + + done + + # Convert to hex. + printf "%#x" "$speed_to_advertise" +} + +advertise_subset_of_speeds() +{ + # Test that when one device advertises a subset of speeds and another + # advertises a specific speed (but all modes of this speed), the links + # are up and ping passes. + RET=0 + + local speed_1_to_advertise=$(subset_of_common_speeds_get $h1 $h2 1) + ethtool_set $h1 advertise $speed_1_to_advertise + + if [ $RET != 0 ]; then + log_test "advertise subset of speeds" + return + fi + + local -a speeds_arr_without_mode=($(common_speeds_get $h1 $h2 0 1)) + # Check only speeds that h1 advertised. Remove the first speed. + unset speeds_arr_without_mode[0] + local -a speeds_arr_with_mode=($(common_speeds_get $h1 $h2 1 1)) + + for speed_value in ${speeds_arr_without_mode[@]}; do + RET=0 + local speed_2_to_advertise=$(speed_to_advertise_get $speed_value \ + "${speeds_arr_with_mode[@]}") + ethtool_set $h2 advertise $speed_2_to_advertise + + setup_wait_dev_with_timeout $h1 + setup_wait_dev_with_timeout $h2 + ping_do $h1 192.0.2.2 + check_err $? "h1=$speed_1_to_advertise, h2=$speed_2_to_advertise ($speed_value)" + + log_test "advertise subset of speeds" + log_info "h1=$speed_1_to_advertise, h2=$speed_2_to_advertise" + done + + ethtool -s $h2 autoneg on + ethtool -s $h1 autoneg on +} + +check_highest_speed_is_chosen() +{ + # Test that when one device advertises a subset of speeds, the other + # chooses the highest speed. This test checks configuration without + # traffic. 
+ RET=0 + + local max_speed + local chosen_speed + local speed_to_advertise=$(subset_of_common_speeds_get $h1 $h2 1) + + ethtool_set $h1 advertise $speed_to_advertise + + if [ $RET != 0 ]; then + log_test "check highest speed" + return + fi + + local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1)) + # Remove the first speed, h1 does not advertise this speed. + unset speeds_arr[0] + + max_speed=${speeds_arr[0]} + for current in ${speeds_arr[@]}; do + if [[ $current -gt $max_speed ]]; then + max_speed=$current + fi + done + + setup_wait_dev_with_timeout $h1 + setup_wait_dev_with_timeout $h2 + chosen_speed=$(ethtool $h1 | grep 'Speed:') + chosen_speed=${chosen_speed%"Mb/s"*} + chosen_speed=${chosen_speed#*"Speed: "} + ((chosen_speed == max_speed)) + check_err $? "h1 advertise $speed_to_advertise, h2 sync to speed $chosen_speed" + + log_test "check highest speed" + + ethtool -s $h2 autoneg on + ethtool -s $h1 autoneg on +} + +different_speeds_autoneg_on() +{ + # Test that when we configure links to advertise different speeds, + # links are not up and ping fails. + RET=0 + + local -a speeds=($(different_speeds_get $h1 $h2 1 1)) + local speed1=${speeds[0]} + local speed2=${speeds[1]} + + speed1=$(hex_speed_value_get $speed1) + speed2=$(hex_speed_value_get $speed2) + + ethtool_set $h1 advertise $speed1 + ethtool_set $h2 advertise $speed2 + + if (($RET)); then + setup_wait_dev_with_timeout $h1 + setup_wait_dev_with_timeout $h2 + ping_do $h1 192.0.2.2 + check_fail $? "ping with different speeds autoneg on" + fi + + log_test "advertise different speeds autoneg on" + + ethtool -s $h2 autoneg on + ethtool -s $h1 autoneg on +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +declare -gA speed_values +eval "speed_values=($(speeds_arr_get))" + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/ethtool_lib.sh b/tools/testing/selftests/net/forwarding/ethtool_lib.sh new file mode 100755 index 000000000000..925d229a59d8 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ethtool_lib.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +speeds_arr_get() +{ + cmd='/ETHTOOL_LINK_MODE_[^[:space:]]*_BIT[[:space:]]+=[[:space:]]+/ \ + {sub(/,$/, "") \ + sub(/ETHTOOL_LINK_MODE_/,"") \ + sub(/_BIT/,"") \ + sub(/_Full/,"/Full") \ + sub(/_Half/,"/Half");\ + print "["$1"]="$3}' + + awk "${cmd}" /usr/include/linux/ethtool.h +} + +ethtool_set() +{ + local cmd="$@" + local out=$(ethtool -s $cmd 2>&1 | wc -l) + + check_err $out "error in configuration. $cmd" +} + +dev_speeds_get() +{ + local dev=$1; shift + local with_mode=$1; shift + local adver=$1; shift + local speeds_str + + if (($adver)); then + mode="Advertised link modes" + else + mode="Supported link modes" + fi + + speeds_str=$(ethtool "$dev" | \ + # Snip everything before the link modes section. + sed -n '/'"$mode"':/,$p' | \ + # Quit processing the rest at the start of the next section. + # When checking, skip the header of this section (hence the 2,). + sed -n '2,${/^[\t][^ \t]/q};p' | \ + # Drop the section header of the current section. 
+ cut -d':' -f2) + + local -a speeds_arr=($speeds_str) + if [[ $with_mode -eq 0 ]]; then + for ((i=0; i<${#speeds_arr[@]}; i++)); do + speeds_arr[$i]=${speeds_arr[$i]%base*} + done + fi + echo ${speeds_arr[@]} +} + +common_speeds_get() +{ + dev1=$1; shift + dev2=$1; shift + with_mode=$1; shift + adver=$1; shift + + local -a dev1_speeds=($(dev_speeds_get $dev1 $with_mode $adver)) + local -a dev2_speeds=($(dev_speeds_get $dev2 $with_mode $adver)) + + comm -12 \ + <(printf '%s\n' "${dev1_speeds[@]}" | sort -u) \ + <(printf '%s\n' "${dev2_speeds[@]}" | sort -u) +} diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 8b48ec54d058..1f64e7348f69 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -18,6 +18,8 @@ NETIF_CREATE=${NETIF_CREATE:=yes} MCD=${MCD:=smcrouted} MC_CLI=${MC_CLI:=smcroutectl} PING_TIMEOUT=${PING_TIMEOUT:=5} +WAIT_TIMEOUT=${WAIT_TIMEOUT:=20} +INTERFACE_TIMEOUT=${INTERFACE_TIMEOUT:=600} relative_path="${BASH_SOURCE%/*}" if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then @@ -226,24 +228,45 @@ log_info() setup_wait_dev() { local dev=$1; shift + local wait_time=${1:-$WAIT_TIME}; shift - while true; do + setup_wait_dev_with_timeout "$dev" $INTERFACE_TIMEOUT $wait_time + + if (($?)); then + check_err 1 + log_test setup_wait_dev ": Interface $dev does not come up." + exit 1 + fi +} + +setup_wait_dev_with_timeout() +{ + local dev=$1; shift + local max_iterations=${1:-$WAIT_TIMEOUT}; shift + local wait_time=${1:-$WAIT_TIME}; shift + local i + + for ((i = 1; i <= $max_iterations; ++i)); do ip link show dev $dev up \ | grep 'state UP' &> /dev/null if [[ $? -ne 0 ]]; then sleep 1 else - break + sleep $wait_time + return 0 fi done + + return 1 } setup_wait() { local num_netifs=${1:-$NUM_NETIFS} + local i for ((i = 1; i <= num_netifs; ++i)); do - setup_wait_dev ${NETIFS[p$i]} + setup_wait_dev ${NETIFS[p$i]} 0 done # Make sure links are ready. |