633 files changed, 14205 insertions(+), 8240 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net index beb8ec4dabbc..5ecfd72ba684 100644 --- a/Documentation/ABI/testing/sysfs-class-net +++ b/Documentation/ABI/testing/sysfs-class-net @@ -188,6 +188,14 @@ Description: Indicates the interface unique physical port identifier within the NIC, as a string. +What: /sys/class/net/<iface>/phys_port_name +Date: March 2015 +KernelVersion: 4.0 +Contact: [email protected] +Description: + Indicates the interface physical port name within the NIC, + as a string. + What: /sys/class/net/<iface>/speed Date: October 2009 KernelVersion: 2.6.33 diff --git a/Documentation/ABI/testing/sysfs-class-net-queues b/Documentation/ABI/testing/sysfs-class-net-queues index 5e9aeb91d355..0c0df91b1516 100644 --- a/Documentation/ABI/testing/sysfs-class-net-queues +++ b/Documentation/ABI/testing/sysfs-class-net-queues @@ -24,6 +24,14 @@ Description: Indicates the number of transmit timeout events seen by this network interface transmit queue. +What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate +Date: March 2015 +KernelVersion: 4.1 +Contact: [email protected] +Description: + A Mbps max-rate set for the queue, a value of zero means disabled, + default is disabled. + What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus Date: November 2010 KernelVersion: 2.6.38 diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt index f4445e5a2bbb..1e097037349c 100644 --- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt +++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt @@ -22,6 +22,8 @@ Optional Properties: - pclkN, clkN: Pairs of parent of input clock and input clock to the devices in this power domain. Maximum of 4 pairs (N = 0 to 3) are supported currently. +- power-domains: phandle pointing to the parent power domain, for more details + see Documentation/devicetree/bindings/power/power_domain.txt Node of a device using power domains must have a power-domains property defined with a phandle to respective power domain. diff --git a/Documentation/devicetree/bindings/arm/sti.txt b/Documentation/devicetree/bindings/arm/sti.txt index d70ec358736c..8d27f6b084c7 100644 --- a/Documentation/devicetree/bindings/arm/sti.txt +++ b/Documentation/devicetree/bindings/arm/sti.txt @@ -13,6 +13,10 @@ Boards with the ST STiH407 SoC shall have the following properties: Required root node property: compatible = "st,stih407"; +Boards with the ST STiH410 SoC shall have the following properties: +Required root node property: +compatible = "st,stih410"; + Boards with the ST STiH418 SoC shall have the following properties: Required root node property: compatible = "st,stih418"; diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt index 6151999c5dca..dc7961b33076 100644 --- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt +++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt @@ -15,6 +15,7 @@ Required properties for all the ethernet interfaces: - "ring_csr": Descriptor ring control and status register address space - "ring_cmd": Descriptor ring command register address space - interrupts: Ethernet main interrupt +- port-id: Port number (0 or 1) - clocks: Reference to the clock entry. 
- local-mac-address: MAC address assigned to this device - phy-connection-type: Interface type between ethernet device and PHY device @@ -49,6 +50,7 @@ Example: <0x0 0X10000000 0x0 0X200>; reg-names = "enet_csr", "ring_csr", "ring_cmd"; interrupts = <0x0 0x3c 0x4>; + port-id = <0>; clocks = <&menetclk 0>; local-mac-address = [00 01 73 00 00 01]; phy-connection-type = "rgmii"; diff --git a/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt b/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt index 0071883c08d8..fb6d49f184ed 100644 --- a/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt +++ b/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt @@ -13,11 +13,15 @@ Required properties: - cca-gpio: GPIO spec for the CCA pin - vreg-gpio: GPIO spec for the VREG pin - reset-gpio: GPIO spec for the RESET pin +Optional properties: + - amplified: include if the CC2520 is connected to a CC2591 amplifier + Example: cc2520@0 { compatible = "ti,cc2520"; reg = <0>; spi-max-frequency = <4000000>; + amplified; pinctrl-names = "default"; pinctrl-0 = <&cc2520_cape_pins>; fifo-gpio = <&gpio1 18 0>; diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt index f9c07710478d..d0e6fa38f335 100644 --- a/Documentation/devicetree/bindings/net/keystone-netcp.txt +++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt @@ -49,6 +49,7 @@ Required properties: - compatible: Should be "ti,netcp-1.0" - clocks: phandle to the reference clocks for the subsystem. - dma-id: Navigator packet dma instance id. +- ranges: address range of NetCP (includes, Ethernet SS, PA and SA) Optional properties: - reg: register location and the size for the following register @@ -64,10 +65,30 @@ NetCP device properties: Device specification for NetCP sub-modules. 1Gb/10Gb (gbe/xgbe) ethernet switch sub-module specifications. Required properties: - label: Must be "netcp-gbe" for 1Gb & "netcp-xgbe" for 10Gb. +- compatible: Must be one of below:- + "ti,netcp-gbe" for 1GbE on NetCP 1.4 + "ti,netcp-gbe-5" for 1GbE N NetCP 1.5 (N=5) + "ti,netcp-gbe-9" for 1GbE N NetCP 1.5 (N=9) + "ti,netcp-gbe-2" for 1GbE N NetCP 1.5 (N=2) + "ti,netcp-xgbe" for 10 GbE + - reg: register location and the size for the following register regions in the specified order. - - subsystem registers - - serdes registers + - switch subsystem registers + - sgmii port3/4 module registers (only for NetCP 1.4) + - switch module registers + - serdes registers (only for 10G) + + NetCP 1.4 ethss, here is the order + index #0 - switch subsystem registers + index #1 - sgmii port3/4 module registers + index #2 - switch module registers + + NetCP 1.5 ethss 9 port, 5 port and 2 port + index #0 - switch subsystem registers + index #1 - switch module registers + index #2 - serdes registers + - tx-channel: the navigator packet dma channel name for tx. - tx-queue: the navigator queue number associated with the tx dma channel. 
- interfaces: specification for each of the switch port to be registered as a @@ -120,14 +141,13 @@ Optional properties: Example binding: -netcp: netcp@2090000 { +netcp: netcp@2000000 { reg = <0x2620110 0x8>; reg-names = "efuse"; compatible = "ti,netcp-1.0"; #address-cells = <1>; #size-cells = <1>; - ranges; - + ranges = <0 0x2000000 0xfffff>; clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>; dma-coherent; /* big-endian; */ @@ -137,9 +157,9 @@ netcp: netcp@2090000 { #address-cells = <1>; #size-cells = <1>; ranges; - gbe@0x2090000 { + gbe@90000 { label = "netcp-gbe"; - reg = <0x2090000 0xf00>; + reg = <0x90000 0x300>, <0x90400 0x400>, <0x90800 0x700>; /* enable-ale; */ tx-queue = <648>; tx-channel = <8>; diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt index 98c16672ab5f..0f8ed3710c66 100644 --- a/Documentation/devicetree/bindings/power/power_domain.txt +++ b/Documentation/devicetree/bindings/power/power_domain.txt @@ -19,6 +19,16 @@ Required properties: providing multiple PM domains (e.g. power controllers), but can be any value as specified by device tree binding documentation of particular provider. +Optional properties: + - power-domains : A phandle and PM domain specifier as defined by bindings of + the power controller specified by phandle. + Some power domains might be powered from another power domain (or have + other hardware specific dependencies). For representing such dependency + a standard PM domain consumer binding is used. When provided, all domains + created by the given provider should be subdomains of the domain + specified by this binding. More details about power domain specifier are + available in the next section. + Example: power: power-controller@12340000 { @@ -30,6 +40,25 @@ Example: The node above defines a power controller that is a PM domain provider and expects one cell as its phandle argument. +Example 2: + + parent: power-controller@12340000 { + compatible = "foo,power-controller"; + reg = <0x12340000 0x1000>; + #power-domain-cells = <1>; + }; + + child: power-controller@12340000 { + compatible = "foo,power-controller"; + reg = <0x12341000 0x1000>; + power-domains = <&parent 0>; + #power-domain-cells = <1>; + }; + +The nodes above define two power controllers: 'parent' and 'child'. +Domains created by the 'child' power controller are subdomains of '0' power +domain provided by the 'parent' power controller. + ==PM domain consumers== Required properties: diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/8250.txt index 91d5ab0e60fc..91d5ab0e60fc 100644 --- a/Documentation/devicetree/bindings/serial/of-serial.txt +++ b/Documentation/devicetree/bindings/serial/8250.txt diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt new file mode 100644 index 000000000000..ebcbb62c0a76 --- /dev/null +++ b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt @@ -0,0 +1,19 @@ +ETRAX FS UART + +Required properties: +- compatible : "axis,etraxfs-uart" +- reg: offset and length of the register set for the device. +- interrupts: device interrupt + +Optional properties: +- {dtr,dsr,ri,cd}-gpios: specify a GPIO for DTR/DSR/RI/CD + line respectively. 
+ +Example: + +serial@b00260000 { + compatible = "axis,etraxfs-uart"; + reg = <0xb0026000 0x1000>; + interrupts = <68>; + status = "disabled"; +}; diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt index 56742bc70218..7d44eae7ab0b 100644 --- a/Documentation/devicetree/bindings/submitting-patches.txt +++ b/Documentation/devicetree/bindings/submitting-patches.txt @@ -12,6 +12,9 @@ I. For patch submitters + and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify + all of the DT maintainers. + 3) The Documentation/ portion of the patch should come in the series before the code implementing the binding. diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 389ca1347a77..fae26d014aaf 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -20,6 +20,7 @@ amlogic Amlogic, Inc. ams AMS AG amstaos AMS-Taos Inc. apm Applied Micro Circuits Corporation (APM) +arasan Arasan Chip Systems arm ARM Ltd. armadeus ARMadeus Systems SARL asahi-kasei Asahi Kasei Corp. @@ -27,6 +28,7 @@ atmel Atmel Corporation auo AU Optronics Corporation avago Avago Technologies avic Shanghai AVIC Optoelectronics Co., Ltd. +axis Axis Communications AB bosch Bosch Sensortec GmbH brcm Broadcom Corporation buffalo Buffalo, Inc. diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt index f90e294d7631..a4d869744f59 100644 --- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt @@ -26,6 +26,11 @@ Optional properties: - atmel,disable : Should be present if you want to disable the watchdog. - atmel,idle-halt : Should be present if you want to stop the watchdog when entering idle state. + CAUTION: This property should be used with care, it actually makes the + watchdog not counting when the CPU is in idle state, therefore the + watchdog reset time depends on mean CPU usage and will not reset at all + if the CPU stop working while it is in idle state, which is probably + not what you want. - atmel,dbg-halt : Should be present if you want to stop the watchdog when entering debug state. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 4412f695a62f..6c07c2b36909 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1126,11 +1126,23 @@ arp_accept - BOOLEAN gratuitous arp frame, the arp table will be updated regardless if this setting is on or off. +mcast_solicit - INTEGER + The maximum number of multicast probes in INCOMPLETE state, + when the associated hardware address is unknown. Defaults + to 3. + +ucast_solicit - INTEGER + The maximum number of unicast probes in PROBE state, when + the hardware address is being reconfirmed. Defaults to 3. app_solicit - INTEGER The maximum number of probes to send to the user space ARP daemon via netlink before dropping back to multicast probes (see - mcast_solicit). Defaults to 0. + mcast_resolicit). Defaults to 0. + +mcast_resolicit - INTEGER + The maximum number of multicast probes after unicast and + app probes in PROBE state. Defaults to 0. 
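For illustration only (a sketch, not part of any patch above): the neighbour
parameters described here are exposed as per-interface files under
/proc/sys/net/ipv4/neigh/<iface>/, so they can be adjusted from user space
with plain file I/O. The interface name "eth0" and the value 5 below are
assumptions made for the example:

	#include <stdio.h>

	/* Example only: raise the multicast probe limit for eth0.
	 * The path follows /proc/sys/net/ipv4/neigh/<iface>/<param>;
	 * "eth0" and the value 5 are illustrative choices. */
	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/neigh/eth0/mcast_solicit", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "5\n");	/* allow up to 5 multicast probes */
		fclose(f);
		return 0;
	}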
disable_policy - BOOLEAN Disable IPSEC policy (SPD) for this interface diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt index a6d7cb91069e..daa015af16a0 100644 --- a/Documentation/networking/packet_mmap.txt +++ b/Documentation/networking/packet_mmap.txt @@ -440,9 +440,10 @@ and the following flags apply: +++ Capture process: from include/linux/if_packet.h - #define TP_STATUS_COPY 2 - #define TP_STATUS_LOSING 4 - #define TP_STATUS_CSUMNOTREADY 8 + #define TP_STATUS_COPY (1 << 1) + #define TP_STATUS_LOSING (1 << 2) + #define TP_STATUS_CSUMNOTREADY (1 << 3) + #define TP_STATUS_CSUM_VALID (1 << 7) TP_STATUS_COPY : This flag indicates that the frame (and associated meta information) has been truncated because it's @@ -466,6 +467,12 @@ TP_STATUS_CSUMNOTREADY: currently it's used for outgoing IP packets which reading the packet we should not try to check the checksum. +TP_STATUS_CSUM_VALID : This flag indicates that at least the transport + header checksum of the packet has been already + validated on the kernel side. If the flag is not set + then we are free to check the checksum by ourselves + provided that TP_STATUS_CSUMNOTREADY is also not set. + for convenience there are also the following defines: #define TP_STATUS_KERNEL 0 diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt index 99ca40e8e810..cbfac0949635 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.txt @@ -421,6 +421,15 @@ best CPUs to share a given queue are probably those that share the cache with the CPU that processes transmit completions for that queue (transmit interrupts). +Per TX Queue rate limitation: +============================= + +These are rate-limitation mechanisms implemented by HW, where currently +a max-rate attribute is supported, by setting a Mbps value to + +/sys/class/net/<dev>/queues/tx-<n>/tx_maxrate + +A value of zero means disabled, and this is the default. 
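As a usage sketch (again not part of the patch itself), the attribute can be
written with ordinary file I/O; the device name "eth0", the queue count of 4
and the 1000 Mbps cap below are assumptions made for the example:

	#include <stdio.h>

	/* Example only: cap each of the first four tx queues of eth0
	 * at 1000 Mbps. Writing 0 would disable the limit again, which
	 * is the default state. */
	int main(void)
	{
		char path[128];
		int q;

		for (q = 0; q < 4; q++) {
			FILE *f;

			snprintf(path, sizeof(path),
				 "/sys/class/net/eth0/queues/tx-%d/tx_maxrate", q);
			f = fopen(path, "w");
			if (!f) {
				perror(path);
				return 1;
			}
			fprintf(f, "1000\n");
			fclose(f);
		}
		return 0;
	}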
Further Information =================== diff --git a/MAINTAINERS b/MAINTAINERS index 42e221c48776..66ab548ee469 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1030,6 +1030,16 @@ F: arch/arm/mach-mxs/ F: arch/arm/boot/dts/imx* F: arch/arm/configs/imx*_defconfig +ARM/FREESCALE VYBRID ARM ARCHITECTURE +M: Shawn Guo <[email protected]> +M: Sascha Hauer <[email protected]> +R: Stefan Agner <[email protected]> +L: [email protected] (moderated for non-subscribers) +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git +F: arch/arm/mach-imx/*vf610* +F: arch/arm/boot/dts/vf* + ARM/GLOMATION GESBC9312SX MACHINE SUPPORT M: Lennert Buytenhek <[email protected]> L: [email protected] (moderated for non-subscribers) @@ -1188,6 +1198,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support M: Jason Cooper <[email protected]> M: Andrew Lunn <[email protected]> M: Sebastian Hesselbarth <[email protected]> +M: Gregory Clement <[email protected]> L: [email protected] (moderated for non-subscribers) S: Maintained F: arch/arm/mach-dove/ @@ -1730,7 +1741,7 @@ S: Maintained F: drivers/net/ethernet/atheros/ ATM -M: Chas Williams <[email protected]> +M: Chas Williams <[email protected]> L: [email protected] (moderated for non-subscribers) W: http://linux-atm.sourceforge.net @@ -2107,7 +2118,6 @@ F: drivers/net/ethernet/broadcom/bnx2x/ BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE M: Christian Daudt <[email protected]> -M: Matt Porter <[email protected]> M: Florian Fainelli <[email protected]> T: git git://github.com/broadcom/mach-bcm @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 0 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 7f99cd652203..eb7bb511f853 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -150,6 +150,7 @@ machine-$(CONFIG_ARCH_BERLIN) += berlin machine-$(CONFIG_ARCH_CLPS711X) += clps711x machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx machine-$(CONFIG_ARCH_DAVINCI) += davinci +machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor machine-$(CONFIG_ARCH_DOVE) += dove machine-$(CONFIG_ARCH_EBSA110) += ebsa110 machine-$(CONFIG_ARCH_EFM32) += efm32 diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index 2c6248d9a9ef..c3255e0c90aa 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -301,3 +301,11 @@ cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; cd-inverted; }; + +&aes { + status = "okay"; +}; + +&sham { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts index 83d40f7655e5..6b8493720424 100644 --- a/arch/arm/boot/dts/am335x-bone.dts +++ b/arch/arm/boot/dts/am335x-bone.dts @@ -24,11 +24,3 @@ &mmc1 { vmmc-supply = <&ldo3_reg>; }; - -&sham { - status = "okay"; -}; - -&aes { - status = "okay"; -}; diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts index 7266a00aab2e..5c5667a3624d 100644 --- a/arch/arm/boot/dts/am335x-lxm.dts +++ b/arch/arm/boot/dts/am335x-lxm.dts @@ -328,6 +328,10 @@ dual_emac_res_vlan = <3>; }; +&phy_sel { + rmii-clock-ext; +}; + &mac { pinctrl-names = "default", "sleep"; pinctrl-0 = <&cpsw_default>; diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi index 712edce7d6fb..071b56aa0c7e 100644 --- a/arch/arm/boot/dts/am33xx-clocks.dtsi +++ b/arch/arm/boot/dts/am33xx-clocks.dtsi @@ -99,7 +99,7 @@ ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 { #clock-cells = <0>; 
compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <0>; reg = <0x0664>; }; @@ -107,7 +107,7 @@ ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <1>; reg = <0x0664>; }; @@ -115,7 +115,7 @@ ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <2>; reg = <0x0664>; }; diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi index c7dc9dab93a4..cfb49686ab6a 100644 --- a/arch/arm/boot/dts/am43xx-clocks.dtsi +++ b/arch/arm/boot/dts/am43xx-clocks.dtsi @@ -107,7 +107,7 @@ ehrpwm0_tbclk: ehrpwm0_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <0>; reg = <0x0664>; }; @@ -115,7 +115,7 @@ ehrpwm1_tbclk: ehrpwm1_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <1>; reg = <0x0664>; }; @@ -123,7 +123,7 @@ ehrpwm2_tbclk: ehrpwm2_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <2>; reg = <0x0664>; }; @@ -131,7 +131,7 @@ ehrpwm3_tbclk: ehrpwm3_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <4>; reg = <0x0664>; }; @@ -139,7 +139,7 @@ ehrpwm4_tbclk: ehrpwm4_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <5>; reg = <0x0664>; }; @@ -147,7 +147,7 @@ ehrpwm5_tbclk: ehrpwm5_tbclk { #clock-cells = <0>; compatible = "ti,gate-clock"; - clocks = <&dpll_per_m2_ck>; + clocks = <&l4ls_gclk>; ti,bit-shift = <6>; reg = <0x0664>; }; diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index 9f7c7376f2cf..62d25b14deb8 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi @@ -494,12 +494,12 @@ pinctrl_usart3_rts: usart3_rts-0 { atmel,pins = - <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC8 periph B */ + <AT91_PIOC 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; }; pinctrl_usart3_cts: usart3_cts-0 { atmel,pins = - <AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC10 periph B */ + <AT91_PIOC 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; }; }; @@ -853,7 +853,7 @@ }; usb1: gadget@fffa4000 { - compatible = "atmel,at91rm9200-udc"; + compatible = "atmel,at91sam9260-udc"; reg = <0xfffa4000 0x4000>; interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; clocks = <&udc_clk>, <&udpck>; @@ -976,7 +976,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi index e247b0b5fdab..d55fdf2487ef 100644 --- a/arch/arm/boot/dts/at91sam9261.dtsi +++ b/arch/arm/boot/dts/at91sam9261.dtsi @@ -124,11 +124,12 @@ }; usb1: gadget@fffa4000 { - compatible = "atmel,at91rm9200-udc"; + compatible = "atmel,at91sam9261-udc"; reg = <0xfffa4000 0x4000>; interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&udc_clk>, <&udpck>; - clock-names = "usb_clk", "udc_clk", "udpck"; + clocks = <&udc_clk>, <&udpck>; + clock-names = "pclk", "hclk"; + atmel,matrix = <&matrix>; status = "disabled"; }; @@ -262,7 +263,7 @@ }; matrix: matrix@ffffee00 { - compatible = 
"atmel,at91sam9260-bus-matrix"; + compatible = "atmel,at91sam9260-bus-matrix", "syscon"; reg = <0xffffee00 0x200>; }; diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index 340179ef6ba0..e4f61a979a57 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi @@ -69,7 +69,7 @@ sram1: sram@00500000 { compatible = "mmio-sram"; - reg = <0x00300000 0x4000>; + reg = <0x00500000 0x4000>; }; ahb { @@ -856,7 +856,7 @@ }; usb1: gadget@fff78000 { - compatible = "atmel,at91rm9200-udc"; + compatible = "atmel,at91sam9263-udc"; reg = <0xfff78000 0x4000>; interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>; clocks = <&udc_clk>, <&udpck>; @@ -905,7 +905,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index 586eab7b653d..8ec05b11298a 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -1116,7 +1116,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; @@ -1301,7 +1300,7 @@ compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; reg = <0x00800000 0x100000>; interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; + clocks = <&utmi>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi index c2666a7cb5b1..0c53a375ba99 100644 --- a/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/arch/arm/boot/dts/at91sam9n12.dtsi @@ -894,7 +894,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index 818dabdd8c0e..d221179d0f1a 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -1066,7 +1066,7 @@ reg = <0x00500000 0x80000 0xf803c000 0x400>; interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; - clocks = <&usb>, <&udphs_clk>; + clocks = <&utmi>, <&udphs_clk>; clock-names = "hclk", "pclk"; status = "disabled"; @@ -1130,7 +1130,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; @@ -1186,7 +1185,7 @@ compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; reg = <0x00700000 0x100000>; interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&uhpck>; + clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ehci_clk", "uhpck"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 3290a96ba586..7563d7ce01bb 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -263,17 +263,15 @@ dcan1_pins_default: dcan1_pins_default { pinctrl-single,pins = < - 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ - 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ - 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */ + 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */ + 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */ >; }; dcan1_pins_sleep: dcan1_pins_sleep { pinctrl-single,pins = < - 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ - 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ - 0x418 (MUX_MODE15) /* wakeup0.off */ + 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */ + 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */ >; }; }; diff 
--git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index e0264d0bf7b9..40ed539ce474 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ b/arch/arm/boot/dts/dra72-evm.dts @@ -119,17 +119,15 @@ dcan1_pins_default: dcan1_pins_default { pinctrl-single,pins = < - 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ - 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ - 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */ + 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */ + 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */ >; }; dcan1_pins_sleep: dcan1_pins_sleep { pinctrl-single,pins = < - 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ - 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ - 0x418 (MUX_MODE15) /* wakeup0.off */ + 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */ + 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */ >; }; diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index 4bdcbd61ce47..99b09a44e269 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi @@ -243,10 +243,18 @@ ti,invert-autoidle-bit; }; + dpll_core_byp_mux: dpll_core_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + ti,bit-shift = <23>; + reg = <0x012c>; + }; + dpll_core_ck: dpll_core_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-core-clock"; - clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + clocks = <&sys_clkin1>, <&dpll_core_byp_mux>; reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; }; @@ -309,10 +317,18 @@ clock-div = <1>; }; + dpll_dsp_byp_mux: dpll_dsp_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x0240>; + }; + dpll_dsp_ck: dpll_dsp_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; + clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>; reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>; }; @@ -335,10 +351,18 @@ clock-div = <1>; }; + dpll_iva_byp_mux: dpll_iva_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x01ac>; + }; + dpll_iva_ck: dpll_iva_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; + clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>; reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; }; @@ -361,10 +385,18 @@ clock-div = <1>; }; + dpll_gpu_byp_mux: dpll_gpu_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + ti,bit-shift = <23>; + reg = <0x02e4>; + }; + dpll_gpu_ck: dpll_gpu_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>; reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>; }; @@ -398,10 +430,18 @@ clock-div = <1>; }; + dpll_ddr_byp_mux: dpll_ddr_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + ti,bit-shift = <23>; + reg = <0x021c>; + }; + dpll_ddr_ck: dpll_ddr_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>; reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>; }; @@ -416,10 +456,18 @@ ti,invert-autoidle-bit; }; + dpll_gmac_byp_mux: dpll_gmac_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + ti,bit-shift = <23>; + 
reg = <0x02b4>; + }; + dpll_gmac_ck: dpll_gmac_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; + clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>; reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>; }; @@ -482,10 +530,18 @@ clock-div = <1>; }; + dpll_eve_byp_mux: dpll_eve_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x0290>; + }; + dpll_eve_ck: dpll_eve_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; + clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>; reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>; }; @@ -1249,10 +1305,18 @@ clock-div = <1>; }; + dpll_per_byp_mux: dpll_per_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x014c>; + }; + dpll_per_ck: dpll_per_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; + clocks = <&sys_clkin1>, <&dpll_per_byp_mux>; reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; }; @@ -1275,10 +1339,18 @@ clock-div = <1>; }; + dpll_usb_byp_mux: dpll_usb_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x018c>; + }; + dpll_usb_ck: dpll_usb_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-j-type-clock"; - clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; + clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>; reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; }; diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index 277b48b0b6f9..ac6b0ae42caf 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi @@ -18,6 +18,7 @@ */ #include "skeleton.dtsi" +#include "exynos4-cpu-thermal.dtsi" #include <dt-bindings/clock/exynos3250.h> / { @@ -193,6 +194,7 @@ interrupts = <0 216 0>; clocks = <&cmu CLK_TMU_APBIF>; clock-names = "tmu_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" status = "disabled"; }; diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi new file mode 100644 index 000000000000..735cb2f10817 --- /dev/null +++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi @@ -0,0 +1,52 @@ +/* + * Device tree sources for Exynos4 thermal zone + * + * Copyright (c) 2014 Lukasz Majewski <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <dt-bindings/thermal/thermal.h> + +/ { +thermal-zones { + cpu_thermal: cpu-thermal { + thermal-sensors = <&tmu 0>; + polling-delay-passive = <0>; + polling-delay = <0>; + trips { + cpu_alert0: cpu-alert-0 { + temperature = <70000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu_alert1: cpu-alert-1 { + temperature = <95000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu_alert2: cpu-alert-2 { + temperature = <110000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu_crit0: cpu-crit-0 { + temperature = <120000>; /* millicelsius */ + hysteresis = <0>; /* millicelsius */ + type = "critical"; + }; + }; + cooling-maps { + map0 { + trip = <&cpu_alert0>; + }; + map1 { + trip = <&cpu_alert1>; + }; + }; + }; +}; +}; diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index 76173cacd450..77ea547768f4 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi @@ -38,6 +38,7 @@ i2c5 = &i2c_5; i2c6 = &i2c_6; i2c7 = &i2c_7; + i2c8 = &i2c_8; csis0 = &csis_0; csis1 = &csis_1; fimc0 = &fimc_0; @@ -104,6 +105,7 @@ compatible = "samsung,exynos4210-pd"; reg = <0x10023C20 0x20>; #power-domain-cells = <0>; + power-domains = <&pd_lcd0>; }; pd_cam: cam-power-domain@10023C00 { @@ -554,6 +556,22 @@ status = "disabled"; }; + i2c_8: i2c@138E0000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "samsung,s3c2440-hdmiphy-i2c"; + reg = <0x138E0000 0x100>; + interrupts = <0 93 0>; + clocks = <&clock CLK_I2C_HDMI>; + clock-names = "i2c"; + status = "disabled"; + + hdmi_i2c_phy: hdmiphy@38 { + compatible = "exynos4210-hdmiphy"; + reg = <0x38>; + }; + }; + spi_0: spi@13920000 { compatible = "samsung,exynos4210-spi"; reg = <0x13920000 0x100>; @@ -663,6 +681,33 @@ status = "disabled"; }; + tmu: tmu@100C0000 { + #include "exynos4412-tmu-sensor-conf.dtsi" + }; + + hdmi: hdmi@12D00000 { + compatible = "samsung,exynos4210-hdmi"; + reg = <0x12D00000 0x70000>; + interrupts = <0 92 0>; + clock-names = "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy", + "mout_hdmi"; + clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, + <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, + <&clock CLK_MOUT_HDMI>; + phy = <&hdmi_i2c_phy>; + power-domains = <&pd_tv>; + samsung,syscon-phandle = <&pmu_system_controller>; + status = "disabled"; + }; + + mixer: mixer@12C10000 { + compatible = "samsung,exynos4210-mixer"; + interrupts = <0 91 0>; + reg = <0x12C10000 0x2100>, <0x12c00000 0x300>; + power-domains = <&pd_tv>; + status = "disabled"; + }; + ppmu_dmc0: ppmu_dmc0@106a0000 { compatible = "samsung,exynos-ppmu"; reg = <0x106a0000 0x2000>; diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts index 3d6652a4b6cb..32c5fd8f6269 100644 --- a/arch/arm/boot/dts/exynos4210-trats.dts +++ b/arch/arm/boot/dts/exynos4210-trats.dts @@ -426,6 +426,25 @@ status = "okay"; }; + tmu@100C0000 { + status = "okay"; + }; + + thermal-zones { + cpu_thermal: cpu-thermal { + cooling-maps { + map0 { + /* Corresponds to 800MHz at freq_table */ + cooling-device = <&cpu0 2 2>; + }; + map1 { + /* Corresponds to 200MHz at freq_table */ + cooling-device = <&cpu0 4 4>; + }; + }; + }; + }; + camera { pinctrl-names = "default"; pinctrl-0 = <>; diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts index b57e6b82ea20..d4f2b11319dd 100644 --- 
a/arch/arm/boot/dts/exynos4210-universal_c210.dts +++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts @@ -505,6 +505,63 @@ assigned-clock-rates = <0>, <160000000>; }; }; + + hdmi_en: voltage-regulator-hdmi-5v { + compatible = "regulator-fixed"; + regulator-name = "HDMI_5V"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpe0 1 0>; + enable-active-high; + }; + + hdmi_ddc: i2c-ddc { + compatible = "i2c-gpio"; + gpios = <&gpe4 2 0 &gpe4 3 0>; + i2c-gpio,delay-us = <100>; + #address-cells = <1>; + #size-cells = <0>; + + pinctrl-0 = <&i2c_ddc_bus>; + pinctrl-names = "default"; + status = "okay"; + }; + + mixer@12C10000 { + status = "okay"; + }; + + hdmi@12D00000 { + hpd-gpio = <&gpx3 7 0>; + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_hpd>; + hdmi-en-supply = <&hdmi_en>; + vdd-supply = <&ldo3_reg>; + vdd_osc-supply = <&ldo4_reg>; + vdd_pll-supply = <&ldo3_reg>; + ddc = <&hdmi_ddc>; + status = "okay"; + }; + + i2c@138E0000 { + status = "okay"; + }; +}; + +&pinctrl_1 { + hdmi_hpd: hdmi-hpd { + samsung,pins = "gpx3-7"; + samsung,pin-pud = <0>; + }; +}; + +&pinctrl_0 { + i2c_ddc_bus: i2c-ddc-bus { + samsung,pins = "gpe4-2", "gpe4-3"; + samsung,pin-function = <2>; + samsung,pin-pud = <3>; + samsung,pin-drv = <0>; + }; }; &mdma1 { diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi index 67c832c9dcf1..be89f83f70e7 100644 --- a/arch/arm/boot/dts/exynos4210.dtsi +++ b/arch/arm/boot/dts/exynos4210.dtsi @@ -21,6 +21,7 @@ #include "exynos4.dtsi" #include "exynos4210-pinctrl.dtsi" +#include "exynos4-cpu-thermal.dtsi" / { compatible = "samsung,exynos4210", "samsung,exynos4"; @@ -35,10 +36,13 @@ #address-cells = <1>; #size-cells = <0>; - cpu@900 { + cpu0: cpu@900 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0x900>; + cooling-min-level = <4>; + cooling-max-level = <2>; + #cooling-cells = <2>; /* min followed by max */ }; cpu@901 { @@ -153,16 +157,38 @@ reg = <0x03860000 0x1000>; }; - tmu@100C0000 { + tmu: tmu@100C0000 { compatible = "samsung,exynos4210-tmu"; interrupt-parent = <&combiner>; reg = <0x100C0000 0x100>; interrupts = <2 4>; clocks = <&clock CLK_TMU_APBIF>; clock-names = "tmu_apbif"; + samsung,tmu_gain = <15>; + samsung,tmu_reference_voltage = <7>; status = "disabled"; }; + thermal-zones { + cpu_thermal: cpu-thermal { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tmu 0>; + + trips { + cpu_alert0: cpu-alert-0 { + temperature = <85000>; /* millicelsius */ + }; + cpu_alert1: cpu-alert-1 { + temperature = <100000>; /* millicelsius */ + }; + cpu_alert2: cpu-alert-2 { + temperature = <110000>; /* millicelsius */ + }; + }; + }; + }; + g2d@12800000 { compatible = "samsung,s5pv210-g2d"; reg = <0x12800000 0x1000>; @@ -203,6 +229,14 @@ }; }; + mixer: mixer@12C10000 { + clock-names = "mixer", "hdmi", "sclk_hdmi", "vp", "mout_mixer", + "sclk_mixer"; + clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, + <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>, + <&clock CLK_MOUT_MIXER>, <&clock CLK_SCLK_MIXER>; + }; + ppmu_lcd1: ppmu_lcd1@12240000 { compatible = "samsung,exynos-ppmu"; reg = <0x12240000 0x2000>; diff --git a/arch/arm/boot/dts/exynos4212.dtsi b/arch/arm/boot/dts/exynos4212.dtsi index dd0a43ec56da..5be03288f1ee 100644 --- a/arch/arm/boot/dts/exynos4212.dtsi +++ b/arch/arm/boot/dts/exynos4212.dtsi @@ -26,10 +26,13 @@ #address-cells = <1>; #size-cells = <0>; - cpu@A00 { + cpu0: cpu@A00 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0xA00>; + cooling-min-level = <13>; + 
cooling-max-level = <7>; + #cooling-cells = <2>; /* min followed by max */ }; cpu@A01 { diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index de80b5bba204..adb4f6a97a1d 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi @@ -249,6 +249,20 @@ regulator-always-on; }; + ldo8_reg: ldo@8 { + regulator-compatible = "LDO8"; + regulator-name = "VDD10_HDMI_1.0V"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + }; + + ldo10_reg: ldo@10 { + regulator-compatible = "LDO10"; + regulator-name = "VDDQ_MIPIHSI_1.8V"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + ldo11_reg: LDO11 { regulator-name = "VDD18_ABB1_1.8V"; regulator-min-microvolt = <1800000>; @@ -411,6 +425,51 @@ ehci: ehci@12580000 { status = "okay"; }; + + tmu@100C0000 { + vtmu-supply = <&ldo10_reg>; + status = "okay"; + }; + + thermal-zones { + cpu_thermal: cpu-thermal { + cooling-maps { + map0 { + /* Corresponds to 800MHz at freq_table */ + cooling-device = <&cpu0 7 7>; + }; + map1 { + /* Corresponds to 200MHz at freq_table */ + cooling-device = <&cpu0 13 13>; + }; + }; + }; + }; + + mixer: mixer@12C10000 { + status = "okay"; + }; + + hdmi@12D00000 { + hpd-gpio = <&gpx3 7 0>; + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_hpd>; + vdd-supply = <&ldo8_reg>; + vdd_osc-supply = <&ldo10_reg>; + vdd_pll-supply = <&ldo8_reg>; + ddc = <&hdmi_ddc>; + status = "okay"; + }; + + hdmi_ddc: i2c@13880000 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_bus>; + }; + + i2c@138E0000 { + status = "okay"; + }; }; &pinctrl_1 { @@ -425,4 +484,9 @@ samsung,pin-pud = <0>; samsung,pin-drv = <0>; }; + + hdmi_hpd: hdmi-hpd { + samsung,pins = "gpx3-7"; + samsung,pin-pud = <1>; + }; }; diff --git a/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi new file mode 100644 index 000000000000..e3f7934d19d0 --- /dev/null +++ b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi @@ -0,0 +1,24 @@ +/* + * Device tree sources for Exynos4412 TMU sensor configuration + * + * Copyright (c) 2014 Lukasz Majewski <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <dt-bindings/thermal/thermal_exynos.h> + +#thermal-sensor-cells = <0>; +samsung,tmu_gain = <8>; +samsung,tmu_reference_voltage = <16>; +samsung,tmu_noise_cancel_mode = <4>; +samsung,tmu_efuse_value = <55>; +samsung,tmu_min_efuse_value = <40>; +samsung,tmu_max_efuse_value = <100>; +samsung,tmu_first_point_trim = <25>; +samsung,tmu_second_point_trim = <85>; +samsung,tmu_default_temp_offset = <50>; +samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>; diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index 21f748083586..173ffa479ad3 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts @@ -927,6 +927,21 @@ pulldown-ohm = <100000>; /* 100K */ io-channels = <&adc 2>; /* Battery temperature */ }; + + thermal-zones { + cpu_thermal: cpu-thermal { + cooling-maps { + map0 { + /* Corresponds to 800MHz at freq_table */ + cooling-device = <&cpu0 7 7>; + }; + map1 { + /* Corresponds to 200MHz at freq_table */ + cooling-device = <&cpu0 13 13>; + }; + }; + }; + }; }; &pmu_system_controller { diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi index 0f6ec93bb1d8..68ad43b391ae 100644 --- a/arch/arm/boot/dts/exynos4412.dtsi +++ b/arch/arm/boot/dts/exynos4412.dtsi @@ -26,10 +26,13 @@ #address-cells = <1>; #size-cells = <0>; - cpu@A00 { + cpu0: cpu@A00 { device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0xA00>; + cooling-min-level = <13>; + cooling-max-level = <7>; + #cooling-cells = <2>; /* min followed by max */ }; cpu@A01 { diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi index f5e0ae780d6c..6a6abe14fd9b 100644 --- a/arch/arm/boot/dts/exynos4x12.dtsi +++ b/arch/arm/boot/dts/exynos4x12.dtsi @@ -19,6 +19,7 @@ #include "exynos4.dtsi" #include "exynos4x12-pinctrl.dtsi" +#include "exynos4-cpu-thermal.dtsi" / { aliases { @@ -297,4 +298,15 @@ clock-names = "tmu_apbif"; status = "disabled"; }; + + hdmi: hdmi@12D00000 { + compatible = "samsung,exynos4212-hdmi"; + }; + + mixer: mixer@12C10000 { + compatible = "samsung,exynos4212-mixer"; + clock-names = "mixer", "hdmi", "sclk_hdmi", "vp"; + clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, + <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>; + }; }; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 9bb1b0b738f5..adbde1adad95 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -20,7 +20,7 @@ #include <dt-bindings/clock/exynos5250.h> #include "exynos5.dtsi" #include "exynos5250-pinctrl.dtsi" - +#include "exynos4-cpu-thermal.dtsi" #include <dt-bindings/clock/exynos-audss-clk.h> / { @@ -58,11 +58,14 @@ #address-cells = <1>; #size-cells = <0>; - cpu@0 { + cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a15"; reg = <0>; clock-frequency = <1700000000>; + cooling-min-level = <15>; + cooling-max-level = <9>; + #cooling-cells = <2>; /* min followed by max */ }; cpu@1 { device_type = "cpu"; @@ -102,6 +105,12 @@ #power-domain-cells = <0>; }; + pd_disp1: disp1-power-domain@100440A0 { + compatible = "samsung,exynos4210-pd"; + reg = <0x100440A0 0x20>; + #power-domain-cells = <0>; + }; + clock: clock-controller@10010000 { compatible = "samsung,exynos5250-clock"; reg = <0x10010000 0x30000>; @@ -235,12 +244,32 @@ status = "disabled"; }; - tmu@10060000 { + tmu: tmu@10060000 { compatible = "samsung,exynos5250-tmu"; reg = <0x10060000 0x100>; interrupts = <0 65 0>; clocks = <&clock CLK_TMU>; clock-names = "tmu_apbif"; + #include 
"exynos4412-tmu-sensor-conf.dtsi" + }; + + thermal-zones { + cpu_thermal: cpu-thermal { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&tmu 0>; + + cooling-maps { + map0 { + /* Corresponds to 800MHz at freq_table */ + cooling-device = <&cpu0 9 9>; + }; + map1 { + /* Corresponds to 200MHz at freq_table */ + cooling-device = <&cpu0 15 15>; + }; + }; + }; }; serial@12C00000 { @@ -719,6 +748,7 @@ hdmi: hdmi { compatible = "samsung,exynos4212-hdmi"; reg = <0x14530000 0x70000>; + power-domains = <&pd_disp1>; interrupts = <0 95 0>; clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, @@ -731,9 +761,11 @@ mixer { compatible = "samsung,exynos5250-mixer"; reg = <0x14450000 0x10000>; + power-domains = <&pd_disp1>; interrupts = <0 94 0>; - clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; - clock-names = "mixer", "sclk_hdmi"; + clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, + <&clock CLK_SCLK_HDMI>; + clock-names = "mixer", "hdmi", "sclk_hdmi"; }; dp_phy: video-phy@10040720 { @@ -743,6 +775,7 @@ }; dp: dp-controller@145B0000 { + power-domains = <&pd_disp1>; clocks = <&clock CLK_DP>; clock-names = "dp"; phys = <&dp_phy>; @@ -750,6 +783,7 @@ }; fimd: fimd@14400000 { + power-domains = <&pd_disp1>; clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>; clock-names = "sclk_fimd", "fimd"; }; diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi new file mode 100644 index 000000000000..5d31fc140823 --- /dev/null +++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi @@ -0,0 +1,35 @@ +/* + * Device tree sources for default Exynos5420 thermal zone definition + * + * Copyright (c) 2014 Lukasz Majewski <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +polling-delay-passive = <0>; +polling-delay = <0>; +trips { + cpu-alert-0 { + temperature = <85000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu-alert-1 { + temperature = <103000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu-alert-2 { + temperature = <110000>; /* millicelsius */ + hysteresis = <10000>; /* millicelsius */ + type = "active"; + }; + cpu-crit-0 { + temperature = <1200000>; /* millicelsius */ + hysteresis = <0>; /* millicelsius */ + type = "critical"; + }; +}; diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index 9dc2e9773b30..c0e98cf3514f 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi @@ -740,8 +740,9 @@ compatible = "samsung,exynos5420-mixer"; reg = <0x14450000 0x10000>; interrupts = <0 94 0>; - clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; - clock-names = "mixer", "sclk_hdmi"; + clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, + <&clock CLK_SCLK_HDMI>; + clock-names = "mixer", "hdmi", "sclk_hdmi"; power-domains = <&disp_pd>; }; @@ -782,6 +783,7 @@ interrupts = <0 65 0>; clocks = <&clock CLK_TMU>; clock-names = "tmu_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; tmu_cpu1: tmu@10064000 { @@ -790,6 +792,7 @@ interrupts = <0 183 0>; clocks = <&clock CLK_TMU>; clock-names = "tmu_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; tmu_cpu2: tmu@10068000 { @@ -798,6 +801,7 @@ interrupts = <0 184 0>; clocks = <&clock CLK_TMU>, <&clock CLK_TMU>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; tmu_cpu3: tmu@1006c000 { @@ -806,6 +810,7 @@ interrupts = <0 185 0>; clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; tmu_gpu: tmu@100a0000 { @@ -814,6 +819,30 @@ interrupts = <0 215 0>; clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" + }; + + thermal-zones { + cpu0_thermal: cpu0-thermal { + thermal-sensors = <&tmu_cpu0>; + #include "exynos5420-trip-points.dtsi" + }; + cpu1_thermal: cpu1-thermal { + thermal-sensors = <&tmu_cpu1>; + #include "exynos5420-trip-points.dtsi" + }; + cpu2_thermal: cpu2-thermal { + thermal-sensors = <&tmu_cpu2>; + #include "exynos5420-trip-points.dtsi" + }; + cpu3_thermal: cpu3-thermal { + thermal-sensors = <&tmu_cpu3>; + #include "exynos5420-trip-points.dtsi" + }; + gpu_thermal: gpu-thermal { + thermal-sensors = <&tmu_gpu>; + #include "exynos5420-trip-points.dtsi" + }; }; watchdog: watchdog@101D0000 { diff --git a/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi new file mode 100644 index 000000000000..7b2fba0ae92b --- /dev/null +++ b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi @@ -0,0 +1,24 @@ +/* + * Device tree sources for Exynos5440 TMU sensor configuration + * + * Copyright (c) 2014 Lukasz Majewski <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <dt-bindings/thermal/thermal_exynos.h> + +#thermal-sensor-cells = <0>; +samsung,tmu_gain = <5>; +samsung,tmu_reference_voltage = <16>; +samsung,tmu_noise_cancel_mode = <4>; +samsung,tmu_efuse_value = <0x5d2d>; +samsung,tmu_min_efuse_value = <16>; +samsung,tmu_max_efuse_value = <76>; +samsung,tmu_first_point_trim = <25>; +samsung,tmu_second_point_trim = <70>; +samsung,tmu_default_temp_offset = <25>; +samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>; diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi new file mode 100644 index 000000000000..48adfa8f4300 --- /dev/null +++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi @@ -0,0 +1,25 @@ +/* + * Device tree sources for default Exynos5440 thermal zone definition + * + * Copyright (c) 2014 Lukasz Majewski <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +polling-delay-passive = <0>; +polling-delay = <0>; +trips { + cpu-alert-0 { + temperature = <100000>; /* millicelsius */ + hysteresis = <0>; /* millicelsius */ + type = "active"; + }; + cpu-crit-0 { + temperature = <1050000>; /* millicelsius */ + hysteresis = <0>; /* millicelsius */ + type = "critical"; + }; +}; diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi index 8f3373cd7b87..59d9416b3b03 100644 --- a/arch/arm/boot/dts/exynos5440.dtsi +++ b/arch/arm/boot/dts/exynos5440.dtsi @@ -219,6 +219,7 @@ interrupts = <0 58 0>; clocks = <&clock CLK_B_125>; clock-names = "tmu_apbif"; + #include "exynos5440-tmu-sensor-conf.dtsi" }; tmuctrl_1: tmuctrl@16011C { @@ -227,6 +228,7 @@ interrupts = <0 58 0>; clocks = <&clock CLK_B_125>; clock-names = "tmu_apbif"; + #include "exynos5440-tmu-sensor-conf.dtsi" }; tmuctrl_2: tmuctrl@160120 { @@ -235,6 +237,22 @@ interrupts = <0 58 0>; clocks = <&clock CLK_B_125>; clock-names = "tmu_apbif"; + #include "exynos5440-tmu-sensor-conf.dtsi" + }; + + thermal-zones { + cpu0_thermal: cpu0-thermal { + thermal-sensors = <&tmuctrl_0>; + #include "exynos5440-trip-points.dtsi" + }; + cpu1_thermal: cpu1-thermal { + thermal-sensors = <&tmuctrl_1>; + #include "exynos5440-trip-points.dtsi" + }; + cpu2_thermal: cpu2-thermal { + thermal-sensors = <&tmuctrl_2>; + #include "exynos5440-trip-points.dtsi" + }; }; sata@210000 { diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index f1cd2147421d..a626e6dd8022 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi @@ -35,6 +35,7 @@ regulator-max-microvolt = <5000000>; gpio = <&gpio3 22 0>; enable-active-high; + vin-supply = <&swbst_reg>; }; reg_usb_h1_vbus: regulator@1 { @@ -45,6 +46,7 @@ regulator-max-microvolt = <5000000>; gpio = <&gpio1 29 0>; enable-active-high; + vin-supply = <&swbst_reg>; }; reg_audio: regulator@2 { diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index fda4932faefd..945887d3fdb3 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts @@ -52,6 +52,7 @@ regulator-max-microvolt = <5000000>; gpio = <&gpio4 0 0>; enable-active-high; + vin-supply = <&swbst_reg>; }; reg_usb_otg2_vbus: regulator@1 { @@ -62,6 +63,7 @@ regulator-max-microvolt = <5000000>; gpio = <&gpio4 2 0>; enable-active-high; + vin-supply = <&swbst_reg>; }; reg_aud3v: regulator@2 { diff --git a/arch/arm/boot/dts/omap5-core-thermal.dtsi 
b/arch/arm/boot/dts/omap5-core-thermal.dtsi index 19212ac6eef0..de8a3d456cf7 100644 --- a/arch/arm/boot/dts/omap5-core-thermal.dtsi +++ b/arch/arm/boot/dts/omap5-core-thermal.dtsi @@ -13,7 +13,7 @@ core_thermal: core_thermal { polling-delay-passive = <250>; /* milliseconds */ - polling-delay = <1000>; /* milliseconds */ + polling-delay = <500>; /* milliseconds */ /* sensor ID */ thermal-sensors = <&bandgap 2>; diff --git a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi index 1b87aca88b77..bc3090f2e84b 100644 --- a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi +++ b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi @@ -13,7 +13,7 @@ gpu_thermal: gpu_thermal { polling-delay-passive = <250>; /* milliseconds */ - polling-delay = <1000>; /* milliseconds */ + polling-delay = <500>; /* milliseconds */ /* sensor ID */ thermal-sensors = <&bandgap 1>; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index ddff674bd05e..4a485b63a141 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -1079,4 +1079,8 @@ }; }; +&cpu_thermal { + polling-delay = <500>; /* milliseconds */ +}; + /include/ "omap54xx-clocks.dtsi" diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi index 58c27466f012..83b425fb3ac2 100644 --- a/arch/arm/boot/dts/omap54xx-clocks.dtsi +++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi @@ -167,10 +167,18 @@ ti,index-starts-at-one; }; + dpll_core_byp_mux: dpll_core_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; + ti,bit-shift = <23>; + reg = <0x012c>; + }; + dpll_core_ck: dpll_core_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-core-clock"; - clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; + clocks = <&sys_clkin>, <&dpll_core_byp_mux>; reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; }; @@ -294,10 +302,18 @@ clock-div = <1>; }; + dpll_iva_byp_mux: dpll_iva_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x01ac>; + }; + dpll_iva_ck: dpll_iva_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; + clocks = <&sys_clkin>, <&dpll_iva_byp_mux>; reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; }; @@ -599,10 +615,19 @@ }; }; &cm_core_clocks { + + dpll_per_byp_mux: dpll_per_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x014c>; + }; + dpll_per_ck: dpll_per_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-clock"; - clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; + clocks = <&sys_clkin>, <&dpll_per_byp_mux>; reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; }; @@ -714,10 +739,18 @@ ti,index-starts-at-one; }; + dpll_usb_byp_mux: dpll_usb_byp_mux { + #clock-cells = <0>; + compatible = "ti,mux-clock"; + clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; + ti,bit-shift = <23>; + reg = <0x018c>; + }; + dpll_usb_ck: dpll_usb_ck { #clock-cells = <0>; compatible = "ti,omap4-dpll-j-type-clock"; - clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; + clocks = <&sys_clkin>, <&dpll_usb_byp_mux>; reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; }; diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 261311bdf65b..367af53c1b84 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi @@ -1248,7 +1248,6 @@ atmel,watchdog-type = "hardware"; atmel,reset-type = "all"; 
atmel,dbg-halt; - atmel,idle-halt; status = "disabled"; }; @@ -1416,7 +1415,7 @@ compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; reg = <0x00700000 0x100000>; interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&uhpck>; + clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ehci_clk", "uhpck"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index d986b41b9654..4303874889c6 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -66,6 +66,7 @@ gpio4 = &pioE; tcb0 = &tcb0; tcb1 = &tcb1; + i2c0 = &i2c0; i2c2 = &i2c2; }; cpus { @@ -259,7 +260,7 @@ compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; reg = <0x00600000 0x100000>; interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&uhpck>; + clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ehci_clk", "uhpck"; status = "disabled"; }; @@ -461,8 +462,8 @@ lcdck: lcdck { #clock-cells = <0>; - reg = <4>; - clocks = <&smd>; + reg = <3>; + clocks = <&mck>; }; smdck: smdck { @@ -770,7 +771,7 @@ reg = <50>; }; - lcd_clk: lcd_clk { + lcdc_clk: lcdc_clk { #clock-cells = <0>; reg = <51>; }; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 252c3d1bda50..9d8760956752 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -713,6 +713,9 @@ reg-shift = <2>; reg-io-width = <4>; clocks = <&l4_sp_clk>; + dmas = <&pdma 28>, + <&pdma 29>; + dma-names = "tx", "rx"; }; uart1: serial1@ffc03000 { @@ -722,6 +725,9 @@ reg-shift = <2>; reg-io-width = <4>; clocks = <&l4_sp_clk>; + dmas = <&pdma 30>, + <&pdma 31>; + dma-names = "tx", "rx"; }; rst: rstmgr@ffd05000 { diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index f2670f638e97..811e72bbe642 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig @@ -70,6 +70,7 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y +CONFIG_ARM_AT91_ETHER=y CONFIG_MACB=y # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_DM9000=y diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index b7e6b6fba5e0..06075b6d2463 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -99,7 +99,7 @@ CONFIG_PCI_RCAR_GEN2=y CONFIG_PCI_RCAR_GEN2_PCIE=y CONFIG_PCIEPORTBUS=y CONFIG_SMP=y -CONFIG_NR_CPUS=8 +CONFIG_NR_CPUS=16 CONFIG_HIGHPTE=y CONFIG_CMA=y CONFIG_ARM_APPENDED_DTB=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index a097cffa1231..8e108599e1af 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -377,6 +377,7 @@ CONFIG_PWM_TWL=m CONFIG_PWM_TWL_LED=m CONFIG_OMAP_USB2=m CONFIG_TI_PIPE3=y +CONFIG_TWL4030_USB=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index 41d856effe6c..510c747c65b4 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig @@ -3,8 +3,6 @@ CONFIG_SYSVIPC=y CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_SYSFS_DEPRECATED=y -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_EMBEDDED=y CONFIG_SLAB=y diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 38840a812924..8f6a5702b696 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -4,6 +4,7 @@ 
CONFIG_BLK_DEV_INITRD=y CONFIG_PERF_EVENTS=y CONFIG_ARCH_SUNXI=y CONFIG_SMP=y +CONFIG_NR_CPUS=8 CONFIG_AEABI=y CONFIG_HIGHMEM=y CONFIG_HIGHPTE=y diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig index f489fdaa19b8..37fe607a4ede 100644 --- a/arch/arm/configs/vexpress_defconfig +++ b/arch/arm/configs/vexpress_defconfig @@ -118,8 +118,8 @@ CONFIG_HID_ZEROPLUS=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_MON=y -CONFIG_USB_ISP1760_HCD=y CONFIG_USB_STORAGE=y +CONFIG_USB_ISP1760=y CONFIG_MMC=y CONFIG_MMC_ARMMMCI=y CONFIG_NEW_LEDS=y diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped index 71e5fc7cfb18..1d1800f71c5b 100644 --- a/arch/arm/crypto/aesbs-core.S_shipped +++ b/arch/arm/crypto/aesbs-core.S_shipped @@ -58,14 +58,18 @@ # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ 7 +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_MAX_ARCH__ 7 #endif #ifdef __thumb__ # define adrl adr #endif -#if __ARM_ARCH__>=7 +#if __ARM_MAX_ARCH__>=7 +.arch armv7-a +.fpu neon + .text .syntax unified @ ARMv7-capable assembler is expected to handle this #ifdef __thumb2__ @@ -74,8 +78,6 @@ .code 32 #endif -.fpu neon - .type _bsaes_decrypt8,%function .align 4 _bsaes_decrypt8: @@ -2095,9 +2097,11 @@ bsaes_xts_decrypt: vld1.8 {q8}, [r0] @ initial tweak adr r2, .Lxts_magic +#ifndef XTS_CHAIN_TWEAK tst r9, #0xf @ if not multiple of 16 it ne @ Thumb2 thing, sanity check in ARM subne r9, #0x10 @ subtract another 16 bytes +#endif subs r9, #0x80 blo .Lxts_dec_short diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl index be068db960ee..a4d3856e7d24 100644 --- a/arch/arm/crypto/bsaes-armv7.pl +++ b/arch/arm/crypto/bsaes-armv7.pl @@ -701,14 +701,18 @@ $code.=<<___; # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ 7 +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_MAX_ARCH__ 7 #endif #ifdef __thumb__ # define adrl adr #endif -#if __ARM_ARCH__>=7 +#if __ARM_MAX_ARCH__>=7 +.arch armv7-a +.fpu neon + .text .syntax unified @ ARMv7-capable assembler is expected to handle this #ifdef __thumb2__ @@ -717,8 +721,6 @@ $code.=<<___; .code 32 #endif -.fpu neon - .type _bsaes_decrypt8,%function .align 4 _bsaes_decrypt8: @@ -2076,9 +2078,11 @@ bsaes_xts_decrypt: vld1.8 {@XMM[8]}, [r0] @ initial tweak adr $magic, .Lxts_magic +#ifndef XTS_CHAIN_TWEAK tst $len, #0xf @ if not multiple of 16 it ne @ Thumb2 thing, sanity check in ARM subne $len, #0x10 @ subtract another 16 bytes +#endif subs $len, #0x80 blo .Lxts_dec_short diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index bf0fe99e8ca9..4cf48c3aca13 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -149,29 +149,28 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) (__boundary - 1 < (end) - 1)? 
__boundary: (end); \ }) +#define kvm_pgd_index(addr) pgd_index(addr) + static inline bool kvm_page_empty(void *ptr) { struct page *ptr_page = virt_to_page(ptr); return page_count(ptr_page) == 1; } - #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) #define kvm_pud_table_empty(kvm, pudp) (0) #define KVM_PREALLOC_LEVEL 0 -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) +static inline void *kvm_get_hwpgd(struct kvm *kvm) { - return 0; + return kvm->arch.pgd; } -static inline void kvm_free_hwpgd(struct kvm *kvm) { } - -static inline void *kvm_get_hwpgd(struct kvm *kvm) +static inline unsigned int kvm_get_hwpgd_size(void) { - return kvm->arch.pgd; + return PTRS_PER_S2_PGD * sizeof(pgd_t); } struct kvm; diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S index 80a6501b4d50..c3c45e628e33 100644 --- a/arch/arm/include/debug/at91.S +++ b/arch/arm/include/debug/at91.S @@ -18,8 +18,11 @@ #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */ #endif -/* Keep in sync with mach-at91/include/mach/hardware.h */ +#ifdef CONFIG_MMU #define AT91_IO_P2V(x) ((x) - 0x01000000) +#else +#define AT91_IO_P2V(x) (x) +#endif #define AT91_DBGU_SR (0x14) /* Status Register */ #define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */ diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 3e6859bc3e11..5656d79c5a44 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, phys_addr_t addr = start, end = start + size; phys_addr_t next; - pgd = pgdp + pgd_index(addr); + pgd = pgdp + kvm_pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); if (!pgd_none(*pgd)) @@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm, phys_addr_t next; pgd_t *pgd; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); stage2_flush_puds(kvm, pgd, addr, next); @@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); } +/* Free the HW pgd, one page at a time */ +static void kvm_free_hwpgd(void *hwpgd) +{ + free_pages_exact(hwpgd, kvm_get_hwpgd_size()); +} + +/* Allocate the HW PGD, making sure that each page gets its own refcount */ +static void *kvm_alloc_hwpgd(void) +{ + unsigned int size = kvm_get_hwpgd_size(); + + return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); +} + /** * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. * @kvm: The KVM struct pointer for the VM. @@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) */ int kvm_alloc_stage2_pgd(struct kvm *kvm) { - int ret; pgd_t *pgd; + void *hwpgd; if (kvm->arch.pgd != NULL) { kvm_err("kvm_arch already initialized?\n"); return -EINVAL; } + hwpgd = kvm_alloc_hwpgd(); + if (!hwpgd) + return -ENOMEM; + + /* When the kernel uses more levels of page tables than the + * guest, we allocate a fake PGD and pre-populate it to point + * to the next-level page table, which will be the real + * initial page table pointed to by the VTTBR. + * + * When KVM_PREALLOC_LEVEL==2, we allocate a single page for + * the PMD and the kernel will use folded pud. + * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD + * pages. + */ if (KVM_PREALLOC_LEVEL > 0) { + int i; + /* * Allocate fake pgd for the page table manipulation macros to * work. 
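* (That is, the fake pgd only has to satisfy the generic pgd/pud offset arithmetic used by the software stage-2 walkers.)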
This is not used by the hardware and we have no @@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) */ pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), GFP_KERNEL | __GFP_ZERO); + + if (!pgd) { + kvm_free_hwpgd(hwpgd); + return -ENOMEM; + } + + /* Plug the HW PGD into the fake one. */ + for (i = 0; i < PTRS_PER_S2_PGD; i++) { + if (KVM_PREALLOC_LEVEL == 1) + pgd_populate(NULL, pgd + i, + (pud_t *)hwpgd + i * PTRS_PER_PUD); + else if (KVM_PREALLOC_LEVEL == 2) + pud_populate(NULL, pud_offset(pgd, 0) + i, + (pmd_t *)hwpgd + i * PTRS_PER_PMD); + } } else { /* * Allocate actual first-level Stage-2 page table used by the * hardware for Stage-2 page table walks. */ - pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); + pgd = (pgd_t *)hwpgd; } - if (!pgd) - return -ENOMEM; - - ret = kvm_prealloc_hwpgd(kvm, pgd); - if (ret) - goto out_err; - kvm_clean_pgd(pgd); kvm->arch.pgd = pgd; return 0; -out_err: - if (KVM_PREALLOC_LEVEL > 0) - kfree(pgd); - else - free_pages((unsigned long)pgd, S2_PGD_ORDER); - return ret; } /** @@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm) return; unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); - kvm_free_hwpgd(kvm); + kvm_free_hwpgd(kvm_get_hwpgd(kvm)); if (KVM_PREALLOC_LEVEL > 0) kfree(kvm->arch.pgd); - else - free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); + kvm->arch.pgd = NULL; } @@ -799,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache pgd_t *pgd; pud_t *pud; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); if (WARN_ON(pgd_none(*pgd))) { if (!cache) return NULL; @@ -1089,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) pgd_t *pgd; phys_addr_t next; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); do { /* * Release kvm_mmu_lock periodically if the memory region is diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 5e34fb143309..aa4116e9452f 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -270,37 +270,35 @@ static void __init at91_pm_sram_init(void) phys_addr_t sram_pbase; unsigned long sram_base; struct device_node *node; - struct platform_device *pdev; + struct platform_device *pdev = NULL; - node = of_find_compatible_node(NULL, NULL, "mmio-sram"); - if (!node) { - pr_warn("%s: failed to find sram node!\n", __func__); - return; + for_each_compatible_node(node, NULL, "mmio-sram") { + pdev = of_find_device_by_node(node); + if (pdev) { + of_node_put(node); + break; + } } - pdev = of_find_device_by_node(node); if (!pdev) { pr_warn("%s: failed to find sram device!\n", __func__); - goto put_node; + return; } sram_pool = dev_get_gen_pool(&pdev->dev); if (!sram_pool) { pr_warn("%s: sram pool unavailable!\n", __func__); - goto put_node; + return; } sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz); if (!sram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); - goto put_node; + return; } sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false); - -put_node: - of_node_put(node); } #endif diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h index d2c89963af2d..86c0aa819d25 100644 --- a/arch/arm/mach-at91/pm.h +++ b/arch/arm/mach-at91/pm.h @@ -44,7 +44,7 @@ static inline void at91rm9200_standby(void) " mcr p15, 0, %0, c7, c0, 4\n\t" " str %5, [%1, %2]" : - : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR), + : "r" (0), 
"r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR), "r" (1), "r" (AT91RM9200_SDRAMC_SRR), "r" (lpr)); } diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S index 556151e85ec4..931f0e302c03 100644 --- a/arch/arm/mach-at91/pm_slowclock.S +++ b/arch/arm/mach-at91/pm_slowclock.S @@ -25,11 +25,6 @@ */ #undef SLOWDOWN_MASTER_CLOCK -#define MCKRDY_TIMEOUT 1000 -#define MOSCRDY_TIMEOUT 1000 -#define PLLALOCK_TIMEOUT 1000 -#define PLLBLOCK_TIMEOUT 1000 - pmc .req r0 sdramc .req r1 ramc1 .req r2 @@ -41,60 +36,42 @@ tmp2 .req r5 * Wait until master clock is ready (after switching master clock source) */ .macro wait_mckrdy - mov tmp2, #MCKRDY_TIMEOUT -1: sub tmp2, tmp2, #1 - cmp tmp2, #0 - beq 2f - ldr tmp1, [pmc, #AT91_PMC_SR] +1: ldr tmp1, [pmc, #AT91_PMC_SR] tst tmp1, #AT91_PMC_MCKRDY beq 1b -2: .endm /* * Wait until master oscillator has stabilized. */ .macro wait_moscrdy - mov tmp2, #MOSCRDY_TIMEOUT -1: sub tmp2, tmp2, #1 - cmp tmp2, #0 - beq 2f - ldr tmp1, [pmc, #AT91_PMC_SR] +1: ldr tmp1, [pmc, #AT91_PMC_SR] tst tmp1, #AT91_PMC_MOSCS beq 1b -2: .endm /* * Wait until PLLA has locked. */ .macro wait_pllalock - mov tmp2, #PLLALOCK_TIMEOUT -1: sub tmp2, tmp2, #1 - cmp tmp2, #0 - beq 2f - ldr tmp1, [pmc, #AT91_PMC_SR] +1: ldr tmp1, [pmc, #AT91_PMC_SR] tst tmp1, #AT91_PMC_LOCKA beq 1b -2: .endm /* * Wait until PLLB has locked. */ .macro wait_pllblock - mov tmp2, #PLLBLOCK_TIMEOUT -1: sub tmp2, tmp2, #1 - cmp tmp2, #0 - beq 2f - ldr tmp1, [pmc, #AT91_PMC_SR] +1: ldr tmp1, [pmc, #AT91_PMC_SR] tst tmp1, #AT91_PMC_LOCKB beq 1b -2: .endm .text + .arm + /* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc, * void __iomem *ramc1, int memctrl) */ @@ -134,6 +111,16 @@ ddr_sr_enable: cmp memctrl, #AT91_MEMCTRL_DDRSDR bne sdr_sr_enable + /* LPDDR1 --> force DDR2 mode during self-refresh */ + ldr tmp1, [sdramc, #AT91_DDRSDRC_MDR] + str tmp1, .saved_sam9_mdr + bic tmp1, tmp1, #~AT91_DDRSDRC_MD + cmp tmp1, #AT91_DDRSDRC_MD_LOW_POWER_DDR + ldreq tmp1, [sdramc, #AT91_DDRSDRC_MDR] + biceq tmp1, tmp1, #AT91_DDRSDRC_MD + orreq tmp1, tmp1, #AT91_DDRSDRC_MD_DDR2 + streq tmp1, [sdramc, #AT91_DDRSDRC_MDR] + /* prepare for DDRAM self-refresh mode */ ldr tmp1, [sdramc, #AT91_DDRSDRC_LPR] str tmp1, .saved_sam9_lpr @@ -142,14 +129,26 @@ ddr_sr_enable: /* figure out if we use the second ram controller */ cmp ramc1, #0 - ldrne tmp2, [ramc1, #AT91_DDRSDRC_LPR] - strne tmp2, .saved_sam9_lpr1 - bicne tmp2, #AT91_DDRSDRC_LPCB - orrne tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH + beq ddr_no_2nd_ctrl + + ldr tmp2, [ramc1, #AT91_DDRSDRC_MDR] + str tmp2, .saved_sam9_mdr1 + bic tmp2, tmp2, #~AT91_DDRSDRC_MD + cmp tmp2, #AT91_DDRSDRC_MD_LOW_POWER_DDR + ldreq tmp2, [ramc1, #AT91_DDRSDRC_MDR] + biceq tmp2, tmp2, #AT91_DDRSDRC_MD + orreq tmp2, tmp2, #AT91_DDRSDRC_MD_DDR2 + streq tmp2, [ramc1, #AT91_DDRSDRC_MDR] + + ldr tmp2, [ramc1, #AT91_DDRSDRC_LPR] + str tmp2, .saved_sam9_lpr1 + bic tmp2, #AT91_DDRSDRC_LPCB + orr tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH /* Enable DDRAM self-refresh mode */ + str tmp2, [ramc1, #AT91_DDRSDRC_LPR] +ddr_no_2nd_ctrl: str tmp1, [sdramc, #AT91_DDRSDRC_LPR] - strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] b sdr_sr_done @@ -208,6 +207,7 @@ sdr_sr_done: /* Turn off the main oscillator */ ldr tmp1, [pmc, #AT91_CKGR_MOR] bic tmp1, tmp1, #AT91_PMC_MOSCEN + orr tmp1, tmp1, #AT91_PMC_KEY str tmp1, [pmc, #AT91_CKGR_MOR] /* Wait for interrupt */ @@ -216,6 +216,7 @@ sdr_sr_done: /* Turn on the main oscillator */ ldr tmp1, [pmc, #AT91_CKGR_MOR] orr tmp1, tmp1, #AT91_PMC_MOSCEN + orr tmp1, tmp1, 
#AT91_PMC_KEY str tmp1, [pmc, #AT91_CKGR_MOR] wait_moscrdy @@ -280,12 +281,17 @@ sdr_sr_done: */ cmp memctrl, #AT91_MEMCTRL_DDRSDR bne sdr_en_restore + /* Restore MDR in case of LPDDR1 */ + ldr tmp1, .saved_sam9_mdr + str tmp1, [sdramc, #AT91_DDRSDRC_MDR] /* Restore LPR on AT91 with DDRAM */ ldr tmp1, .saved_sam9_lpr str tmp1, [sdramc, #AT91_DDRSDRC_LPR] /* if we use the second ram controller */ cmp ramc1, #0 + ldrne tmp2, .saved_sam9_mdr1 + strne tmp2, [ramc1, #AT91_DDRSDRC_MDR] ldrne tmp2, .saved_sam9_lpr1 strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] @@ -319,5 +325,11 @@ ram_restored: .saved_sam9_lpr1: .word 0 +.saved_sam9_mdr: + .word 0 + +.saved_sam9_mdr1: + .word 0 + ENTRY(at91_slow_clock_sz) .word .-at91_slow_clock diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 3f32c47a6d74..d2e9f12d12f1 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -126,8 +126,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious) */ void exynos_cpu_power_down(int cpu) { - if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") || - of_machine_is_compatible("samsung,exynos5800"))) { + if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) { /* * Bypass power down for CPU0 during suspend. Check for * the SYS_PWR_REG value to decide if we are suspending diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 20f267121b3e..37266a826437 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c @@ -161,6 +161,34 @@ no_clk: of_genpd_add_provider_simple(np, &pd->pd); } + /* Assign the child power domains to their parents */ + for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") { + struct generic_pm_domain *child_domain, *parent_domain; + struct of_phandle_args args; + + args.np = np; + args.args_count = 0; + child_domain = of_genpd_get_from_provider(&args); + if (!child_domain) + continue; + + if (of_parse_phandle_with_args(np, "power-domains", + "#power-domain-cells", 0, &args) != 0) + continue; + + parent_domain = of_genpd_get_from_provider(&args); + if (!parent_domain) + continue; + + if (pm_genpd_add_subdomain(parent_domain, child_domain)) + pr_warn("%s failed to add subdomain: %s\n", + parent_domain->name, child_domain->name); + else + pr_info("%s has as child subdomain: %s.\n", + parent_domain->name, child_domain->name); + of_node_put(np); + } + return 0; } arch_initcall(exynos4_pm_init_power_domain); diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 52e2b1a2fddb..318d127df147 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3; static u32 exynos_irqwake_intmask = 0xffffffff; static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { - { 73, BIT(1) }, /* RTC alarm */ - { 74, BIT(2) }, /* RTC tick */ + { 105, BIT(1) }, /* RTC alarm */ + { 106, BIT(2) }, /* RTC tick */ { /* sentinel */ }, }; diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 4ad6e473cf83..9de3412af406 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c @@ -211,8 +211,9 @@ static void __init imx6q_1588_init(void) * set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad * (external OSC), and we need to clear the bit. */ - clksel = ptp_clk == enet_ref ? IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : - IMX6Q_GPR1_ENET_CLK_SEL_PAD; + clksel = clk_is_match(ptp_clk, enet_ref) ? 
+ IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : + IMX6Q_GPR1_ENET_CLK_SEL_PAD; gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); if (!IS_ERR(gpr)) regmap_update_bits(gpr, IOMUXC_GPR1, diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 92afb723dcfc..355b08936871 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1692,16 +1692,15 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name) if (ret == -EBUSY) pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name); - if (!ret) { + if (oh->clkdm) { /* * Set the clockdomain to HW_AUTO, assuming that the * previous state was HW_AUTO. */ - if (oh->clkdm && hwsup) + if (hwsup) clkdm_allow_idle(oh->clkdm); - } else { - if (oh->clkdm) - clkdm_hwmod_disable(oh->clkdm, oh); + + clkdm_hwmod_disable(oh->clkdm, oh); } return ret; @@ -2698,6 +2697,7 @@ static int __init _register(struct omap_hwmod *oh) INIT_LIST_HEAD(&oh->master_ports); INIT_LIST_HEAD(&oh->slave_ports); spin_lock_init(&oh->_lock); + lockdep_set_class(&oh->_lock, &oh->hwmod_key); oh->_state = _HWMOD_STATE_REGISTERED; diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index 9d4bec6ee742..9611c91d9b82 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h @@ -674,6 +674,7 @@ struct omap_hwmod { u32 _sysc_cache; void __iomem *_mpu_rt_va; spinlock_t _lock; + struct lock_class_key hwmod_key; /* unique lock class */ struct list_head node; struct omap_hwmod_ocp_if *_mpu_port; unsigned int (*xlate_irq)(unsigned int); diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index e8692e7675b8..16fe7a1b7a35 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -1466,55 +1466,18 @@ static struct omap_hwmod dra7xx_ocp2scp3_hwmod = { * */ -static struct omap_hwmod_class dra7xx_pcie_hwmod_class = { +static struct omap_hwmod_class dra7xx_pciess_hwmod_class = { .name = "pcie", }; /* pcie1 */ -static struct omap_hwmod dra7xx_pcie1_hwmod = { +static struct omap_hwmod dra7xx_pciess1_hwmod = { .name = "pcie1", - .class = &dra7xx_pcie_hwmod_class, + .class = &dra7xx_pciess_hwmod_class, .clkdm_name = "pcie_clkdm", .main_clk = "l4_root_clk_div", .prcm = { .omap4 = { - .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* pcie2 */ -static struct omap_hwmod dra7xx_pcie2_hwmod = { - .name = "pcie2", - .class = &dra7xx_pcie_hwmod_class, - .clkdm_name = "pcie_clkdm", - .main_clk = "l4_root_clk_div", - .prcm = { - .omap4 = { - .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET, - .modulemode = MODULEMODE_SWCTRL, - }, - }, -}; - -/* - * 'PCIE PHY' class - * - */ - -static struct omap_hwmod_class dra7xx_pcie_phy_hwmod_class = { - .name = "pcie-phy", -}; - -/* pcie1 phy */ -static struct omap_hwmod dra7xx_pcie1_phy_hwmod = { - .name = "pcie1-phy", - .class = &dra7xx_pcie_phy_hwmod_class, - .clkdm_name = "l3init_clkdm", - .main_clk = "l4_root_clk_div", - .prcm = { - .omap4 = { .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET, .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, @@ -1522,11 +1485,11 @@ static struct omap_hwmod dra7xx_pcie1_phy_hwmod = { }, }; -/* pcie2 phy */ -static struct omap_hwmod dra7xx_pcie2_phy_hwmod = { - .name = "pcie2-phy", - .class = &dra7xx_pcie_phy_hwmod_class, - .clkdm_name = "l3init_clkdm", +/* pcie2 */ +static struct omap_hwmod dra7xx_pciess2_hwmod = { + 
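	/* as with pciess1 above, the old pcie2 and pcie2-phy hwmods are merged into a single PCIe subsystem hwmod */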
.name = "pcie2", + .class = &dra7xx_pciess_hwmod_class, + .clkdm_name = "pcie_clkdm", .main_clk = "l4_root_clk_div", .prcm = { .omap4 = { @@ -2877,50 +2840,34 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp3 = { .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l3_main_1 -> pcie1 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie1 = { +/* l3_main_1 -> pciess1 */ +static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = { .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_pcie1_hwmod, + .slave = &dra7xx_pciess1_hwmod, .clk = "l3_iclk_div", .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l4_cfg -> pcie1 */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1 = { +/* l4_cfg -> pciess1 */ +static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = { .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_pcie1_hwmod, + .slave = &dra7xx_pciess1_hwmod, .clk = "l4_root_clk_div", .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l3_main_1 -> pcie2 */ -static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie2 = { +/* l3_main_1 -> pciess2 */ +static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = { .master = &dra7xx_l3_main_1_hwmod, - .slave = &dra7xx_pcie2_hwmod, + .slave = &dra7xx_pciess2_hwmod, .clk = "l3_iclk_div", .user = OCP_USER_MPU | OCP_USER_SDMA, }; -/* l4_cfg -> pcie2 */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2 = { - .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_pcie2_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> pcie1 phy */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1_phy = { - .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_pcie1_phy_hwmod, - .clk = "l4_root_clk_div", - .user = OCP_USER_MPU | OCP_USER_SDMA, -}; - -/* l4_cfg -> pcie2 phy */ -static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2_phy = { +/* l4_cfg -> pciess2 */ +static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = { .master = &dra7xx_l4_cfg_hwmod, - .slave = &dra7xx_pcie2_phy_hwmod, + .slave = &dra7xx_pciess2_hwmod, .clk = "l4_root_clk_div", .user = OCP_USER_MPU | OCP_USER_SDMA, }; @@ -3327,12 +3274,10 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { &dra7xx_l4_cfg__mpu, &dra7xx_l4_cfg__ocp2scp1, &dra7xx_l4_cfg__ocp2scp3, - &dra7xx_l3_main_1__pcie1, - &dra7xx_l4_cfg__pcie1, - &dra7xx_l3_main_1__pcie2, - &dra7xx_l4_cfg__pcie2, - &dra7xx_l4_cfg__pcie1_phy, - &dra7xx_l4_cfg__pcie2_phy, + &dra7xx_l3_main_1__pciess1, + &dra7xx_l4_cfg__pciess1, + &dra7xx_l3_main_1__pciess2, + &dra7xx_l4_cfg__pciess2, &dra7xx_l3_main_1__qspi, &dra7xx_l4_per3__rtcss, &dra7xx_l4_cfg__sata, diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 190fa43e7479..e642b079e9f3 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -173,6 +173,7 @@ static void __init omap3_igep0030_rev_g_legacy_init(void) static void __init omap3_evm_legacy_init(void) { + hsmmc2_internal_input_clk(); legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149); } diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index a08a617a6c11..d6d6bc39e05c 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -252,10 +252,10 @@ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) { saved_mask[0] = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, - OMAP4_PRM_IRQSTATUS_MPU_OFFSET); + OMAP4_PRM_IRQENABLE_MPU_OFFSET); saved_mask[1] = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, - OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); + 
OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_OFFSET); diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h index 483cb467bf65..a0f3b1cd497c 100644 --- a/arch/arm/mach-socfpga/core.h +++ b/arch/arm/mach-socfpga/core.h @@ -45,6 +45,6 @@ extern char secondary_trampoline, secondary_trampoline_end; extern unsigned long socfpga_cpu1start_addr; -#define SOCFPGA_SCU_VIRT_BASE 0xfffec000 +#define SOCFPGA_SCU_VIRT_BASE 0xfee00000 #endif diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 383d61e138af..f5e597c207b9 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c @@ -23,6 +23,7 @@ #include <asm/hardware/cache-l2x0.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> +#include <asm/cacheflush.h> #include "core.h" @@ -73,6 +74,10 @@ void __init socfpga_sysmgr_init(void) (u32 *) &socfpga_cpu1start_addr)) pr_err("SMP: Need cpu1-start-addr in device tree.\n"); + /* Ensure that socfpga_cpu1start_addr is visible to other CPUs */ + smp_wmb(); + sync_cache_w(&socfpga_cpu1start_addr); + sys_manager_base_addr = of_iomap(np, 0); np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c index b067390cef4e..b373acade338 100644 --- a/arch/arm/mach-sti/board-dt.c +++ b/arch/arm/mach-sti/board-dt.c @@ -18,6 +18,7 @@ static const char *stih41x_dt_match[] __initdata = { "st,stih415", "st,stih416", "st,stih407", + "st,stih410", "st,stih418", NULL }; diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts index 2e25de0800b9..83578e766b94 100644 --- a/arch/arm64/boot/dts/apm/apm-mustang.dts +++ b/arch/arm64/boot/dts/apm/apm-mustang.dts @@ -45,6 +45,10 @@ status = "ok"; }; +&sgenet1 { + status = "ok"; +}; + &xgenet { status = "ok"; }; diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index a857794432d6..c1eb6911e539 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -186,6 +186,16 @@ clock-output-names = "sge0clk"; }; + sge1clk: sge1clk@1f21c000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f21c000 0x0 0x1000>; + reg-names = "csr-reg"; + csr-mask = <0xc>; + clock-output-names = "sge1clk"; + }; + xge0clk: xge0clk@1f61c000 { compatible = "apm,xgene-device-clock"; #clock-cells = <1>; @@ -635,6 +645,21 @@ phy-connection-type = "sgmii"; }; + sgenet1: ethernet@1f210030 { + compatible = "apm,xgene1-sgenet"; + status = "disabled"; + reg = <0x0 0x1f210030 0x0 0xd100>, + <0x0 0x1f200000 0x0 0Xc300>, + <0x0 0x1B000000 0x0 0X8000>; + reg-names = "enet_csr", "ring_csr", "ring_cmd"; + interrupts = <0x0 0xAC 0x4>; + port-id = <1>; + dma-coherent; + clocks = <&sge1clk 0>; + local-mac-address = [00 00 00 00 00 00]; + phy-connection-type = "sgmii"; + }; + xgenet: ethernet@1f610000 { compatible = "apm,xgene1-xgenet"; status = "disabled"; diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 94674eb7e7bb..54bb4ba97441 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -129,6 +129,9 @@ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are * not known to exist and will break with this configuration. * + * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time + * (see hyp-init.S). 
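 * A minimal illustrative sketch of that fixup, in C rather than the
 * assembly hyp-init.S actually uses (the helper name is hypothetical):
 * ID_AA64MMFR0_EL1.PARange lives in bits [3:0] and VTCR_EL2.PS in bits
 * [18:16], so the bootstrap code simply copies one field into the other.

	#include <linux/types.h>

	static inline u64 vtcr_set_ps(u64 vtcr)
	{
		u64 mmfr0;

		/* read the physical address range this CPU implements */
		asm("mrs %0, id_aa64mmfr0_el1" : "=r" (mmfr0));
		vtcr &= ~(7UL << 16);		/* clear VTCR_EL2.PS */
		vtcr |= (mmfr0 & 7) << 16;	/* PS := PARange */
		return vtcr;
	}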
+ * * Note that when using 4K pages, we concatenate two first level page tables * together. * @@ -138,7 +141,6 @@ #ifdef CONFIG_ARM64_64K_PAGES /* * Stage2 translation configuration: - * 40bits output (PS = 2) * 40bits input (T0SZ = 24) * 64kB pages (TG0 = 1) * 2 level page tables (SL = 1) @@ -150,7 +152,6 @@ #else /* * Stage2 translation configuration: - * 40bits output (PS = 2) * 40bits input (T0SZ = 24) * 4kB pages (TG0 = 0) * 3 level page tables (SL = 1) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6458b5373142..bbfb600fa822 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) +#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) + /* * If we are concatenating first level stage-2 page tables, we would have less * than or equal to 16 pointers in the fake PGD, because that's what the @@ -171,43 +173,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) #define KVM_PREALLOC_LEVEL (0) #endif -/** - * kvm_prealloc_hwpgd - allocate inital table for VTTBR - * @kvm: The KVM struct pointer for the VM. - * @pgd: The kernel pseudo pgd - * - * When the kernel uses more levels of page tables than the guest, we allocate - * a fake PGD and pre-populate it to point to the next-level page table, which - * will be the real initial page table pointed to by the VTTBR. - * - * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and - * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we - * allocate 2 consecutive PUD pages. - */ -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) -{ - unsigned int i; - unsigned long hwpgd; - - if (KVM_PREALLOC_LEVEL == 0) - return 0; - - hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT); - if (!hwpgd) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_S2_PGD; i++) { - if (KVM_PREALLOC_LEVEL == 1) - pgd_populate(NULL, pgd + i, - (pud_t *)hwpgd + i * PTRS_PER_PUD); - else if (KVM_PREALLOC_LEVEL == 2) - pud_populate(NULL, pud_offset(pgd, 0) + i, - (pmd_t *)hwpgd + i * PTRS_PER_PMD); - } - - return 0; -} - static inline void *kvm_get_hwpgd(struct kvm *kvm) { pgd_t *pgd = kvm->arch.pgd; @@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm) return pmd_offset(pud, 0); } -static inline void kvm_free_hwpgd(struct kvm *kvm) +static inline unsigned int kvm_get_hwpgd_size(void) { - if (KVM_PREALLOC_LEVEL > 0) { - unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); - free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); - } + if (KVM_PREALLOC_LEVEL > 0) + return PTRS_PER_S2_PGD * PAGE_SIZE; + return PTRS_PER_S2_PGD * sizeof(pgd_t); } static inline bool kvm_page_empty(void *ptr) diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index c028fe37456f..53d9c354219f 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb) static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { + __flush_tlb_pgtable(tlb->mm, addr); pgtable_page_dtor(pte); tlb_remove_entry(tlb, pte); } @@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) { + __flush_tlb_pgtable(tlb->mm, addr); 
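	/* walk-cache entries for this table are now invalid, so the table page can be handed back and safely reused */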
tlb_remove_entry(tlb, virt_to_page(pmdp)); } #endif @@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, unsigned long addr) { + __flush_tlb_pgtable(tlb->mm, addr); tlb_remove_entry(tlb, virt_to_page(pudp)); } #endif diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 4abe9b945f77..c3bb05b98616 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -144,6 +144,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end } /* + * Used to invalidate the TLB (walk caches) corresponding to intermediate page + * table levels (pgd/pud/pmd). + */ +static inline void __flush_tlb_pgtable(struct mm_struct *mm, + unsigned long uaddr) +{ + unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48); + + dsb(ishst); + asm("tlbi vae1is, %0" : : "r" (addr)); + dsb(ish); +} +/* * On AArch64, the cache coherency is handled via the set_pte_at() function. */ static inline void update_mmu_cache(struct vm_area_struct *vma, diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index b42c7b480e1e..2b8d70164428 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -354,3 +354,12 @@ void efi_virtmap_unload(void) efi_set_pgd(current->active_mm); preempt_enable(); } + +/* + * UpdateCapsule() depends on the system being shutdown via + * ResetSystem(). + */ +bool efi_poweroff_required(void) +{ + return efi_enabled(EFI_RUNTIME_SERVICES); +} diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 8ce88e08c030..07f930540f4a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -585,8 +585,8 @@ ENDPROC(set_cpu_boot_mode_flag) * zeroing of .bss would clobber it. */ .pushsection .data..cacheline_aligned -ENTRY(__boot_cpu_mode) .align L1_CACHE_SHIFT +ENTRY(__boot_cpu_mode) .long BOOT_CPU_MODE_EL2 .long 0 .popsection diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index fde9923af859..c6b1f3b96f45 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -21,6 +21,7 @@ #include <stdarg.h> #include <linux/compat.h> +#include <linux/efi.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> @@ -150,6 +151,13 @@ void machine_restart(char *cmd) local_irq_disable(); smp_send_stop(); + /* + * UpdateCapsule() depends on the system being reset via + * ResetSystem(). + */ + if (efi_enabled(EFI_RUNTIME_SERVICES)) + efi_reboot(reboot_mode, NULL); + /* Now call the architecture specific reboot code. */ if (arm_pm_restart) arm_pm_restart(reboot_mode, cmd); diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h index 78d4483ba40c..ec4db6df5e0d 100644 --- a/arch/c6x/include/asm/pgtable.h +++ b/arch/c6x/include/asm/pgtable.h @@ -67,6 +67,11 @@ extern unsigned long empty_zero_page; */ #define pgtable_cache_init() do { } while (0) +/* + * c6x is !MMU, so define the simpliest implementation + */ +#define pgprot_writecombine pgprot_noncached + #include <asm-generic/pgtable.h> #endif /* _ASM_C6X_PGTABLE_H */ diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index 0536bc021cc6..ef548510b951 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S @@ -348,8 +348,9 @@ C_ENTRY(_user_exception): * The LP register should point to the location where the called function * should return. 
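* (A negative syscall number in r12 must be rejected as well, which is what the added blti provides.)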
[note that MAKE_SYS_CALL uses label 1] */ /* See if the system call number is valid */ + blti r12, 5f addi r11, r12, -__NR_syscalls; - bgei r11,5f; + bgei r11, 5f; /* Figure out which function to use for this system call. */ /* Note Microblaze barrel shift is optional, so don't rely on it */ add r12, r12, r12; /* convert num -> ptr */ @@ -375,7 +376,7 @@ C_ENTRY(_user_exception): /* The syscall number is invalid, return an error. */ 5: - rtsd r15, 8; /* looks like a normal subroutine return */ + braid ret_from_trap addi r3, r0, -ENOSYS; /* Entry point used to return from a syscall/trap */ @@ -411,7 +412,7 @@ C_ENTRY(ret_from_trap): bri 1b /* Maybe handle a signal */ -5: +5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; beqi r11, 4f; /* Signals to handle, handle them */ diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h index 20fb1cf2dab6..642462144872 100644 --- a/arch/nios2/include/asm/ptrace.h +++ b/arch/nios2/include/asm/ptrace.h @@ -15,7 +15,54 @@ #include <uapi/asm/ptrace.h> +/* This struct defines the way the registers are stored on the + stack during a system call. */ + #ifndef __ASSEMBLY__ +struct pt_regs { + unsigned long r8; /* r8-r15 Caller-saved GP registers */ + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + unsigned long r1; /* Assembler temporary */ + unsigned long r2; /* Retval LS 32bits */ + unsigned long r3; /* Retval MS 32bits */ + unsigned long r4; /* r4-r7 Register arguments */ + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long orig_r2; /* Copy of r2 ?? */ + unsigned long ra; /* Return address */ + unsigned long fp; /* Frame pointer */ + unsigned long sp; /* Stack pointer */ + unsigned long gp; /* Global pointer */ + unsigned long estatus; + unsigned long ea; /* Exception return address (pc) */ + unsigned long orig_r7; +}; + +/* + * This is the extended stack used by signal handlers and the context + * switcher: it's pushed after the normal "struct pt_regs". + */ +struct switch_stack { + unsigned long r16; /* r16-r23 Callee-saved GP registers */ + unsigned long r17; + unsigned long r18; + unsigned long r19; + unsigned long r20; + unsigned long r21; + unsigned long r22; + unsigned long r23; + unsigned long fp; + unsigned long gp; + unsigned long ra; +}; + #define user_mode(regs) (((regs)->estatus & ESTATUS_EU)) #define instruction_pointer(regs) ((regs)->ra) diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h deleted file mode 100644 index 2c87614b0f6e..000000000000 --- a/arch/nios2/include/asm/ucontext.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) 2010 Tobias Klauser <[email protected]> - * Copyright (C) 2004 Microtronix Datacom Ltd - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ - -#ifndef _ASM_NIOS2_UCONTEXT_H -#define _ASM_NIOS2_UCONTEXT_H - -typedef int greg_t; -#define NGREG 32 -typedef greg_t gregset_t[NGREG]; - -struct mcontext { - int version; - gregset_t gregs; -}; - -#define MCONTEXT_VERSION 2 - -struct ucontext { - unsigned long uc_flags; - struct ucontext *uc_link; - stack_t uc_stack; - struct mcontext uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ -}; - -#endif diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild index 4f07ca3f8d10..e0bb972a50d7 100644 --- a/arch/nios2/include/uapi/asm/Kbuild +++ b/arch/nios2/include/uapi/asm/Kbuild @@ -1,4 +1,5 @@ include include/uapi/asm-generic/Kbuild.asm header-y += elf.h -header-y += ucontext.h + +generic-y += ucontext.h diff --git a/arch/nios2/include/uapi/asm/elf.h b/arch/nios2/include/uapi/asm/elf.h index a5b91ae5cf56..6f06d3b2949e 100644 --- a/arch/nios2/include/uapi/asm/elf.h +++ b/arch/nios2/include/uapi/asm/elf.h @@ -50,9 +50,7 @@ typedef unsigned long elf_greg_t; -#define ELF_NGREG \ - ((sizeof(struct pt_regs) + sizeof(struct switch_stack)) / \ - sizeof(elf_greg_t)) +#define ELF_NGREG 49 typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef unsigned long elf_fpregset_t; diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h index e83a7c9d1c36..71a330597adf 100644 --- a/arch/nios2/include/uapi/asm/ptrace.h +++ b/arch/nios2/include/uapi/asm/ptrace.h @@ -67,53 +67,9 @@ #define NUM_PTRACE_REG (PTR_TLBMISC + 1) -/* this struct defines the way the registers are stored on the - stack during a system call. - - There is a fake_regs in setup.c that has to match pt_regs.*/ - -struct pt_regs { - unsigned long r8; /* r8-r15 Caller-saved GP registers */ - unsigned long r9; - unsigned long r10; - unsigned long r11; - unsigned long r12; - unsigned long r13; - unsigned long r14; - unsigned long r15; - unsigned long r1; /* Assembler temporary */ - unsigned long r2; /* Retval LS 32bits */ - unsigned long r3; /* Retval MS 32bits */ - unsigned long r4; /* r4-r7 Register arguments */ - unsigned long r5; - unsigned long r6; - unsigned long r7; - unsigned long orig_r2; /* Copy of r2 ?? */ - unsigned long ra; /* Return address */ - unsigned long fp; /* Frame pointer */ - unsigned long sp; /* Stack pointer */ - unsigned long gp; /* Global pointer */ - unsigned long estatus; - unsigned long ea; /* Exception return address (pc) */ - unsigned long orig_r7; -}; - -/* - * This is the extended stack used by signal handlers and the context - * switcher: it's pushed after the normal "struct pt_regs". - */ -struct switch_stack { - unsigned long r16; /* r16-r23 Callee-saved GP registers */ - unsigned long r17; - unsigned long r18; - unsigned long r19; - unsigned long r20; - unsigned long r21; - unsigned long r22; - unsigned long r23; - unsigned long fp; - unsigned long gp; - unsigned long ra; +/* User structures for general purpose registers. */ +struct user_pt_regs { + __u32 regs[49]; }; #endif /* __ASSEMBLY__ */ diff --git a/arch/nios2/include/uapi/asm/sigcontext.h b/arch/nios2/include/uapi/asm/sigcontext.h index 7b8bb41867d4..b67944a50927 100644 --- a/arch/nios2/include/uapi/asm/sigcontext.h +++ b/arch/nios2/include/uapi/asm/sigcontext.h @@ -15,14 +15,16 @@ * details. 
*/ -#ifndef _ASM_NIOS2_SIGCONTEXT_H -#define _ASM_NIOS2_SIGCONTEXT_H +#ifndef _UAPI__ASM_SIGCONTEXT_H +#define _UAPI__ASM_SIGCONTEXT_H -#include <asm/ptrace.h> +#include <linux/types.h> + +#define MCONTEXT_VERSION 2 struct sigcontext { - struct pt_regs regs; - unsigned long sc_mask; /* old sigmask */ + int version; + unsigned long gregs[32]; }; #endif diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index 2d0ea25be171..dda41e4fe707 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c @@ -39,7 +39,7 @@ static inline int rt_restore_ucontext(struct pt_regs *regs, struct ucontext *uc, int *pr2) { int temp; - greg_t *gregs = uc->uc_mcontext.gregs; + unsigned long *gregs = uc->uc_mcontext.gregs; int err; /* Always make any pending restarted system calls return -EINTR */ @@ -127,7 +127,7 @@ badframe: static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) { struct switch_stack *sw = (struct switch_stack *)regs - 1; - greg_t *gregs = uc->uc_mcontext.gregs; + unsigned long *gregs = uc->uc_mcontext.gregs; int err = 0; err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 0d231adfe576..0c9b6afe69e9 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -126,7 +126,6 @@ good_area: break; } -survive: /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo @@ -220,11 +219,6 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - if (is_global_init(tsk)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f6579cfde2df..19e17bd7aec0 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -165,7 +165,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ONE_REG: case KVM_CAP_ENABLE_CAP: case KVM_CAP_S390_CSS_SUPPORT: - case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: case KVM_CAP_ENABLE_CAP_VM: diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 96ac69c5eba0..efb00ec75805 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -86,6 +86,9 @@ config ARCH_DEFCONFIG default "arch/sparc/configs/sparc32_defconfig" if SPARC32 default "arch/sparc/configs/sparc64_defconfig" if SPARC64 +config ARCH_PROC_KCORE_TEXT + def_bool y + config IOMMU_HELPER bool default y if SPARC64 diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index 9b672be70dda..50d4840d9aeb 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h @@ -407,16 +407,16 @@ static inline void iounmap(volatile void __iomem *addr) { } -#define ioread8(X) readb(X) -#define ioread16(X) readw(X) -#define ioread16be(X) __raw_readw(X) -#define ioread32(X) readl(X) -#define ioread32be(X) __raw_readl(X) -#define iowrite8(val,X) writeb(val,X) -#define iowrite16(val,X) writew(val,X) -#define iowrite16be(val,X) __raw_writew(val,X) -#define iowrite32(val,X) writel(val,X) -#define iowrite32be(val,X) __raw_writel(val,X) +#define ioread8 readb +#define ioread16 readw +#define ioread16be __raw_readw +#define ioread32 readl +#define ioread32be __raw_readl +#define iowrite8 writeb +#define iowrite16 writew +#define iowrite16be __raw_writew +#define iowrite32 writel +#define iowrite32be __raw_writel /* Create a virtual mapping cookie for an IO port range */ void __iomem *ioport_map(unsigned long port, 
unsigned int nr); diff --git a/arch/sparc/include/asm/starfire.h b/arch/sparc/include/asm/starfire.h index c100dc27a0a9..176fa0ad19f1 100644 --- a/arch/sparc/include/asm/starfire.h +++ b/arch/sparc/include/asm/starfire.h @@ -12,7 +12,6 @@ extern int this_is_starfire; void check_if_starfire(void); -int starfire_hard_smp_processor_id(void); void starfire_hookup(int); unsigned int starfire_translate(unsigned long imap, unsigned int upaid); diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index 88d322b67fac..07cc49e541f4 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -98,11 +98,7 @@ void sun4v_do_mna(struct pt_regs *regs, void do_privop(struct pt_regs *regs); void do_privact(struct pt_regs *regs); void do_cee(struct pt_regs *regs); -void do_cee_tl1(struct pt_regs *regs); -void do_dae_tl1(struct pt_regs *regs); -void do_iae_tl1(struct pt_regs *regs); void do_div0_tl1(struct pt_regs *regs); -void do_fpdis_tl1(struct pt_regs *regs); void do_fpieee_tl1(struct pt_regs *regs); void do_fpother_tl1(struct pt_regs *regs); void do_ill_tl1(struct pt_regs *regs); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index da6f1a7fc4db..61139d9924ca 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1406,11 +1406,32 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) scheduler_ipi(); } -/* This is a nop because we capture all other cpus - * anyways when making the PROM active. - */ +static void stop_this_cpu(void *dummy) +{ + prom_stopself(); +} + void smp_send_stop(void) { + int cpu; + + if (tlb_type == hypervisor) { + for_each_online_cpu(cpu) { + if (cpu == smp_processor_id()) + continue; +#ifdef CONFIG_SUN_LDOMS + if (ldom_domaining_enabled) { + unsigned long hv_err; + hv_err = sun4v_cpu_stop(cpu); + if (hv_err) + printk(KERN_ERR "sun4v_cpu_stop() " + "failed err=%lu\n", hv_err); + } else +#endif + prom_stopcpu_cpuid(cpu); + } + } else + smp_call_function(stop_this_cpu, NULL, 0); } /** diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c index 82281a566bb8..167fdfd9c837 100644 --- a/arch/sparc/kernel/starfire.c +++ b/arch/sparc/kernel/starfire.c @@ -28,11 +28,6 @@ void check_if_starfire(void) this_is_starfire = 1; } -int starfire_hard_smp_processor_id(void) -{ - return upa_readl(0x1fff40000d0UL); -} - /* * Each Starfire board has 32 registers which perform translation * and delivery of traditional interrupt packets into the extended diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index c85403d0496c..30e7ddb27a3a 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -333,7 +333,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second long err; /* No need for backward compatibility. We can start fresh... 
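(SEMOP is 1, SEMGET 2, SEMCTL 3 and SEMTIMEDOP 4 in the ipc call numbering, so the old call <= SEMCTL guard excluded SEMTIMEDOP from the semaphore path.)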
*/ - if (call <= SEMCTL) { + if (call <= SEMTIMEDOP) { switch (call) { case SEMOP: err = sys_semtimedop(first, ptr, diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index a27651e866e7..0e699745d643 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2427,6 +2427,8 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs) } user_instruction_dump ((unsigned int __user *) regs->tpc); } + if (panic_on_oops) + panic("Fatal exception"); if (regs->tstate & TSTATE_PRIV) do_exit(SIGKILL); do_exit(SIGSEGV); @@ -2564,27 +2566,6 @@ void do_cee(struct pt_regs *regs) die_if_kernel("TL0: Cache Error Exception", regs); } -void do_cee_tl1(struct pt_regs *regs) -{ - exception_enter(); - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - die_if_kernel("TL1: Cache Error Exception", regs); -} - -void do_dae_tl1(struct pt_regs *regs) -{ - exception_enter(); - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - die_if_kernel("TL1: Data Access Exception", regs); -} - -void do_iae_tl1(struct pt_regs *regs) -{ - exception_enter(); - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - die_if_kernel("TL1: Instruction Access Exception", regs); -} - void do_div0_tl1(struct pt_regs *regs) { exception_enter(); @@ -2592,13 +2573,6 @@ void do_div0_tl1(struct pt_regs *regs) die_if_kernel("TL1: DIV0 Exception", regs); } -void do_fpdis_tl1(struct pt_regs *regs) -{ - exception_enter(); - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - die_if_kernel("TL1: FPU Disabled", regs); -} - void do_fpieee_tl1(struct pt_regs *regs) { exception_enter(); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3ea267c53320..4ca0d6ba5ec8 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2820,7 +2820,7 @@ static int __init report_memory(void) return 0; } -device_initcall(report_memory); +arch_initcall(report_memory); #ifdef CONFIG_SMP #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index 7083c16cccba..bb1376381985 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c @@ -14,13 +14,6 @@ static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; -struct kaslr_setup_data { - __u64 next; - __u32 type; - __u32 len; - __u8 data[1]; -} kaslr_setup_data; - #define I8254_PORT_CONTROL 0x43 #define I8254_PORT_COUNTER0 0x40 #define I8254_CMD_READBACK 0xC0 @@ -302,29 +295,7 @@ static unsigned long find_random_addr(unsigned long minimum, return slots_fetch_random(); } -static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled) -{ - struct setup_data *data; - - kaslr_setup_data.type = SETUP_KASLR; - kaslr_setup_data.len = 1; - kaslr_setup_data.next = 0; - kaslr_setup_data.data[0] = enabled; - - data = (struct setup_data *)(unsigned long)params->hdr.setup_data; - - while (data && data->next) - data = (struct setup_data *)(unsigned long)data->next; - - if (data) - data->next = (unsigned long)&kaslr_setup_data; - else - params->hdr.setup_data = (unsigned long)&kaslr_setup_data; - -} - -unsigned char *choose_kernel_location(struct boot_params *params, - unsigned char *input, +unsigned char *choose_kernel_location(unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size) @@ -335,17 +306,14 @@ unsigned char *choose_kernel_location(struct boot_params *params, #ifdef CONFIG_HIBERNATION if 
(!cmdline_find_option_bool("kaslr")) { debug_putstr("KASLR disabled by default...\n"); - add_kaslr_setup_data(params, 0); goto out; } #else if (cmdline_find_option_bool("nokaslr")) { debug_putstr("KASLR disabled by cmdline...\n"); - add_kaslr_setup_data(params, 0); goto out; } #endif - add_kaslr_setup_data(params, 1); /* Record the various known unsafe memory ranges. */ mem_avoid_init((unsigned long)input, input_size, diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 5903089c818f..a950864a64da 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -401,8 +401,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, * the entire decompressed kernel plus relocation table, or the * entire decompressed kernel plus .bss and .brk sections. */ - output = choose_kernel_location(real_mode, input_data, input_len, - output, + output = choose_kernel_location(input_data, input_len, output, output_len > run_size ? output_len : run_size); diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index ee3576b2666b..04477d68403f 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -57,8 +57,7 @@ int cmdline_find_option_bool(const char *option); #if CONFIG_RANDOMIZE_BASE /* aslr.c */ -unsigned char *choose_kernel_location(struct boot_params *params, - unsigned char *input, +unsigned char *choose_kernel_location(unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size); @@ -66,8 +65,7 @@ unsigned char *choose_kernel_location(struct boot_params *params, bool has_cpuflag(int flag); #else static inline -unsigned char *choose_kernel_location(struct boot_params *params, - unsigned char *input, +unsigned char *choose_kernel_location(unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 947c6bf52c33..54f60ab41c63 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -1155,7 +1155,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); if (!src) return -ENOMEM; - assoc = (src + req->cryptlen + auth_tag_len); + assoc = (src + req->cryptlen); scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); scatterwalk_map_and_copy(assoc, req->assoc, 0, req->assoclen, 0); @@ -1180,7 +1180,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) scatterwalk_done(&src_sg_walk, 0, 0); scatterwalk_done(&assoc_sg_walk, 0, 0); } else { - scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1); + scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1); kfree(src); } return retval; diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 0dbc08282291..72ba21a8b5fc 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk) preempt_disable(); tsk->thread.fpu_counter = 0; __drop_fpu(tsk); - clear_used_math(); + clear_stopped_child_used_math(tsk); preempt_enable(); } diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 95e11f79f123..f97fbe3abb67 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -51,8 +51,6 @@ extern int devmem_is_allowed(unsigned long pagenr); extern unsigned long 
max_low_pfn_mapped; extern unsigned long max_pfn_mapped; -extern bool kaslr_enabled; - static inline phys_addr_t get_max_mapped(void) { return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 44e6dd7e36a2..225b0988043a 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -7,7 +7,6 @@ #define SETUP_DTB 2 #define SETUP_PCI 3 #define SETUP_EFI 4 -#define SETUP_KASLR 5 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3d525c6124f6..803b684676ff 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1338,6 +1338,26 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) } /* + * ACPI offers an alternative platform interface model that removes + * ACPI hardware requirements for platforms that do not implement + * the PC Architecture. + * + * We initialize the Hardware-reduced ACPI model here: + */ +static void __init acpi_reduced_hw_init(void) +{ + if (acpi_gbl_reduced_hardware) { + /* + * Override x86_init functions and bypass legacy pic + * in Hardware-reduced ACPI mode + */ + x86_init.timers.timer_init = x86_init_noop; + x86_init.irqs.pre_vector_init = x86_init_noop; + legacy_pic = &null_legacy_pic; + } +} + +/* * If your system is blacklisted here, but you find that acpi=force * works for you, please contact [email protected] */ @@ -1536,6 +1556,11 @@ int __init early_acpi_boot_init(void) */ early_acpi_process_madt(); + /* + * Hardware-reduced ACPI mode initialization: + */ + acpi_reduced_hw_init(); + return 0; } diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index c2fd21fed002..017149cded07 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c @@ -37,10 +37,12 @@ static const struct apic apic_numachip; static unsigned int get_apic_id(unsigned long x) { unsigned long value; - unsigned int id; + unsigned int id = (x >> 24) & 0xff; - rdmsrl(MSR_FAM10H_NODE_ID, value); - id = ((x >> 24) & 0xffU) | ((value << 2) & 0xff00U); + if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { + rdmsrl(MSR_FAM10H_NODE_ID, value); + id |= (value << 2) & 0xff00; + } return id; } @@ -155,10 +157,18 @@ static int __init numachip_probe(void) static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) { - if (c->phys_proc_id != node) { - c->phys_proc_id = node; - per_cpu(cpu_llc_id, smp_processor_id()) = node; + u64 val; + u32 nodes = 1; + + this_cpu_write(cpu_llc_id, node); + + /* Account for nodes per socket in multi-core-module processors */ + if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) { + rdmsrl(MSR_FAM10H_NODE_ID, val); + nodes = ((val >> 3) & 7) + 1; } + + c->phys_proc_id = node / nodes; } static int __init numachip_system_init(void) diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 9bbb9b35c144..d1ac80b72c72 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -47,13 +47,21 @@ do { \ #ifdef CONFIG_RANDOMIZE_BASE static unsigned long module_load_offset; +static int randomize_modules = 1; /* Mutex protects the module_load_offset. 
*/ static DEFINE_MUTEX(module_kaslr_mutex); +static int __init parse_nokaslr(char *p) +{ + randomize_modules = 0; + return 0; +} +early_param("nokaslr", parse_nokaslr); + static unsigned long int get_module_load_offset(void) { - if (kaslr_enabled) { + if (randomize_modules) { mutex_lock(&module_kaslr_mutex); /* * Calculate the module_load_offset the first time this diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 98dc9317286e..0a2421cca01f 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -122,8 +122,6 @@ unsigned long max_low_pfn_mapped; unsigned long max_pfn_mapped; -bool __read_mostly kaslr_enabled = false; - #ifdef CONFIG_DMI RESERVE_BRK(dmi_alloc, 65536); #endif @@ -427,11 +425,6 @@ static void __init reserve_initrd(void) } #endif /* CONFIG_BLK_DEV_INITRD */ -static void __init parse_kaslr_setup(u64 pa_data, u32 data_len) -{ - kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data)); -} - static void __init parse_setup_data(void) { struct setup_data *data; @@ -457,9 +450,6 @@ static void __init parse_setup_data(void) case SETUP_EFI: parse_efi_setup(pa_data, data_len); break; - case SETUP_KASLR: - parse_kaslr_setup(pa_data, data_len); - break; default: break; } @@ -842,14 +832,10 @@ static void __init trim_low_memory_range(void) static int dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) { - if (kaslr_enabled) - pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n", - (unsigned long)&_text - __START_KERNEL, - __START_KERNEL, - __START_KERNEL_map, - MODULES_VADDR-1); - else - pr_emerg("Kernel Offset: disabled\n"); + pr_emerg("Kernel Offset: 0x%lx from 0x%lx " + "(relocation range: 0x%lx-0x%lx)\n", + (unsigned long)&_text - __START_KERNEL, __START_KERNEL, + __START_KERNEL_map, MODULES_VADDR-1); return 0; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 9d2073e2ecc9..4ff5d162ff9f 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) goto exit; conditional_sti(regs); - if (!user_mode(regs)) + if (!user_mode_vm(regs)) die("bounds", regs, error_code); if (!cpu_feature_enabled(X86_FEATURE_MPX)) { @@ -637,7 +637,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) * then it's very likely the result of an icebp/int01 trap. * User wants a sigtrap for that. */ - if (!dr6 && user_mode(regs)) + if (!dr6 && user_mode_vm(regs)) user_icebp = 1; /* Catch kmemcheck conditions first of all! */ diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 34f66e58a896..cdc6cf903078 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -379,7 +379,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) * thread's fpu state, reconstruct fxstate from the fsave * header. Sanitize the copied state etc. 
*/ - struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; + struct fpu *fpu = &tsk->thread.fpu; struct user_i387_ia32_struct env; int err = 0; @@ -393,14 +393,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) */ drop_fpu(tsk); - if (__copy_from_user(xsave, buf_fx, state_size) || + if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) || __copy_from_user(&env, buf, sizeof(env))) { + fpu_finit(fpu); err = -1; } else { sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); - set_used_math(); } + set_used_math(); if (use_eager_fpu()) { preempt_disable(); math_state_restore(); diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index cc31f7c06d3d..9541ba34126b 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -507,6 +507,7 @@ static int picdev_read(struct kvm_pic *s, return -EOPNOTSUPP; if (len != 1) { + memset(val, 0, len); pr_pic_unimpl("non byte read\n"); return 0; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f7b20b417a3a..10a481b7674d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2168,7 +2168,10 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) { unsigned long *msr_bitmap; - if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) { + if (is_guest_mode(vcpu)) + msr_bitmap = vmx_msr_bitmap_nested; + else if (irqchip_in_kernel(vcpu->kvm) && + apic_x2apic_mode(vcpu->arch.apic)) { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode_x2apic; else @@ -9218,9 +9221,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) } if (cpu_has_vmx_msr_bitmap() && - exec_control & CPU_BASED_USE_MSR_BITMAPS && - nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) { - vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested)); + exec_control & CPU_BASED_USE_MSR_BITMAPS) { + nested_vmx_merge_msr_bitmap(vcpu, vmcs12); + /* MSR_BITMAP will be set by following vmx_set_efer. 
*/ } else exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bd7a70be41b3..32bf19ef3115 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2744,7 +2744,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_USER_NMI: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: - case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_IOEVENTFD_NO_LENGTH: case KVM_CAP_PIT2: diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S index 31776d0efc8c..d7ec4e251c0a 100644 --- a/arch/x86/vdso/vdso32/sigreturn.S +++ b/arch/x86/vdso/vdso32/sigreturn.S @@ -17,6 +17,7 @@ .text .globl __kernel_sigreturn .type __kernel_sigreturn,@function + nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ ALIGN __kernel_sigreturn: .LSTART_sigreturn: diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 740ae3026a14..9f93af56a5fc 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -563,7 +563,7 @@ static bool alloc_p2m(unsigned long pfn) if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) p2m_init(p2m); else - p2m_init_identity(p2m, pfn); + p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1)); spin_lock_irqsave(&p2m_update_lock, flags); diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 7f8b7edcadca..26089d182cb7 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -358,8 +358,8 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len) npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT; if (WARN_ON(npages == 0)) return -EINVAL; - - sg_init_table(sgl->sg, npages); + /* Add one extra for linking */ + sg_init_table(sgl->sg, npages + 1); for (i = 0, len = n; i < npages; i++) { int plen = min_t(int, len, PAGE_SIZE - off); @@ -369,18 +369,26 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len) off = 0; len -= plen; } + sg_mark_end(sgl->sg + npages - 1); + sgl->npages = npages; + return n; } EXPORT_SYMBOL_GPL(af_alg_make_sg); +void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new) +{ + sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); + sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg); +} +EXPORT_SYMBOL(af_alg_link_sg); + void af_alg_free_sg(struct af_alg_sgl *sgl) { int i; - i = 0; - do { + for (i = 0; i < sgl->npages; i++) put_page(sgl->pages[i]); - } while (!sg_is_last(sgl->sg + (i++))); } EXPORT_SYMBOL_GPL(af_alg_free_sg); diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index b9743dc35801..8276f21ea7be 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -39,6 +39,7 @@ struct skcipher_ctx { struct af_alg_completion completion; + atomic_t inflight; unsigned used; unsigned int len; @@ -49,9 +50,65 @@ struct skcipher_ctx { struct ablkcipher_request req; }; +struct skcipher_async_rsgl { + struct af_alg_sgl sgl; + struct list_head list; +}; + +struct skcipher_async_req { + struct kiocb *iocb; + struct skcipher_async_rsgl first_sgl; + struct list_head list; + struct scatterlist *tsg; + char iv[]; +}; + +#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \ + crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))) + +#define GET_REQ_SIZE(ctx) \ + crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)) + +#define GET_IV_SIZE(ctx) \ + crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req)) + #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ sizeof(struct scatterlist) - 1) +static void skcipher_free_async_sgls(struct 
skcipher_async_req *sreq) +{ + struct skcipher_async_rsgl *rsgl, *tmp; + struct scatterlist *sgl; + struct scatterlist *sg; + int i, n; + + list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) { + af_alg_free_sg(&rsgl->sgl); + if (rsgl != &sreq->first_sgl) + kfree(rsgl); + } + sgl = sreq->tsg; + n = sg_nents(sgl); + for_each_sg(sgl, sg, n, i) + put_page(sg_page(sg)); + + kfree(sreq->tsg); +} + +static void skcipher_async_cb(struct crypto_async_request *req, int err) +{ + struct sock *sk = req->data; + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + struct skcipher_async_req *sreq = GET_SREQ(req, ctx); + struct kiocb *iocb = sreq->iocb; + + atomic_dec(&ctx->inflight); + skcipher_free_async_sgls(sreq); + kfree(req); + aio_complete(iocb, err, err); +} + static inline int skcipher_sndbuf(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); @@ -96,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk) return 0; } -static void skcipher_pull_sgl(struct sock *sk, int used) +static void skcipher_pull_sgl(struct sock *sk, int used, int put) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; @@ -123,8 +180,8 @@ static void skcipher_pull_sgl(struct sock *sk, int used) if (sg[i].length) return; - - put_page(sg_page(sg + i)); + if (put) + put_page(sg_page(sg + i)); sg_assign_page(sg + i, NULL); } @@ -143,7 +200,7 @@ static void skcipher_free_sgl(struct sock *sk) struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; - skcipher_pull_sgl(sk, ctx->used); + skcipher_pull_sgl(sk, ctx->used, 1); } static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) @@ -424,8 +481,149 @@ unlock: return err ?: size; } -static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg, - size_t ignored, int flags) +static int skcipher_all_sg_nents(struct skcipher_ctx *ctx) +{ + struct skcipher_sg_list *sgl; + struct scatterlist *sg; + int nents = 0; + + list_for_each_entry(sgl, &ctx->tsgl, list) { + sg = sgl->sg; + + while (!sg->length) + sg++; + + nents += sg_nents(sg); + } + return nents; +} + +static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, + int flags) +{ + struct sock *sk = sock->sk; + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + struct skcipher_sg_list *sgl; + struct scatterlist *sg; + struct skcipher_async_req *sreq; + struct ablkcipher_request *req; + struct skcipher_async_rsgl *last_rsgl = NULL; + unsigned int len = 0, tx_nents = skcipher_all_sg_nents(ctx); + unsigned int reqlen = sizeof(struct skcipher_async_req) + + GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx); + int i = 0; + int err = -ENOMEM; + + lock_sock(sk); + req = kmalloc(reqlen, GFP_KERNEL); + if (unlikely(!req)) + goto unlock; + + sreq = GET_SREQ(req, ctx); + sreq->iocb = msg->msg_iocb; + memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl)); + INIT_LIST_HEAD(&sreq->list); + sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL); + if (unlikely(!sreq->tsg)) { + kfree(req); + goto unlock; + } + sg_init_table(sreq->tsg, tx_nents); + memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx)); + ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req)); + ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + skcipher_async_cb, sk); + + while (iov_iter_count(&msg->msg_iter)) { + struct skcipher_async_rsgl *rsgl; + unsigned long used; + + if (!ctx->used) { + err = skcipher_wait_for_data(sk, flags); + if (err) + goto free; + } + sgl = list_first_entry(&ctx->tsgl, + struct skcipher_sg_list, 
list); + sg = sgl->sg; + + while (!sg->length) + sg++; + + used = min_t(unsigned long, ctx->used, + iov_iter_count(&msg->msg_iter)); + used = min_t(unsigned long, used, sg->length); + + if (i == tx_nents) { + struct scatterlist *tmp; + int x; + /* Ran out of tx slots in async request + * need to expand */ + tmp = kcalloc(tx_nents * 2, sizeof(*tmp), + GFP_KERNEL); + if (!tmp) + goto free; + + sg_init_table(tmp, tx_nents * 2); + for (x = 0; x < tx_nents; x++) + sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]), + sreq->tsg[x].length, + sreq->tsg[x].offset); + kfree(sreq->tsg); + sreq->tsg = tmp; + tx_nents *= 2; + } + /* Need to take over the tx sgl from ctx + * to the asynch req - these sgls will be freed later */ + sg_set_page(sreq->tsg + i++, sg_page(sg), sg->length, + sg->offset); + + if (list_empty(&sreq->list)) { + rsgl = &sreq->first_sgl; + list_add_tail(&rsgl->list, &sreq->list); + } else { + rsgl = kzalloc(sizeof(*rsgl), GFP_KERNEL); + if (!rsgl) { + err = -ENOMEM; + goto free; + } + list_add_tail(&rsgl->list, &sreq->list); + } + + used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used); + err = used; + if (used < 0) + goto free; + if (last_rsgl) + af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); + + last_rsgl = rsgl; + len += used; + skcipher_pull_sgl(sk, used, 0); + iov_iter_advance(&msg->msg_iter, used); + } + + ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, + len, sreq->iv); + err = ctx->enc ? crypto_ablkcipher_encrypt(req) : + crypto_ablkcipher_decrypt(req); + if (err == -EINPROGRESS) { + atomic_inc(&ctx->inflight); + err = -EIOCBQUEUED; + goto unlock; + } +free: + skcipher_free_async_sgls(sreq); + kfree(req); +unlock: + skcipher_wmem_wakeup(sk); + release_sock(sk); + return err; +} + +static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, + int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); @@ -484,7 +682,7 @@ free: goto unlock; copied += used; - skcipher_pull_sgl(sk, used); + skcipher_pull_sgl(sk, used, 1); iov_iter_advance(&msg->msg_iter, used); } @@ -497,6 +695,13 @@ unlock: return copied ?: err; } +static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg, + size_t ignored, int flags) +{ + return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ? 
+ skcipher_recvmsg_async(sock, msg, flags) : + skcipher_recvmsg_sync(sock, msg, flags); +} static unsigned int skcipher_poll(struct file *file, struct socket *sock, poll_table *wait) @@ -555,12 +760,25 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) return crypto_ablkcipher_setkey(private, key, keylen); } +static void skcipher_wait(struct sock *sk) +{ + struct alg_sock *ask = alg_sk(sk); + struct skcipher_ctx *ctx = ask->private; + int ctr = 0; + + while (atomic_read(&ctx->inflight) && ctr++ < 100) + msleep(100); +} + static void skcipher_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct skcipher_ctx *ctx = ask->private; struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); + if (atomic_read(&ctx->inflight)) + skcipher_wait(sk); + skcipher_free_sgl(sk); sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm)); sock_kfree_s(sk, ctx, ctx->len); @@ -592,6 +810,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk) ctx->more = 0; ctx->merge = 0; ctx->enc = 0; + atomic_set(&ctx->inflight, 0); af_alg_init_completion(&ctx->completion); ask->private = ctx; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 657964e8ab7e..37fb19047603 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -65,6 +65,7 @@ struct lpss_private_data; struct lpss_device_desc { unsigned int flags; + const char *clk_con_id; unsigned int prv_offset; size_t prv_size_override; void (*setup)(struct lpss_private_data *pdata); @@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = { static struct lpss_device_desc lpt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, + .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, }; @@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = { static struct lpss_device_desc byt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, + .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, }; @@ -313,7 +316,7 @@ out: return PTR_ERR(clk); pdata->clk = clk; - clk_register_clkdev(clk, NULL, devname); + clk_register_clkdev(clk, dev_desc->clk_con_id, devname); return 0; } diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index d453a2c98ad0..81751a49d8bf 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -307,7 +307,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, if (pos == 0) { memmove(blk + offset * map->cache_word_size, blk, rbnode->blklen * map->cache_word_size); - bitmap_shift_right(present, present, offset, blklen); + bitmap_shift_left(present, present, offset, blklen); } /* update the rbnode block, its size and the base register */ diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index f373c35f9e1d..da84f544c544 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -608,7 +608,8 @@ static int regcache_sync_block_single(struct regmap *map, void *block, for (i = start; i < end; i++) { regtmp = block_base + (i * map->reg_stride); - if (!regcache_reg_present(cache_present, i)) + if (!regcache_reg_present(cache_present, i) || + !regmap_writeable(map, regtmp)) continue; val = regcache_get_val(map, block, i); @@ -677,7 +678,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block, for (i = start; i < end; i++) { regtmp = block_base + (i * map->reg_stride); - if 
(!regcache_reg_present(cache_present, i)) { + if (!regcache_reg_present(cache_present, i) || + !regmap_writeable(map, regtmp)) { ret = regcache_sync_block_raw_flush(map, &data, base, regtmp); if (ret != 0) diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 6299a50a5960..a6c3f75b4b01 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -499,7 +499,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, goto err_alloc; } - ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags, + ret = request_threaded_irq(irq, NULL, regmap_irq_thread, + irq_flags | IRQF_ONESHOT, chip->name, d); if (ret != 0) { dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 8c1bf6190533..708b6574d805 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -52,6 +52,7 @@ static struct usb_driver btusb_driver; #define BTUSB_SWAVE 0x1000 #define BTUSB_INTEL_NEW 0x2000 #define BTUSB_AMP 0x4000 +#define BTUSB_QCA_ROME 0x8000 static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ @@ -213,6 +214,10 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, + /* QCA ROME chipset */ + { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, + /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, @@ -338,6 +343,8 @@ struct btusb_data { int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb); int (*recv_bulk)(struct btusb_data *data, void *buffer, int count); + + int (*setup_on_usb)(struct hci_dev *hdev); }; static inline void btusb_free_frags(struct btusb_data *data) @@ -879,6 +886,15 @@ static int btusb_open(struct hci_dev *hdev) BT_DBG("%s", hdev->name); + /* Patch USB firmware files prior to starting any URBs on the HCI path. + * It is safer to use the USB bulk channel for downloading the USB patch. + */ + if (data->setup_on_usb) { + err = data->setup_on_usb(hdev); + if (err < 0) + return err; + } + err = usb_autopm_get_interface(data->intf); if (err < 0) return err; @@ -1254,6 +1270,28 @@ static void btusb_waker(struct work_struct *work) usb_autopm_put_interface(data->intf); } +static struct sk_buff *btusb_read_local_version(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", + hdev->name, PTR_ERR(skb)); + return skb; + } + + if (skb->len != sizeof(struct hci_rp_read_local_version)) { + BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", + hdev->name); + kfree_skb(skb); + return ERR_PTR(-EIO); + } + + return skb; +} + static int btusb_setup_bcm92035(struct hci_dev *hdev) { struct sk_buff *skb; @@ -1278,12 +1316,9 @@ static int btusb_setup_csr(struct hci_dev *hdev) BT_DBG("%s", hdev->name); - skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, - HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("Reading local version failed (%ld)", -PTR_ERR(skb)); + skb = btusb_read_local_version(hdev); + if (IS_ERR(skb)) return -PTR_ERR(skb); - } rp = (struct hci_rp_read_local_version *)skb->data; @@ -2414,21 +2449,9 @@ static int
btusb_setup_bcm_patchram(struct hci_dev *hdev) kfree_skb(skb); /* Read Local Version Info */ - skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, - HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - ret = PTR_ERR(skb); - BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", - hdev->name, ret); - return ret; - } - - if (skb->len != sizeof(*ver)) { - BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", - hdev->name); - kfree_skb(skb); - return -EIO; - } + skb = btusb_read_local_version(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); ver = (struct hci_rp_read_local_version *)skb->data; rev = le16_to_cpu(ver->hci_rev); @@ -2516,20 +2539,9 @@ reset_fw: kfree_skb(skb); /* Read Local Version Info */ - skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, - HCI_INIT_TIMEOUT); + skb = btusb_read_local_version(hdev); if (IS_ERR(skb)) { ret = PTR_ERR(skb); - BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", - hdev->name, ret); - goto done; - } - - if (skb->len != sizeof(*ver)) { - BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", - hdev->name); - kfree_skb(skb); - ret = -EIO; goto done; } @@ -2628,6 +2640,258 @@ static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev, return 0; } +#define QCA_DFU_PACKET_LEN 4096 + +#define QCA_GET_TARGET_VERSION 0x09 +#define QCA_CHECK_STATUS 0x05 +#define QCA_DFU_DOWNLOAD 0x01 + +#define QCA_SYSCFG_UPDATED 0x40 +#define QCA_PATCH_UPDATED 0x80 +#define QCA_DFU_TIMEOUT 3000 + +struct qca_version { + __le32 rom_version; + __le32 patch_version; + __le32 ram_version; + __le32 ref_clock; + __u8 reserved[4]; +} __packed; + +struct qca_rampatch_version { + __le16 rom_version; + __le16 patch_version; +} __packed; + +struct qca_device_info { + u32 rom_version; + u8 rampatch_hdr; /* length of header in rampatch */ + u8 nvm_hdr; /* length of header in NVM */ + u8 ver_offset; /* offset of version structure in rampatch */ +}; + +static const struct qca_device_info qca_devices_table[] = { + { 0x00000100, 20, 4, 10 }, /* Rome 1.0 */ + { 0x00000101, 20, 4, 10 }, /* Rome 1.1 */ + { 0x00000201, 28, 4, 18 }, /* Rome 2.1 */ + { 0x00000300, 28, 4, 18 }, /* Rome 3.0 */ + { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */ +}; + +static int btusb_qca_send_vendor_req(struct hci_dev *hdev, u8 request, + void *data, u16 size) +{ + struct btusb_data *btdata = hci_get_drvdata(hdev); + struct usb_device *udev = btdata->udev; + int pipe, err; + u8 *buf; + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Some USB hosts have IOT issues with this controller, so we should + * not wait until the HCI layer is ready. + */ + pipe = usb_rcvctrlpipe(udev, 0); + err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN, + 0, 0, buf, size, USB_CTRL_SET_TIMEOUT); + if (err < 0) { + BT_ERR("%s: Failed to access otp area (%d)", hdev->name, err); + goto done; + } + + memcpy(data, buf, size); + +done: + kfree(buf); + + return err; +} + +static int btusb_setup_qca_download_fw(struct hci_dev *hdev, + const struct firmware *firmware, + size_t hdr_size) +{ + struct btusb_data *btdata = hci_get_drvdata(hdev); + struct usb_device *udev = btdata->udev; + size_t count, size, sent = 0; + int pipe, len, err; + u8 *buf; + + buf = kmalloc(QCA_DFU_PACKET_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + count = firmware->size; + + size = min_t(size_t, count, hdr_size); + memcpy(buf, firmware->data, size); + + /* USB patches should go down to the controller through the USB path, + * because the binary format is suited to the USB channel.
+ * USB control path is for patching headers and USB bulk is for + * patch body. + */ + pipe = usb_sndctrlpipe(udev, 0); + err = usb_control_msg(udev, pipe, QCA_DFU_DOWNLOAD, USB_TYPE_VENDOR, + 0, 0, buf, size, USB_CTRL_SET_TIMEOUT); + if (err < 0) { + BT_ERR("%s: Failed to send headers (%d)", hdev->name, err); + goto done; + } + + sent += size; + count -= size; + + while (count) { + size = min_t(size_t, count, QCA_DFU_PACKET_LEN); + + memcpy(buf, firmware->data + sent, size); + + pipe = usb_sndbulkpipe(udev, 0x02); + err = usb_bulk_msg(udev, pipe, buf, size, &len, + QCA_DFU_TIMEOUT); + if (err < 0) { + BT_ERR("%s: Failed to send body at %zd of %zd (%d)", + hdev->name, sent, firmware->size, err); + break; + } + + if (size != len) { + BT_ERR("%s: Failed to get bulk buffer", hdev->name); + err = -EILSEQ; + break; + } + + sent += size; + count -= size; + } + +done: + kfree(buf); + return err; +} + +static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev, + struct qca_version *ver, + const struct qca_device_info *info) +{ + struct qca_rampatch_version *rver; + const struct firmware *fw; + u32 ver_rom, ver_patch; + u16 rver_rom, rver_patch; + char fwname[64]; + int err; + + ver_rom = le32_to_cpu(ver->rom_version); + ver_patch = le32_to_cpu(ver->patch_version); + + snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom); + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err) { + BT_ERR("%s: failed to request rampatch file: %s (%d)", + hdev->name, fwname, err); + return err; + } + + BT_INFO("%s: using rampatch file: %s", hdev->name, fwname); + + rver = (struct qca_rampatch_version *)(fw->data + info->ver_offset); + rver_rom = le16_to_cpu(rver->rom_version); + rver_patch = le16_to_cpu(rver->patch_version); + + BT_INFO("%s: QCA: patch rome 0x%x build 0x%x, firmware rome 0x%x " + "build 0x%x", hdev->name, rver_rom, rver_patch, ver_rom, + ver_patch); + + if (rver_rom != ver_rom || rver_patch <= ver_patch) { + BT_ERR("%s: rampatch file version did not match with firmware", + hdev->name); + err = -EINVAL; + goto done; + } + + err = btusb_setup_qca_download_fw(hdev, fw, info->rampatch_hdr); + +done: + release_firmware(fw); + + return err; +} + +static int btusb_setup_qca_load_nvm(struct hci_dev *hdev, + struct qca_version *ver, + const struct qca_device_info *info) +{ + const struct firmware *fw; + char fwname[64]; + int err; + + snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin", + le32_to_cpu(ver->rom_version)); + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err) { + BT_ERR("%s: failed to request NVM file: %s (%d)", + hdev->name, fwname, err); + return err; + } + + BT_INFO("%s: using NVM file: %s", hdev->name, fwname); + + err = btusb_setup_qca_download_fw(hdev, fw, info->nvm_hdr); + + release_firmware(fw); + + return err; +} + +static int btusb_setup_qca(struct hci_dev *hdev) +{ + const struct qca_device_info *info = NULL; + struct qca_version ver; + u32 ver_rom; + u8 status; + int i, err; + + err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver, + sizeof(ver)); + if (err < 0) + return err; + + ver_rom = le32_to_cpu(ver.rom_version); + for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) { + if (ver_rom == qca_devices_table[i].rom_version) + info = &qca_devices_table[i]; + } + if (!info) { + BT_ERR("%s: don't support firmware rome 0x%x", hdev->name, + ver_rom); + return -ENODEV; + } + + err = btusb_qca_send_vendor_req(hdev, QCA_CHECK_STATUS, &status, + sizeof(status)); + if (err < 0) + return err; + + if (!(status & QCA_PATCH_UPDATED)) { + 
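/* Only (re)download the rampatch when the OTP status reports QCA_PATCH_UPDATED as clear; the QCA_SYSCFG_UPDATED check below mirrors this for the NVM image. */ +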
err = btusb_setup_qca_load_rampatch(hdev, &ver, info); + if (err < 0) + return err; + } + + if (!(status & QCA_SYSCFG_UPDATED)) { + err = btusb_setup_qca_load_nvm(hdev, &ver, info); + if (err < 0) + return err; + } + + return 0; +} + static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -2755,6 +3019,7 @@ static int btusb_probe(struct usb_interface *intf, hdev->shutdown = btusb_shutdown_intel; hdev->set_bdaddr = btusb_set_bdaddr_intel; set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); } if (id->driver_info & BTUSB_INTEL_NEW) { @@ -2778,9 +3043,15 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_ATH3012) { hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); } + if (id->driver_info & BTUSB_QCA_ROME) { + data->setup_on_usb = btusb_setup_qca; + hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + } + if (id->driver_info & BTUSB_AMP) { /* AMP controllers do not support SCO packets */ data->isoc = NULL; @@ -2816,6 +3087,8 @@ static int btusb_probe(struct usb_interface *intf, /* Fake CSR devices with broken commands */ if (bcdDevice <= 0x100) hdev->setup = btusb_setup_csr; + + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); } if (id->driver_info & BTUSB_SNIFFER) { diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index fae2dbbf5745..72d7028f779b 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -142,6 +142,7 @@ struct ports_device { * notification */ struct work_struct control_work; + struct work_struct config_work; struct list_head ports; @@ -1837,10 +1838,21 @@ static void config_intr(struct virtio_device *vdev) portdev = vdev->priv; + if (!use_multiport(portdev)) + schedule_work(&portdev->config_work); +} + +static void config_work_handler(struct work_struct *work) +{ + struct ports_device *portdev; + + portdev = container_of(work, struct ports_device, config_work); if (!use_multiport(portdev)) { + struct virtio_device *vdev; struct port *port; u16 rows, cols; + vdev = portdev->vdev; virtio_cread(vdev, struct virtio_console_config, cols, &cols); virtio_cread(vdev, struct virtio_console_config, rows, &rows); @@ -2040,12 +2052,14 @@ static int virtcons_probe(struct virtio_device *vdev) virtio_device_ready(portdev->vdev); + INIT_WORK(&portdev->config_work, &config_work_handler); + INIT_WORK(&portdev->control_work, &control_work_handler); + if (multiport) { unsigned int nr_added_bufs; spin_lock_init(&portdev->c_ivq_lock); spin_lock_init(&portdev->c_ovq_lock); - INIT_WORK(&portdev->control_work, &control_work_handler); nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); @@ -2113,6 +2127,8 @@ static void virtcons_remove(struct virtio_device *vdev) /* Finish up work that's lined up */ if (use_multiport(portdev)) cancel_work_sync(&portdev->control_work); + else + cancel_work_sync(&portdev->config_work); list_for_each_entry_safe(port, port2, &portdev->ports, list) unplug_port(port); @@ -2164,6 +2180,7 @@ static int virtcons_freeze(struct virtio_device *vdev) virtqueue_disable_cb(portdev->c_ivq); cancel_work_sync(&portdev->control_work); + cancel_work_sync(&portdev->config_work); /* * Once more: if control_work_handler() was running, it would * enable the cb as the last step.
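The virtio_console change above moves config-space access out of the interrupt handler into a work item, bracketed by INIT_WORK() in probe and cancel_work_sync() in the remove and freeze paths. A minimal sketch of that deferral pattern follows; the mydev_* names are illustrative only and not taken from the driver.

#include <linux/printk.h>
#include <linux/workqueue.h>

struct mydev {
	struct work_struct config_work;
	/* ... device state ... */
};

/* Runs later in process context, where config reads may sleep. */
static void mydev_config_work(struct work_struct *work)
{
	struct mydev *dev = container_of(work, struct mydev, config_work);

	/* Re-read the device configuration and apply it here. */
	pr_debug("config changed for %p\n", dev);
}

/* Interrupt context: do no real work, only defer to the work item. */
static void mydev_config_intr(struct mydev *dev)
{
	schedule_work(&dev->config_work);
}

/* Probe must INIT_WORK(&dev->config_work, mydev_config_work) before the
 * interrupt can fire; remove and freeze must cancel_work_sync() the item,
 * matching the ordering the hunk above establishes.
 */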
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index db7f8bce7467..25006a8bb8e6 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, divider->flags); } -/* - * The reverse of DIV_ROUND_UP: The maximum number which - * divided by m is r - */ -#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1) - static bool _is_valid_table_div(const struct clk_div_table *table, unsigned int div) { @@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table, unsigned long parent_rate, unsigned long rate, unsigned long flags) { - int up, down, div; + int up, down; + unsigned long up_rate, down_rate; - up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); + up = DIV_ROUND_UP(parent_rate, rate); + down = parent_rate / rate; if (flags & CLK_DIVIDER_POWER_OF_TWO) { - up = __roundup_pow_of_two(div); - down = __rounddown_pow_of_two(div); + up = __roundup_pow_of_two(up); + down = __rounddown_pow_of_two(down); } else if (table) { - up = _round_up_table(table, div); - down = _round_down_table(table, div); + up = _round_up_table(table, up); + down = _round_down_table(table, down); } - return (up - div) <= (div - down) ? up : down; + up_rate = DIV_ROUND_UP(parent_rate, up); + down_rate = DIV_ROUND_UP(parent_rate, down); + + return (rate - up_rate) <= (down_rate - rate) ? up : down; } static int _div_round(const struct clk_div_table *table, @@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, return i; } parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), - MULT_ROUND_UP(rate, i)); + rate * i); now = DIV_ROUND_UP(parent_rate, i); if (_is_best_div(rate, now, best, flags)) { bestdiv = i; @@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, bestdiv = readl(divider->reg) >> divider->shift; bestdiv &= div_mask(divider->width); bestdiv = _get_div(divider->table, bestdiv, divider->flags); - return bestdiv; + return DIV_ROUND_UP(*prate, bestdiv); } return divider_round_rate(hw, rate, prate, divider->table, diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index eb0152961d3c..237f23f68bfc 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk) return rate; } -EXPORT_SYMBOL_GPL(clk_core_get_rate); /** * clk_get_rate - return the rate of clk @@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk) } /** + * clk_is_match - check if two clk's point to the same hardware clock + * @p: clk compared against q + * @q: clk compared against p + * + * Returns true if the two struct clk pointers both point to the same hardware + * clock node. Put differently, returns true if struct clk *p and struct clk *q + * share the same struct clk_core object. + * + * Returns false otherwise. Note that two NULL clks are treated as matching. + */ +bool clk_is_match(const struct clk *p, const struct clk *q) +{ + /* trivial case: identical struct clk's or both NULL */ + if (p == q) + return true; + + /* true if clk->core pointers match. 
Avoid derefing garbage */ + if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) + if (p->core == q->core) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(clk_is_match); + +/** * __clk_init - initialize the data structures in a struct clk * @dev: device initializing this clk, placeholder for now * @clk: clk being initialized diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index b0b562b9ce0e..e60feffc10a1 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c @@ -48,6 +48,17 @@ static struct clk_pll pll3 = { }, }; +static struct clk_regmap pll4_vote = { + .enable_reg = 0x34c0, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "pll4_vote", + .parent_names = (const char *[]){ "pll4" }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + static struct clk_pll pll8 = { .l_reg = 0x3144, .m_reg = 0x3148, @@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = { static struct clk_regmap *gcc_msm8960_clks[] = { [PLL3] = &pll3.clkr, + [PLL4_VOTE] = &pll4_vote, [PLL8] = &pll8.clkr, [PLL8_VOTE] = &pll8_vote, [PLL14] = &pll14.clkr, @@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = { static struct clk_regmap *gcc_apq8064_clks[] = { [PLL3] = &pll3.clkr, + [PLL4_VOTE] = &pll4_vote, [PLL8] = &pll8.clkr, [PLL8_VOTE] = &pll8_vote, [PLL14] = &pll14.clkr, diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c index 121ffde25dc3..c9ff27b4648b 100644 --- a/drivers/clk/qcom/lcc-ipq806x.c +++ b/drivers/clk/qcom/lcc-ipq806x.c @@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = { .remove = lcc_ipq806x_remove, .driver = { .name = "lcc-ipq806x", - .owner = THIS_MODULE, .of_match_table = lcc_ipq806x_match_table, }, }; diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c index a75a408cfccd..e2c863295f00 100644 --- a/drivers/clk/qcom/lcc-msm8960.c +++ b/drivers/clk/qcom/lcc-msm8960.c @@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = { .mnctr_en_bit = 8, .mnctr_reset_bit = 7, .mnctr_mode_shift = 5, - .n_val_shift = 16, - .m_val_shift = 16, + .n_val_shift = 24, + .m_val_shift = 8, .width = 8, }, .p = { @@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev) return PTR_ERR(regmap); /* Use the correct frequency plan depending on speed of PLL4 */ - val = regmap_read(regmap, 0x4, &val); + regmap_read(regmap, 0x4, &val); if (val == 0x12) { slimbus_src.freq_tbl = clk_tbl_aif_osr_492; mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; @@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = { .remove = lcc_msm8960_remove, .driver = { .name = "lcc-msm8960", - .owner = THIS_MODULE, .of_match_table = lcc_msm8960_match_table, }, }; diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 6ef89639a9f6..d21640634adf 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c @@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw) struct fapll_data *fd = to_fapll(hw); u32 v = readl_relaxed(fd->base); - v |= (1 << FAPLL_MAIN_PLLEN); + v |= FAPLL_MAIN_PLLEN; writel_relaxed(v, fd->base); return 0; @@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw) struct fapll_data *fd = to_fapll(hw); u32 v = readl_relaxed(fd->base); - v &= ~(1 << FAPLL_MAIN_PLLEN); + v &= ~FAPLL_MAIN_PLLEN; writel_relaxed(v, fd->base); } @@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw) struct fapll_data *fd = to_fapll(hw); u32 v = readl_relaxed(fd->base); - return v & (1 << 
FAPLL_MAIN_PLLEN); + return v & FAPLL_MAIN_PLLEN; } static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c index bba62f9deefb..ec57ba2bbd87 100644 --- a/drivers/clocksource/time-efm32.c +++ b/drivers/clocksource/time-efm32.c @@ -225,12 +225,12 @@ static int __init efm32_clockevent_init(struct device_node *np) clock_event_ddata.base = base; clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); - setup_irq(irq, &efm32_clock_event_irq); - clockevents_config_and_register(&clock_event_ddata.evtdev, DIV_ROUND_CLOSEST(rate, 1024), 0xf, 0xffff); + setup_irq(irq, &efm32_clock_event_irq); + return 0; err_get_irq: diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 02268448dc85..5dcbf90b8015 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -178,10 +178,6 @@ static void __init sun5i_timer_init(struct device_node *node) ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); - ret = setup_irq(irq, &sun5i_timer_irq); - if (ret) - pr_warn("failed to setup irq %d\n", irq); - /* Enable timer0 interrupt */ val = readl(timer_base + TIMER_IRQ_EN_REG); writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); @@ -191,6 +187,10 @@ static void __init sun5i_timer_init(struct device_node *node) clockevents_config_and_register(&sun5i_clockevent, rate, TIMER_SYNC_TICKS, 0xffffffff); + + ret = setup_irq(irq, &sun5i_timer_irq); + if (ret) + pr_warn("failed to setup irq %d\n", irq); } CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer", sun5i_timer_init); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 6b6b07ff720b..f6d04c7b5115 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -43,9 +43,10 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv); +static struct drm_framebuffer * +internal_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv); /* Avoid boilerplate. I'm tired of typing. 
*/ #define DRM_ENUM_NAME_FN(fnname, list) \ @@ -2908,13 +2909,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, */ if (req->flags & DRM_MODE_CURSOR_BO) { if (req->handle) { - fb = add_framebuffer_internal(dev, &fbreq, file_priv); + fb = internal_framebuffer_create(dev, &fbreq, file_priv); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); } - - drm_framebuffer_reference(fb); } else { fb = NULL; } @@ -3267,9 +3266,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) return 0; } -static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv) +static struct drm_framebuffer * +internal_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; @@ -3301,12 +3301,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, return fb; } - mutex_lock(&file_priv->fbs_lock); - r->fb_id = fb->base.id; - list_add(&fb->filp_head, &file_priv->fbs); - DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); - mutex_unlock(&file_priv->fbs_lock); - return fb; } @@ -3328,15 +3322,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, int drm_mode_addfb2(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_mode_fb_cmd2 *r = data; struct drm_framebuffer *fb; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - fb = add_framebuffer_internal(dev, data, file_priv); + fb = internal_framebuffer_create(dev, r, file_priv); if (IS_ERR(fb)) return PTR_ERR(fb); + /* Transfer ownership to the filp for reaping on close */ + + DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); + mutex_lock(&file_priv->fbs_lock); + r->fb_id = fb->base.id; + list_add(&fb->filp_head, &file_priv->fbs); + mutex_unlock(&file_priv->fbs_lock); + return 0; } diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 9a5b68717ec8..379ab4555756 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg) { bool ret; - mutex_lock(&mgr->qlock); + + /* + * All updates to txmsg->state are protected by mgr->qlock, and the two + * cases we check here are terminal states. For those the barriers + * provided by the wake_up/wait_event pair are enough. 
+ */ ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); - mutex_unlock(&mgr->qlock); return ret; } @@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, return 0; } -/* must be called holding qlock */ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_sideband_msg_tx *txmsg; int ret; + WARN_ON(!mutex_is_locked(&mgr->qlock)); + /* construct a chunk from the first msg in the tx_msg queue */ if (list_empty(&mgr->tx_msg_downq)) { mgr->tx_down_in_progress = false; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 7fc6f8bd4821..1134526286c8 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -403,7 +403,7 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) unsigned rem; rem = do_div(tmp, alignment); - if (tmp) + if (rem) start += alignment - rem; } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e5daad5f75fb..5b205863b659 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2936,9 +2936,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) req = obj->last_read_req; /* Do this after OLR check to make sure we make forward progress polling - * on this IOCTL with a timeout <=0 (like busy ioctl) + * on this IOCTL with a timeout == 0 (like busy ioctl) */ - if (args->timeout_ns <= 0) { + if (args->timeout_ns == 0) { ret = -ETIME; goto out; } @@ -2948,7 +2948,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) i915_gem_request_reference(req); mutex_unlock(&dev->struct_mutex); - ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, + ret = __i915_wait_request(req, reset_counter, true, + args->timeout_ns > 0 ? &args->timeout_ns : NULL, file->driver_priv); mutex_lock(&dev->struct_mutex); i915_gem_request_unreference(req); @@ -4792,6 +4793,9 @@ i915_gem_init_hw(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) return -EIO; + /* Double layer security blanket, see i915_gem_init() */ + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + if (dev_priv->ellc_size) I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); @@ -4824,7 +4828,7 @@ i915_gem_init_hw(struct drm_device *dev) for_each_ring(ring, dev_priv, i) { ret = ring->init_hw(ring); if (ret) - return ret; + goto out; } for (i = 0; i < NUM_L3_SLICES(dev); i++) @@ -4841,9 +4845,11 @@ i915_gem_init_hw(struct drm_device *dev) DRM_ERROR("Context enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); - return ret; + goto out; } +out: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; } @@ -4877,6 +4883,14 @@ int i915_gem_init(struct drm_device *dev) dev_priv->gt.stop_ring = intel_logical_ring_stop; } + /* This is just a security blanket to placate dragons. + * On some systems, we very sporadically observe that the first TLBs + * used by the CS may be stale, despite us poking the TLB reset. If + * we hold the forcewake during initialisation these problems + * just magically go away. 
+ */ + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + ret = i915_gem_init_userptr(dev); if (ret) goto out_unlock; @@ -4903,6 +4917,7 @@ int i915_gem_init(struct drm_device *dev) } out_unlock: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); mutex_unlock(&dev->struct_mutex); return ret; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e730789b53b7..9943c20a741d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9716,7 +9716,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - WARN_ON(!in_irq()); + WARN_ON(!in_interrupt()); if (crtc == NULL) return; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c47a3baa53d5..4e8fb891d4ea 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) /* We need to init first for ECOBUS access and then * determine later if we want to reinit, in case of MT access is - * not working + * not working. In this stage we don't know which flavour this + * ivb is, so it is better to reset also the gen6 fw registers + * before the ecobus check. */ + + __raw_i915_write32(dev_priv, FORCEWAKE, 0); + __raw_posting_read(dev_priv, ECOBUS); + fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_MT_ACK); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index d13d1b5a859f..df09ca7c4889 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence) return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); } +struct radeon_wait_cb { + struct fence_cb base; + struct task_struct *task; +}; + +static void +radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) +{ + struct radeon_wait_cb *wait = + container_of(cb, struct radeon_wait_cb, base); + + wake_up_process(wait->task); +} + static signed long radeon_fence_default_wait(struct fence *f, bool intr, signed long t) { struct radeon_fence *fence = to_radeon_fence(f); struct radeon_device *rdev = fence->rdev; - bool signaled; + struct radeon_wait_cb cb; - fence_enable_sw_signaling(&fence->base); + cb.task = current; - /* - * This function has to return -EDEADLK, but cannot hold - * exclusive_lock during the wait because some callers - * may already hold it. This means checking needs_reset without - * lock, and not fiddling with any gpu internals. - * - * The callback installed with fence_enable_sw_signaling will - * run before our wait_event_*timeout call, so we will see - * both the signaled fence and the changes to needs_reset. 
- */ + if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) + return t; + + while (t > 0) { + if (intr) + set_current_state(TASK_INTERRUPTIBLE); + else + set_current_state(TASK_UNINTERRUPTIBLE); + + /* + * radeon_test_signaled must be called after + * set_current_state to prevent a race with wake_up_process + */ + if (radeon_test_signaled(fence)) + break; + + if (rdev->needs_reset) { + t = -EDEADLK; + break; + } + + t = schedule_timeout(t); + + if (t > 0 && intr && signal_pending(current)) + t = -ERESTARTSYS; + } + + __set_current_state(TASK_RUNNING); + fence_remove_callback(f, &cb.base); - if (intr) - t = wait_event_interruptible_timeout(rdev->fence_queue, - ((signaled = radeon_test_signaled(fence)) || - rdev->needs_reset), t); - else - t = wait_event_timeout(rdev->fence_queue, - ((signaled = radeon_test_signaled(fence)) || - rdev->needs_reset), t); - - if (t > 0 && !signaled) - return -EDEADLK; return t; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index e088e5558da0..a7fb2735d4a9 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -7130,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); if (!vclk || !dclk) { - /* keep the Bypass mode, put PLL to sleep */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + /* keep the Bypass mode */ return 0; } @@ -7147,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) /* set VCO_MODE to 1 */ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); - /* toggle UPLL_SLEEP to 1 then back to 0 */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + /* disable sleep mode */ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); /* deassert UPLL_RESET */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6c6b655defcf..e13b9cbc304e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_err1; } - ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, - (dev_priv->vram_size >> PAGE_SHIFT)); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed initializing memory manager for VRAM.\n"); - goto out_err2; - } - - dev_priv->has_gmr = true; - if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || - refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, - VMW_PL_GMR) != 0) { - DRM_INFO("No GMR memory available. " - "Graphics memory resources are very limited.\n"); - dev_priv->has_gmr = false; - } - - if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { - dev_priv->has_mob = true; - if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, - VMW_PL_MOB) != 0) { - DRM_INFO("No MOB memory available. 
" - "3D will be disabled.\n"); - dev_priv->has_mob = false; - } - } - dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, dev_priv->mmio_size); @@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_no_fman; } + + ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, + (dev_priv->vram_size >> PAGE_SHIFT)); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed initializing memory manager for VRAM.\n"); + goto out_no_vram; + } + + dev_priv->has_gmr = true; + if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || + refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, + VMW_PL_GMR) != 0) { + DRM_INFO("No GMR memory available. " + "Graphics memory resources are very limited.\n"); + dev_priv->has_gmr = false; + } + + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { + dev_priv->has_mob = true; + if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, + VMW_PL_MOB) != 0) { + DRM_INFO("No MOB memory available. " + "3D will be disabled.\n"); + dev_priv->has_mob = false; + } + } + vmw_kms_save_vga(dev_priv); /* Start kms and overlay systems, needs fifo. */ @@ -838,6 +839,12 @@ out_no_fifo: vmw_kms_close(dev_priv); out_no_kms: vmw_kms_restore_vga(dev_priv); + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); + if (dev_priv->has_gmr) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); + (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); +out_no_vram: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: if (dev_priv->capabilities & SVGA_CAP_IRQMASK) @@ -853,12 +860,6 @@ out_err4: iounmap(dev_priv->mmio_virt); out_err3: arch_phys_wc_del(dev_priv->mmio_mtrr); - if (dev_priv->has_mob) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); - if (dev_priv->has_gmr) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); - (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); -out_err2: (void)ttm_bo_device_release(&dev_priv->bdev); out_err1: vmw_ttm_global_release(dev_priv); @@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev) } vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); + + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); + if (dev_priv->has_gmr) + (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); + (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); + vmw_fence_manager_takedown(dev_priv->fman); if (dev_priv->capabilities & SVGA_CAP_IRQMASK) drm_irq_uninstall(dev_priv->dev); @@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev) ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); arch_phys_wc_del(dev_priv->mmio_mtrr); - if (dev_priv->has_mob) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); - if (dev_priv->has_gmr) - (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); - (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); (void)ttm_bo_device_release(&dev_priv->bdev); vmw_ttm_global_release(dev_priv); @@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); + pci_disable_device(pdev); drm_put_dev(dev); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 33176d05db35..654c8daeb5ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use MOB 
buffer.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_no_reloc; } bo = &vmw_bo->base; @@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, out_no_reloc: vmw_dmabuf_unreference(&vmw_bo); - vmw_bo_p = NULL; + *vmw_bo_p = NULL; return ret; } @@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use GMR region.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_no_reloc; } bo = &vmw_bo->base; @@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, out_no_reloc: vmw_dmabuf_unreference(&vmw_bo); - vmw_bo_p = NULL; + *vmw_bo_p = NULL; return ret; } @@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, NULL, arg->command_size, arg->throttle_us, (void __user *)(unsigned long)arg->fence_rep, NULL); - + ttm_read_unlock(&dev_priv->reservation_sem); if (unlikely(ret != 0)) - goto out_unlock; + return ret; vmw_kms_cursor_post_execbuf(dev_priv); -out_unlock: - ttm_read_unlock(&dev_priv->reservation_sem); - return ret; + return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8725b79e7847..07cda8cbbddb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, int i; struct drm_mode_config *mode_config = &dev->mode_config; - ret = ttm_read_lock(&dev_priv->reservation_sem, true); - if (unlikely(ret != 0)) - return ret; - if (!arg->num_outputs) { struct drm_vmw_rect def_rect = {0, 0, 800, 600}; vmw_du_update_layout(dev_priv, 1, &def_rect); - goto out_unlock; + return 0; } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), GFP_KERNEL); - if (unlikely(!rects)) { - ret = -ENOMEM; - goto out_unlock; - } + if (unlikely(!rects)) + return -ENOMEM; user_rects = (void __user *)(unsigned long)arg->rects; ret = copy_from_user(rects, user_rects, rects_size); @@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, out_free: kfree(rects); -out_unlock: - ttm_read_unlock(&dev_priv->reservation_sem); return ret; } diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 7c669c328c4c..56ce8c2b5530 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1959,6 +1959,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 204312bfab2c..9c4786759f16 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -586,6 +586,7 @@ #define USB_VENDOR_ID_LOGITECH 0x046d #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e #define USB_DEVICE_ID_LOGITECH_T651 0xb00c +#define USB_DEVICE_ID_LOGITECH_C077 0xc007 #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 #define 
USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f @@ -898,6 +899,7 @@ #define USB_VENDOR_ID_TIVO 0x150a #define USB_DEVICE_ID_TIVO_SLIDE_BT 0x1200 #define USB_DEVICE_ID_TIVO_SLIDE 0x1201 +#define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203 #define USB_VENDOR_ID_TOPSEED 0x0766 #define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204 diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c index d790d8d71f7f..d98696927453 100644 --- a/drivers/hid/hid-tivo.c +++ b/drivers/hid/hid-tivo.c @@ -64,6 +64,7 @@ static const struct hid_device_id tivo_devices[] = { /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, { } }; MODULE_DEVICE_TABLE(hid, tivo_devices); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 9be99a67bfe2..a82127753461 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -78,6 +78,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 046351cf17f3..bbe32d66e500 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -551,9 +551,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) (features->type == CINTIQ && !(data[1] & 0x40))) return 1; - if (features->quirks & WACOM_QUIRK_MULTI_INPUT) + if (wacom->shared) { wacom->shared->stylus_in_proximity = true; + if (wacom->shared->touch_down) + return 1; + } + /* in Range while exiting */ if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) { input_report_key(input, BTN_TOUCH, 0); @@ -1043,27 +1047,28 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom) struct input_dev *input = wacom->input; unsigned char *data = wacom->data; int i; - int current_num_contacts = 0; + int current_num_contacts = data[61]; int contacts_to_send = 0; int num_contacts_left = 4; /* maximum contacts per packet */ int byte_per_packet = WACOM_BYTES_PER_24HDT_PACKET; int y_offset = 2; + static int contact_with_no_pen_down_count = 0; if (wacom->features.type == WACOM_27QHDT) { current_num_contacts = data[63]; num_contacts_left = 10; byte_per_packet = WACOM_BYTES_PER_QHDTHID_PACKET; y_offset = 0; - } else { - current_num_contacts = data[61]; } /* * First packet resets the counter since only the first * packet in series will have non-zero current_num_contacts. 
*/ - if (current_num_contacts) + if (current_num_contacts) { wacom->num_contacts_left = current_num_contacts; + contact_with_no_pen_down_count = 0; + } contacts_to_send = min(num_contacts_left, wacom->num_contacts_left); @@ -1096,15 +1101,16 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom) input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h)); input_report_abs(input, ABS_MT_ORIENTATION, w > h); } + contact_with_no_pen_down_count++; } } input_mt_report_pointer_emulation(input, true); wacom->num_contacts_left -= contacts_to_send; - if (wacom->num_contacts_left <= 0) + if (wacom->num_contacts_left <= 0) { wacom->num_contacts_left = 0; - - wacom->shared->touch_down = (wacom->num_contacts_left > 0); + wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); + } return 1; } @@ -1116,6 +1122,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom) int current_num_contacts = data[2]; int contacts_to_send = 0; int x_offset = 0; + static int contact_with_no_pen_down_count = 0; /* MTTPC does not support Height and Width */ if (wacom->features.type == MTTPC || wacom->features.type == MTTPC_B) @@ -1125,8 +1132,10 @@ static int wacom_mt_touch(struct wacom_wac *wacom) * First packet resets the counter since only the first * packet in series will have non-zero current_num_contacts. */ - if (current_num_contacts) + if (current_num_contacts) { wacom->num_contacts_left = current_num_contacts; + contact_with_no_pen_down_count = 0; + } /* There are at most 5 contacts per packet */ contacts_to_send = min(5, wacom->num_contacts_left); @@ -1147,15 +1156,16 @@ static int wacom_mt_touch(struct wacom_wac *wacom) int y = get_unaligned_le16(&data[offset + x_offset + 9]); input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); + contact_with_no_pen_down_count++; } } input_mt_report_pointer_emulation(input, true); wacom->num_contacts_left -= contacts_to_send; - if (wacom->num_contacts_left < 0) + if (wacom->num_contacts_left <= 0) { wacom->num_contacts_left = 0; - - wacom->shared->touch_down = (wacom->num_contacts_left > 0); + wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); + } return 1; } @@ -1193,29 +1203,25 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len) { unsigned char *data = wacom->data; struct input_dev *input = wacom->input; - bool prox; + bool prox = !wacom->shared->stylus_in_proximity; int x = 0, y = 0; if (wacom->features.touch_max > 1 || len > WACOM_PKGLEN_TPC2FG) return 0; - if (!wacom->shared->stylus_in_proximity) { - if (len == WACOM_PKGLEN_TPC1FG) { - prox = data[0] & 0x01; - x = get_unaligned_le16(&data[1]); - y = get_unaligned_le16(&data[3]); - } else if (len == WACOM_PKGLEN_TPC1FG_B) { - prox = data[2] & 0x01; - x = get_unaligned_le16(&data[3]); - y = get_unaligned_le16(&data[5]); - } else { - prox = data[1] & 0x01; - x = le16_to_cpup((__le16 *)&data[2]); - y = le16_to_cpup((__le16 *)&data[4]); - } - } else - /* force touch out when pen is in prox */ - prox = 0; + if (len == WACOM_PKGLEN_TPC1FG) { + prox = prox && (data[0] & 0x01); + x = get_unaligned_le16(&data[1]); + y = get_unaligned_le16(&data[3]); + } else if (len == WACOM_PKGLEN_TPC1FG_B) { + prox = prox && (data[2] & 0x01); + x = get_unaligned_le16(&data[3]); + y = get_unaligned_le16(&data[5]); + } else { + prox = prox && (data[1] & 0x01); + x = le16_to_cpup((__le16 *)&data[2]); + y = le16_to_cpup((__le16 *)&data[4]); + } if (prox) { input_report_abs(input, ABS_X, x); @@ -1613,6 +1619,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) struct 
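/*
 * A minimal model of the touch_down bookkeeping these wacom hunks
 * introduce: shared->touch_down now stays set only if at least one
 * contact was actually reported while the stylus was out of proximity,
 * so a hovering pen can no longer leave a stale touch state behind.
 * The frame structure and sample data below are invented for
 * illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct touch_frame {
        int contacts;                   /* finger contacts in this frame */
        bool stylus_in_proximity;       /* pen currently in range */
};

static bool touch_down;

static void process_frame(const struct touch_frame *f)
{
        /* contacts are suppressed while the pen is in proximity */
        int reported = f->stylus_in_proximity ? 0 : f->contacts;

        touch_down = reported > 0;
}

int main(void)
{
        struct touch_frame finger = { 1, false }, pen_hover = { 1, true };

        process_frame(&finger);
        printf("touch_down=%d\n", touch_down);          /* 1 */
        process_frame(&pen_hover);
        printf("touch_down=%d\n", touch_down);          /* 0 */
        return 0;
}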
input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; int i; + int contact_with_no_pen_down_count = 0; if (data[0] != 0x02) return 0; @@ -1640,6 +1647,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) } input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); + contact_with_no_pen_down_count++; } } @@ -1649,11 +1657,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom) input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0); input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0); input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0); + wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); return 1; } -static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) +static int wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data, int last_touch_count) { struct wacom_features *features = &wacom->features; struct input_dev *input = wacom->input; @@ -1661,7 +1670,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) int slot = input_mt_get_slot_by_key(input, data[0]); if (slot < 0) - return; + return 0; touch = touch && !wacom->shared->stylus_in_proximity; @@ -1693,7 +1702,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) input_report_abs(input, ABS_MT_POSITION_Y, y); input_report_abs(input, ABS_MT_TOUCH_MAJOR, width); input_report_abs(input, ABS_MT_TOUCH_MINOR, height); + last_touch_count++; } + return last_touch_count; } static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) @@ -1718,6 +1729,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) unsigned char *data = wacom->data; int count = data[1] & 0x07; int i; + int contact_with_no_pen_down_count = 0; if (data[0] != 0x02) return 0; @@ -1728,12 +1740,15 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) int msg_id = data[offset]; if (msg_id >= 2 && msg_id <= 17) - wacom_bpt3_touch_msg(wacom, data + offset); + contact_with_no_pen_down_count = + wacom_bpt3_touch_msg(wacom, data + offset, + contact_with_no_pen_down_count); else if (msg_id == 128) wacom_bpt3_button_msg(wacom, data + offset); } input_mt_report_pointer_emulation(input, true); + wacom->shared->touch_down = (contact_with_no_pen_down_count > 0); return 1; } @@ -1759,6 +1774,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom) return 0; } + if (wacom->shared->touch_down) + return 0; + prox = (data[1] & 0x20) == 0x20; /* diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 210cf4874cb7..edf274cabe81 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev) status = driver->remove(client); } - if (dev->of_node) - irq_dispose_mapping(client->irq); - dev_pm_domain_detach(&client->dev, true); return status; } diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 1793aea4a7d2..6eb738ca6d2f 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -1793,11 +1793,11 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) tape->best_dsc_rw_freq = clamp_t(unsigned long, t, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX); printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " - "%lums tDSC%s\n", + "%ums tDSC%s\n", drive->name, tape->name, *(u16 *)&tape->caps[14], (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, tape->buffer_size / 1024, - tape->best_dsc_rw_freq * 1000 / HZ, + jiffies_to_msecs(tape->best_dsc_rw_freq), 
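/*
 * Why the ide-tape hunk above switches to jiffies_to_msecs(): the
 * helper hides the HZ arithmetic and returns an unsigned int, which is
 * what the new "%ums" format expects. A userspace sketch; HZ and the
 * simplified conversion (exact only when HZ divides 1000) are
 * assumptions for illustration.
 */
#include <stdio.h>

#define HZ 250  /* assumed kernel config value */

static unsigned int jiffies_to_msecs_sketch(unsigned long j)
{
        /* the kernel uses this exact form when 1000 % HZ == 0 */
        return (1000 / HZ) * j;
}

int main(void)
{
        unsigned long best_dsc_rw_freq = 75;    /* hypothetical jiffies value */

        printf("%ums tDSC\n", jiffies_to_msecs_sketch(best_dsc_rw_freq));
        return 0;
}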
(drive->dev_flags & IDE_DFLAG_USING_DMA) ? ", DMA" : ""); ide_proc_register_driver(drive, tape->driver); diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index c7619716c31d..59040265e361 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -64,6 +64,14 @@ enum { #define GUID_TBL_BLK_NUM_ENTRIES 8 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES) +/* Counters should saturate once they reach their maximum value */ +#define ASSIGN_32BIT_COUNTER(counter, value) do {\ + if ((value) > U32_MAX) \ + counter = cpu_to_be32(U32_MAX); \ + else \ + counter = cpu_to_be32(value); \ +} while (0) + struct mlx4_mad_rcv_buf { struct ib_grh grh; u8 payload[256]; @@ -806,10 +814,14 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, static void edit_counter(struct mlx4_counter *cnt, struct ib_pma_portcounters *pma_cnt) { - pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2)); - pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2)); - pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames)); - pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, + (be64_to_cpu(cnt->tx_bytes) >> 2)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, + (be64_to_cpu(cnt->rx_bytes) >> 2)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, + be64_to_cpu(cnt->tx_frames)); + ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, + be64_to_cpu(cnt->rx_frames)); } static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ac6e2b710ea6..b972c0b41799 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2697,8 +2697,12 @@ static void handle_bonded_port_state_event(struct work_struct *work) spin_lock_bh(&ibdev->iboe.lock); for (i = 0; i < MLX4_MAX_PORTS; ++i) { struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; + enum ib_port_state curr_port_state; - enum ib_port_state curr_port_state = + if (!curr_netdev) + continue; + + curr_port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
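/*
 * Sketch of the saturation semantics ASSIGN_32BIT_COUNTER introduces
 * above: a 64-bit hardware counter is clamped to U32_MAX instead of
 * being silently truncated when copied into a 32-bit PMA field. The
 * big-endian conversion is left out and the sample value is made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t saturate32(uint64_t value)
{
        return value > UINT32_MAX ? UINT32_MAX : (uint32_t)value;
}

int main(void)
{
        uint64_t tx_bytes = 0x123456789abcULL;  /* hypothetical counter */

        /* port_xmit_data counts 4-byte units, hence the >> 2 */
        printf("0x%08x\n", (unsigned int)saturate32(tx_bytes >> 2));
        return 0;
}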
IB_PORT_ACTIVE : IB_PORT_DOWN; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index f2cceb6493a0..dda605836546 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -67,9 +67,6 @@ #define X_MAX_POSITIVE 8176 #define Y_MAX_POSITIVE 8176 -/* maximum ABS_MT_POSITION displacement (in mm) */ -#define DMAX 10 - /***************************************************************************** * Stuff we need even when we do not want native Synaptics support ****************************************************************************/ @@ -123,32 +120,41 @@ void synaptics_reset(struct psmouse *psmouse) static bool cr48_profile_sensor; +#define ANY_BOARD_ID 0 struct min_max_quirk { const char * const *pnp_ids; + struct { + unsigned long int min, max; + } board_id; int x_min, x_max, y_min, y_max; }; static const struct min_max_quirk min_max_pnpid_table[] = { { (const char * const []){"LEN0033", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, 1024, 5052, 2258, 4832 }, { - (const char * const []){"LEN0035", "LEN0042", NULL}, + (const char * const []){"LEN0042", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, 1232, 5710, 1156, 4696 }, { (const char * const []){"LEN0034", "LEN0036", "LEN0037", "LEN0039", "LEN2002", "LEN2004", NULL}, + {ANY_BOARD_ID, 2961}, 1024, 5112, 2024, 4832 }, { (const char * const []){"LEN2001", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, 1024, 5022, 2508, 4832 }, { (const char * const []){"LEN2006", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, 1264, 5675, 1171, 4688 }, { } @@ -175,9 +181,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0041", "LEN0042", /* Yoga */ "LEN0045", - "LEN0046", "LEN0047", - "LEN0048", "LEN0049", "LEN2000", "LEN2001", /* Edge E431 */ @@ -235,18 +239,39 @@ static int synaptics_model_id(struct psmouse *psmouse) return 0; } +static int synaptics_more_extended_queries(struct psmouse *psmouse) +{ + struct synaptics_data *priv = psmouse->private; + unsigned char buf[3]; + + if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf)) + return -1; + + priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2]; + + return 0; +} + /* - * Read the board id from the touchpad + * Read the board id and the "More Extended Queries" from the touchpad * The board id is encoded in the "QUERY MODES" response */ -static int synaptics_board_id(struct psmouse *psmouse) +static int synaptics_query_modes(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; unsigned char bid[3]; + /* firmwares prior 7.5 have no board_id encoded */ + if (SYN_ID_FULL(priv->identity) < 0x705) + return 0; + if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) return -1; priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; + + if (SYN_MEXT_CAP_BIT(bid[0])) + return synaptics_more_extended_queries(psmouse); + return 0; } @@ -346,7 +371,6 @@ static int synaptics_resolution(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; unsigned char resp[3]; - int i; if (SYN_ID_MAJOR(priv->identity) < 4) return 0; @@ -358,17 +382,6 @@ static int synaptics_resolution(struct psmouse *psmouse) } } - for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) { - if (psmouse_matches_pnp_id(psmouse, - min_max_pnpid_table[i].pnp_ids)) { - priv->x_min = min_max_pnpid_table[i].x_min; - priv->x_max = min_max_pnpid_table[i].x_max; - priv->y_min = min_max_pnpid_table[i].y_min; - priv->y_max = min_max_pnpid_table[i].y_max; - return 0; - } - } - if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 && SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) { if 
(synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) { @@ -377,23 +390,69 @@ static int synaptics_resolution(struct psmouse *psmouse) } else { priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); + psmouse_info(psmouse, + "queried max coordinates: x [..%d], y [..%d]\n", + priv->x_max, priv->y_max); } } - if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 && - SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) { + if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) && + (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 || + /* + * Firmware v8.1 does not report proper number of extended + * capabilities, but has been proven to report correct min + * coordinates. + */ + SYN_ID_FULL(priv->identity) == 0x801)) { if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { psmouse_warn(psmouse, "device claims to have min coordinates query, but I'm not able to read it.\n"); } else { priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); + psmouse_info(psmouse, + "queried min coordinates: x [%d..], y [%d..]\n", + priv->x_min, priv->y_min); } } return 0; } +/* + * Apply quirk(s) if the hardware matches + */ + +static void synaptics_apply_quirks(struct psmouse *psmouse) +{ + struct synaptics_data *priv = psmouse->private; + int i; + + for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) { + if (!psmouse_matches_pnp_id(psmouse, + min_max_pnpid_table[i].pnp_ids)) + continue; + + if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID && + priv->board_id < min_max_pnpid_table[i].board_id.min) + continue; + + if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID && + priv->board_id > min_max_pnpid_table[i].board_id.max) + continue; + + priv->x_min = min_max_pnpid_table[i].x_min; + priv->x_max = min_max_pnpid_table[i].x_max; + priv->y_min = min_max_pnpid_table[i].y_min; + priv->y_max = min_max_pnpid_table[i].y_max; + psmouse_info(psmouse, + "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n", + priv->x_min, priv->x_max, + priv->y_min, priv->y_max); + break; + } +} + static int synaptics_query_hardware(struct psmouse *psmouse) { if (synaptics_identify(psmouse)) @@ -402,13 +461,15 @@ static int synaptics_query_hardware(struct psmouse *psmouse) return -1; if (synaptics_firmware_id(psmouse)) return -1; - if (synaptics_board_id(psmouse)) + if (synaptics_query_modes(psmouse)) return -1; if (synaptics_capability(psmouse)) return -1; if (synaptics_resolution(psmouse)) return -1; + synaptics_apply_quirks(psmouse); + return 0; } @@ -516,18 +577,22 @@ static int synaptics_is_pt_packet(unsigned char *buf) return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; } -static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet) +static void synaptics_pass_pt_packet(struct psmouse *psmouse, + struct serio *ptport, + unsigned char *packet) { + struct synaptics_data *priv = psmouse->private; struct psmouse *child = serio_get_drvdata(ptport); if (child && child->state == PSMOUSE_ACTIVATED) { - serio_interrupt(ptport, packet[1], 0); + serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0); serio_interrupt(ptport, packet[4], 0); serio_interrupt(ptport, packet[5], 0); if (child->pktsize == 4) serio_interrupt(ptport, packet[2], 0); - } else + } else { serio_interrupt(ptport, packet[1], 0); + } } static void synaptics_pt_activate(struct psmouse *psmouse) @@ -605,6 +670,18 @@ static void synaptics_parse_agm(const unsigned char buf[], } } +static void synaptics_parse_ext_buttons(const unsigned char 
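/*
 * Worked example of the bit layout handled by
 * synaptics_parse_ext_buttons(): for a pad reporting n extended
 * buttons, half are carried in the low bits of packet byte 4 and the
 * other half in the same bits of byte 5. GENMASK() is open-coded so
 * the sketch builds in userspace; the packet bytes are invented.
 */
#include <stdio.h>

int main(void)
{
        unsigned int n = 8;                     /* SYN_CAP_MULTI_BUTTON_NO */
        unsigned int ext_bits = (n + 1) >> 1;           /* 4 */
        unsigned int ext_mask = (1u << ext_bits) - 1;   /* GENMASK(3, 0) */
        unsigned char buf4 = 0x05, buf5 = 0x02; /* hypothetical packet */
        unsigned int ext_buttons;

        ext_buttons = (buf4 & ext_mask) | ((buf5 & ext_mask) << ext_bits);
        printf("ext_buttons = 0x%02x\n", ext_buttons);  /* 0x25 */
        return 0;
}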
buf[], + struct synaptics_data *priv, + struct synaptics_hw_state *hw) +{ + unsigned int ext_bits = + (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1; + unsigned int ext_mask = GENMASK(ext_bits - 1, 0); + + hw->ext_buttons = buf[4] & ext_mask; + hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits; +} + static bool is_forcepad; static int synaptics_parse_hw_state(const unsigned char buf[], @@ -691,28 +768,9 @@ static int synaptics_parse_hw_state(const unsigned char buf[], hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; } - if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && + if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 && ((buf[0] ^ buf[3]) & 0x02)) { - switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { - default: - /* - * if nExtBtn is greater than 8 it should be - * considered invalid and treated as 0 - */ - break; - case 8: - hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0; - hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0; - case 6: - hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0; - hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0; - case 4: - hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0; - hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0; - case 2: - hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0; - hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0; - } + synaptics_parse_ext_buttons(buf, priv, hw); } } else { hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); @@ -774,12 +832,54 @@ static void synaptics_report_semi_mt_data(struct input_dev *dev, } } +static void synaptics_report_ext_buttons(struct psmouse *psmouse, + const struct synaptics_hw_state *hw) +{ + struct input_dev *dev = psmouse->dev; + struct synaptics_data *priv = psmouse->private; + int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1; + char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + int i; + + if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap)) + return; + + /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */ + if (SYN_ID_FULL(priv->identity) == 0x801 && + !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02)) + return; + + if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) { + for (i = 0; i < ext_bits; i++) { + input_report_key(dev, BTN_0 + 2 * i, + hw->ext_buttons & (1 << i)); + input_report_key(dev, BTN_1 + 2 * i, + hw->ext_buttons & (1 << (i + ext_bits))); + } + return; + } + + /* + * This generation of touchpads has the trackstick buttons + * physically wired to the touchpad. Re-route them through + * the pass-through interface. 
+ */ + if (!priv->pt_port) + return; + + /* The trackstick expects at most 3 buttons */ + priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons) | + SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 | + SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2; + + synaptics_pass_pt_packet(psmouse, priv->pt_port, buf); +} + static void synaptics_report_buttons(struct psmouse *psmouse, const struct synaptics_hw_state *hw) { struct input_dev *dev = psmouse->dev; struct synaptics_data *priv = psmouse->private; - int i; input_report_key(dev, BTN_LEFT, hw->left); input_report_key(dev, BTN_RIGHT, hw->right); @@ -792,8 +892,7 @@ static void synaptics_report_buttons(struct psmouse *psmouse, input_report_key(dev, BTN_BACK, hw->down); } - for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) - input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i)); + synaptics_report_ext_buttons(psmouse, hw); } static void synaptics_report_mt_data(struct psmouse *psmouse, @@ -813,7 +912,7 @@ static void synaptics_report_mt_data(struct psmouse *psmouse, pos[i].y = synaptics_invert_y(hw[i]->y); } - input_mt_assign_slots(dev, slot, pos, nsemi, DMAX * priv->x_res); + input_mt_assign_slots(dev, slot, pos, nsemi, 0); for (i = 0; i < nsemi; i++) { input_mt_slot(dev, slot[i]); @@ -1014,7 +1113,8 @@ static psmouse_ret_t synaptics_process_byte(struct psmouse *psmouse) if (SYN_CAP_PASS_THROUGH(priv->capabilities) && synaptics_is_pt_packet(psmouse->packet)) { if (priv->pt_port) - synaptics_pass_pt_packet(priv->pt_port, psmouse->packet); + synaptics_pass_pt_packet(psmouse, priv->pt_port, + psmouse->packet); } else synaptics_process_packet(psmouse); @@ -1116,8 +1216,9 @@ static void set_input_params(struct psmouse *psmouse, __set_bit(BTN_BACK, dev->keybit); } - for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) - __set_bit(BTN_0 + i, dev->keybit); + if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) + for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) + __set_bit(BTN_0 + i, dev->keybit); __clear_bit(EV_REL, dev->evbit); __clear_bit(REL_X, dev->relbit); @@ -1125,7 +1226,8 @@ static void set_input_params(struct psmouse *psmouse, if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); - if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) + if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && + !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); /* Clickpads report only left button */ __clear_bit(BTN_RIGHT, dev->keybit); diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index aedc3299b14e..ee4bd0d12b26 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -22,6 +22,7 @@ #define SYN_QUE_EXT_CAPAB_0C 0x0c #define SYN_QUE_EXT_MAX_COORDS 0x0d #define SYN_QUE_EXT_MIN_COORDS 0x0f +#define SYN_QUE_MEXT_CAPAB_10 0x10 /* synaptics modes */ #define SYN_BIT_ABSOLUTE_MODE (1 << 7) @@ -53,6 +54,7 @@ #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) +#define SYN_MEXT_CAP_BIT(m) ((m) & (1 << 1)) /* * The following describes response for the 0x0c query. @@ -89,6 +91,30 @@ #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) +/* + * The following describes response for the 0x10 query.
+ * + * byte mask name meaning + * ---- ---- ------- ------------ + * 1 0x01 ext buttons are stick buttons exported in the extended + * capability are actually meant to be used + * by the trackstick (pass-through). + * 1 0x02 SecurePad the touchpad is a SecurePad, so it + * contains a built-in fingerprint reader. + * 1 0xe0 more ext count how many more extended queries are + * available after this one. + * 2 0xff SecurePad width the width of the SecurePad fingerprint + * reader. + * 3 0xff SecurePad height the height of the SecurePad fingerprint + * reader. + */ +#define SYN_CAP_EXT_BUTTONS_STICK(ex10) ((ex10) & 0x010000) +#define SYN_CAP_SECUREPAD(ex10) ((ex10) & 0x020000) + +#define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01)) +#define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02)) +#define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04)) + /* synaptics modes query bits */ #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) #define SYN_MODE_RATE(m) ((m) & (1 << 6)) @@ -143,6 +169,7 @@ struct synaptics_data { unsigned long int capabilities; /* Capabilities */ unsigned long int ext_cap; /* Extended Capabilities */ unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ + unsigned long int ext_cap_10; /* Ext Caps from 0x10 query */ unsigned long int identity; /* Identification */ unsigned int x_res, y_res; /* X/Y resolution in units/mm */ unsigned int x_max, y_max; /* Max coordinates (from FW) */ @@ -156,6 +183,7 @@ struct synaptics_data { bool disable_gesture; /* disable gestures */ struct serio *pt_port; /* Pass-through serio port */ + unsigned char pt_buttons; /* Pass-through buttons */ /* * Last received Advanced Gesture Mode (AGM) packet. An AGM packet diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index baa0d9786f50..1ae4e547b419 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE config IOMMU_IO_PGTABLE_LPAE bool "ARMv7/v8 Long Descriptor Format" select IOMMU_IO_PGTABLE + depends on ARM || ARM64 || COMPILE_TEST help Enable support for the ARM long descriptor pagetable format. This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page @@ -63,6 +64,7 @@ config MSM_IOMMU bool "MSM IOMMU Support" depends on ARM depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST + depends on BROKEN select IOMMU_API help Support for the IOMMUs found on certain Qualcomm SOCs. diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 7ce52737c7a1..dc14fec4ede1 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = { static int __init exynos_iommu_init(void) { + struct device_node *np; int ret; + np = of_find_matching_node(NULL, sysmmu_of_match); + if (!np) + return 0; + + of_node_put(np); + lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); if (!lv2table_kmem_cache) { diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 5a500edf00cc..b610a8dee238 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -56,7 +56,8 @@ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ * (d)->bits_per_level) + (d)->pg_shift) -#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) +#define ARM_LPAE_PAGES_PER_PGD(d) \ + DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) /* * Calculate the index at level l used to map virtual address a using the @@ -66,7 +67,7 @@ ((l) == ARM_LPAE_START_LVL(d) ?
ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) #define ARM_LPAE_LVL_IDX(a,l,d) \ - (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ + (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) /* Calculate the block/page mapping size at level l for pagetable in d. */ diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index f59f857b702e..a4ba851825c2 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void) struct kmem_cache *p; const unsigned long flags = SLAB_HWCACHE_ALIGN; size_t align = 1 << 10; /* L2 pagetable alignement */ + struct device_node *np; + + np = of_find_matching_node(NULL, omap_iommu_of_match); + if (!np) + return 0; + + of_node_put(np); p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, iopte_cachep_ctor); diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 6a8b1ec4a48a..9f74fddcd304 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = { static int __init rk_iommu_init(void) { + struct device_node *np; int ret; + np = of_find_matching_node(NULL, rk_iommu_dt_ids); + if (!np) + return 0; + + of_node_put(np); + ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); if (ret) return ret; diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 463c235acbdc..4387dae14e45 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base; static void __iomem *main_int_base; static struct irq_domain *armada_370_xp_mpic_domain; static u32 doorbell_mask_reg; +static int parent_irq; #ifdef CONFIG_PCI_MSI static struct irq_domain *armada_370_xp_msi_domain; static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); @@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, { if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) armada_xp_mpic_smp_cpu_init(); + return NOTIFY_OK; } @@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = { .priority = 100, }; +static int mpic_cascaded_secondary_init(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); + + return NOTIFY_OK; +} + +static struct notifier_block mpic_cascaded_cpu_notifier = { + .notifier_call = mpic_cascaded_secondary_init, + .priority = 100, +}; + #endif /* CONFIG_SMP */ static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { @@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, struct device_node *parent) { struct resource main_int_res, per_cpu_int_res; - int parent_irq, nr_irqs, i; + int nr_irqs, i; u32 control; BUG_ON(of_address_to_resource(node, 0, &main_int_res)); @@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); #endif } else { +#ifdef CONFIG_SMP + register_cpu_notifier(&mpic_cascaded_cpu_notifier); +#endif irq_set_chained_handler(parent_irq, armada_370_xp_mpic_handle_cascade_irq); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d8996bdf0f61..596b0a9eee99 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -416,13 +416,14 @@ static void 
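/*
 * The pattern behind the locking changes in the ITS (and later GIC)
 * hunks that follow: plain raw_spin_lock() becomes the irqsave variant
 * so the same lock can also be taken from hard interrupt context
 * without deadlocking. A minimal kernel-style sketch; the lock name
 * and the critical section are illustrative only.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&example_lock, flags);
        /* touch state shared with interrupt handlers here */
        raw_spin_unlock_irqrestore(&example_lock, flags);
}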
its_send_single_command(struct its_node *its, { struct its_cmd_block *cmd, *sync_cmd, *next_cmd; struct its_collection *sync_col; + unsigned long flags; - raw_spin_lock(&its->lock); + raw_spin_lock_irqsave(&its->lock, flags); cmd = its_allocate_entry(its); if (!cmd) { /* We're soooooo screewed... */ pr_err_ratelimited("ITS can't allocate, dropping command\n"); - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); return; } sync_col = builder(cmd, desc); @@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its, post: next_cmd = its_post_commands(its); - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); its_wait_for_range_completion(its, cmd, next_cmd); } @@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its) { int err; int i; - int psz = PAGE_SIZE; + int psz = SZ_64K; u64 shr = GITS_BASER_InnerShareable; for (i = 0; i < GITS_BASER_NR_REGS; i++) { u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); u64 type = GITS_BASER_TYPE(val); u64 entry_size = GITS_BASER_ENTRY_SIZE(val); + int order = get_order(psz); + int alloc_size; u64 tmp; void *base; if (type == GITS_BASER_TYPE_NONE) continue; - /* We're lazy and only allocate a single page for now */ - base = (void *)get_zeroed_page(GFP_KERNEL); + /* + * Allocate as many entries as required to fit the + * range of device IDs that the ITS can grok... The ID + * space being incredibly sparse, this results in a + * massive waste of memory. + * + * For other tables, only allocate a single page. + */ + if (type == GITS_BASER_TYPE_DEVICE) { + u64 typer = readq_relaxed(its->base + GITS_TYPER); + u32 ids = GITS_TYPER_DEVBITS(typer); + + order = get_order((1UL << ids) * entry_size); + if (order >= MAX_ORDER) { + order = MAX_ORDER - 1; + pr_warn("%s: Device Table too large, reduce its page order to %u\n", + its->msi_chip.of_node->full_name, order); + } + } + + alloc_size = (1 << order) * PAGE_SIZE; + base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!base) { err = -ENOMEM; goto out_free; @@ -841,7 +864,7 @@ retry_baser: break; } - val |= (PAGE_SIZE / psz) - 1; + val |= (alloc_size / psz) - 1; writeq_relaxed(val, its->base + GITS_BASER + i * 8); tmp = readq_relaxed(its->base + GITS_BASER + i * 8); @@ -882,7 +905,7 @@ retry_baser: } pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", - (int)(PAGE_SIZE / entry_size), + (int)(alloc_size / entry_size), its_base_type_string[type], (unsigned long)virt_to_phys(base), psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); @@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void) static struct its_device *its_find_device(struct its_node *its, u32 dev_id) { struct its_device *its_dev = NULL, *tmp; + unsigned long flags; - raw_spin_lock(&its->lock); + raw_spin_lock_irqsave(&its->lock, flags); list_for_each_entry(tmp, &its->its_device_list, entry) { if (tmp->device_id == dev_id) { @@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id) } } - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); return its_dev; } @@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, { struct its_device *dev; unsigned long *lpi_map; + unsigned long flags; void *itt; int lpi_base; int nr_lpis; @@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, nr_ites = max(2UL, roundup_pow_of_two(nvecs)); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + 
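/*
 * Sketch of the device-table sizing its_alloc_tables() now performs
 * above: the table must cover 2^DevBits device IDs of entry_size bytes
 * each, rounded up to a power-of-two number of pages and clamped below
 * MAX_ORDER. get_order(), PAGE_SIZE and the sample DevBits value are
 * open-coded assumptions so this builds standalone.
 */
#include <stdio.h>

#define PAGE_SHIFT      12                      /* assumed 4K pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define MAX_ORDER       11                      /* common kernel default */

static int get_order_sketch(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned int ids = 16;          /* hypothetical GITS_TYPER.DevBits */
        unsigned long entry_size = 8;   /* bytes per device-table entry */
        int order = get_order_sketch((1UL << ids) * entry_size);

        if (order >= MAX_ORDER)
                order = MAX_ORDER - 1;

        printf("device table: %lu bytes\n", (1UL << order) * PAGE_SIZE);
        return 0;
}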
ITS_ITT_ALIGN - 1; - itt = kmalloc(sz, GFP_KERNEL); + itt = kzalloc(sz, GFP_KERNEL); lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); if (!dev || !itt || !lpi_map) { @@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev->device_id = dev_id; INIT_LIST_HEAD(&dev->entry); - raw_spin_lock(&its->lock); + raw_spin_lock_irqsave(&its->lock, flags); list_add(&dev->entry, &its->its_device_list); - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); /* Bind the device to the first possible CPU */ cpu = cpumask_first(cpu_online_mask); @@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, static void its_free_device(struct its_device *its_dev) { - raw_spin_lock(&its_dev->its->lock); + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); list_del(&its_dev->entry); - raw_spin_unlock(&its_dev->its->lock); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); kfree(its_dev->itt); kfree(its_dev); } @@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) return 0; } +struct its_pci_alias { + struct pci_dev *pdev; + u32 dev_id; + u32 count; +}; + +static int its_pci_msi_vec_count(struct pci_dev *pdev) +{ + int msi, msix; + + msi = max(pci_msi_vec_count(pdev), 0); + msix = max(pci_msix_vec_count(pdev), 0); + + return max(msi, msix); +} + +static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) +{ + struct its_pci_alias *dev_alias = data; + + dev_alias->dev_id = alias; + if (pdev != dev_alias->pdev) + dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); + + return 0; +} + static int its_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *info) { struct pci_dev *pdev; struct its_node *its; - u32 dev_id; struct its_device *its_dev; + struct its_pci_alias dev_alias; if (!dev_is_pci(dev)) return -EINVAL; pdev = to_pci_dev(dev); - dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); + dev_alias.pdev = pdev; + dev_alias.count = nvec; + + pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); its = domain->parent->host_data; - its_dev = its_find_device(its, dev_id); - if (WARN_ON(its_dev)) - return -EINVAL; + its_dev = its_find_device(its, dev_alias.dev_id); + if (its_dev) { + /* + * We already have seen this ID, probably through + * another alias (PCI bridge of some sort). No need to + * create the device. 
+ */ + dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id); + goto out; + } - its_dev = its_create_device(its, dev_id, nvec); + its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count); if (!its_dev) return -ENOMEM; - dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); - + dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", + dev_alias.count, ilog2(dev_alias.count)); +out: info->scratchpad[0].ptr = its_dev; info->scratchpad[1].ptr = dev; return 0; @@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = { .deactivate = its_irq_domain_deactivate, }; +static int its_force_quiescent(void __iomem *base) +{ + u32 count = 1000000; /* 1s */ + u32 val; + + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + /* Disable the generation of all interrupts to this ITS */ + val &= ~GITS_CTLR_ENABLE; + writel_relaxed(val, base + GITS_CTLR); + + /* Poll GITS_CTLR and wait until ITS becomes quiescent */ + while (1) { + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + count--; + if (!count) + return -EBUSY; + + cpu_relax(); + udelay(1); + } +} + static int its_probe(struct device_node *node, struct irq_domain *parent) { struct resource res; @@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) goto out_unmap; } + err = its_force_quiescent(its_base); + if (err) { + pr_warn("%s: failed to quiesce, giving up\n", + node->full_name); + goto out_unmap; + } + pr_info("ITS: %s\n", node->full_name); its = kzalloc(sizeof(*its), GFP_KERNEL); @@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) writeq_relaxed(baser, its->base + GITS_CBASER); tmp = readq_relaxed(its->base + GITS_CBASER); writeq_relaxed(0, its->base + GITS_CWRITER); - writel_relaxed(1, its->base + GITS_CTLR); + writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { pr_info("ITS: using cache flushing for cmd queue\n"); @@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void) int its_cpu_init(void) { - if (!gic_rdists_supports_plpis()) { - pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); - return -ENXIO; - } - if (!list_empty(&its_nodes)) { + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } its_cpu_init_lpis(); its_cpu_init_collection(); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1c6dea2fbc34..fd8850def1b8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, tlist |= 1 << (mpidr & 0xf); cpu = cpumask_next(cpu, mask); - if (cpu == nr_cpu_ids) + if (cpu >= nr_cpu_ids) goto out; mpidr = cpu_logical_map(cpu); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 4634cf7d0ec3..471e1cdc1933 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d) static void gic_mask_irq(struct irq_data *d) { u32 mask = 1 << (gic_irq(d) % 32); + unsigned long flags; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); if (gic_arch_extn.irq_mask) gic_arch_extn.irq_mask(d); - raw_spin_unlock(&irq_controller_lock); + 
raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } static void gic_unmask_irq(struct irq_data *d) { u32 mask = 1 << (gic_irq(d) % 32); + unsigned long flags; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); if (gic_arch_extn.irq_unmask) gic_arch_extn.irq_unmask(d); writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } static void gic_eoi_irq(struct irq_data *d) @@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) { void __iomem *base = gic_dist_base(d); unsigned int gicirq = gic_irq(d); + unsigned long flags; int ret; /* Interrupt configuration for SGIs can't be changed */ @@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type) type != IRQ_TYPE_EDGE_RISING) return -EINVAL; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); if (gic_arch_extn.irq_set_type) gic_arch_extn.irq_set_type(d, type); ret = gic_configure_irq(gicirq, type, base, NULL); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); return ret; } @@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); unsigned int cpu, shift = (gic_irq(d) % 4) * 8; u32 val, mask, bit; + unsigned long flags; if (!force) cpu = cpumask_any_and(mask_val, cpu_online_mask); @@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) return -EINVAL; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); mask = 0xff << shift; bit = gic_cpu_map[cpu] << shift; val = readl_relaxed(reg) & ~mask; writel_relaxed(val | bit, reg); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); return IRQ_SET_MASK_OK; } diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c index c8ced12fa452..1cfcea62aed9 100644 --- a/drivers/isdn/gigaset/ev-layer.c +++ b/drivers/isdn/gigaset/ev-layer.c @@ -389,22 +389,49 @@ zsau_resp[] = {NULL, ZSAU_UNKNOWN} }; -/* retrieve CID from parsed response - * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535 +/* check for and remove fixed string prefix + * If s starts with prefix terminated by a non-alphanumeric character, + * return pointer to the first character after that, otherwise return NULL. 
*/ -static int cid_of_response(char *s) +static char *skip_prefix(char *s, const char *prefix) { - int cid; - int rc; - - if (s[-1] != ';') - return 0; /* no CID separator */ - rc = kstrtoint(s, 10, &cid); - if (rc) - return 0; /* CID not numeric */ - if (cid < 1 || cid > 65535) - return -1; /* CID out of range */ - return cid; + while (*prefix) + if (*s++ != *prefix++) + return NULL; + if (isalnum(*s)) + return NULL; + return s; +} + +/* queue event with CID */ +static void add_cid_event(struct cardstate *cs, int cid, int type, + void *ptr, int parameter) +{ + unsigned long flags; + unsigned next, tail; + struct event_t *event; + + gig_dbg(DEBUG_EVENT, "queueing event %d for cid %d", type, cid); + + spin_lock_irqsave(&cs->ev_lock, flags); + + tail = cs->ev_tail; + next = (tail + 1) % MAX_EVENTS; + if (unlikely(next == cs->ev_head)) { + dev_err(cs->dev, "event queue full\n"); + kfree(ptr); + } else { + event = cs->events + tail; + event->type = type; + event->cid = cid; + event->ptr = ptr; + event->arg = NULL; + event->parameter = parameter; + event->at_state = NULL; + cs->ev_tail = next; + } + + spin_unlock_irqrestore(&cs->ev_lock, flags); } /** @@ -417,190 +444,188 @@ static int cid_of_response(char *s) */ void gigaset_handle_modem_response(struct cardstate *cs) { - unsigned char *argv[MAX_REC_PARAMS + 1]; - int params; - int i, j; + char *eoc, *psep, *ptr; const struct resp_type_t *rt; const struct zsau_resp_t *zr; - int curarg; - unsigned long flags; - unsigned next, tail, head; - struct event_t *event; - int resp_code; - int param_type; - int abort; - size_t len; - int cid; - int rawstring; - - len = cs->cbytes; - if (!len) { + int cid, parameter; + u8 type, value; + + if (!cs->cbytes) { /* ignore additional LFs/CRs (M10x config mode or cx100) */ gig_dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[0]); return; } - cs->respdata[len] = 0; - argv[0] = cs->respdata; - params = 1; + cs->respdata[cs->cbytes] = 0; + if (cs->at_state.getstring) { - /* getstring only allowed without cid at the moment */ + /* state machine wants next line verbatim */ cs->at_state.getstring = 0; - rawstring = 1; - cid = 0; - } else { - /* parse line */ - for (i = 0; i < len; i++) - switch (cs->respdata[i]) { - case ';': - case ',': - case '=': - if (params > MAX_REC_PARAMS) { - dev_warn(cs->dev, - "too many parameters in response\n"); - /* need last parameter (might be CID) */ - params--; - } - argv[params++] = cs->respdata + i + 1; - } - - rawstring = 0; - cid = params > 1 ? cid_of_response(argv[params - 1]) : 0; - if (cid < 0) { - gigaset_add_event(cs, &cs->at_state, RSP_INVAL, - NULL, 0, NULL); - return; - } + ptr = kstrdup(cs->respdata, GFP_ATOMIC); + gig_dbg(DEBUG_EVENT, "string==%s", ptr ? 
ptr : "NULL"); + add_cid_event(cs, 0, RSP_STRING, ptr, 0); + return; + } - for (j = 1; j < params; ++j) - argv[j][-1] = 0; + /* look up response type */ + for (rt = resp_type; rt->response; ++rt) { + eoc = skip_prefix(cs->respdata, rt->response); + if (eoc) + break; + } + if (!rt->response) { + add_cid_event(cs, 0, RSP_NONE, NULL, 0); + gig_dbg(DEBUG_EVENT, "unknown modem response: '%s'\n", + cs->respdata); + return; + } - gig_dbg(DEBUG_EVENT, "CMD received: %s", argv[0]); - if (cid) { - --params; - gig_dbg(DEBUG_EVENT, "CID: %s", argv[params]); - } - gig_dbg(DEBUG_EVENT, "available params: %d", params - 1); - for (j = 1; j < params; j++) - gig_dbg(DEBUG_EVENT, "param %d: %s", j, argv[j]); + /* check for CID */ + psep = strrchr(cs->respdata, ';'); + if (psep && + !kstrtoint(psep + 1, 10, &cid) && + cid >= 1 && cid <= 65535) { + /* valid CID: chop it off */ + *psep = 0; + } else { + /* no valid CID: leave unchanged */ + cid = 0; } - spin_lock_irqsave(&cs->ev_lock, flags); - head = cs->ev_head; - tail = cs->ev_tail; + gig_dbg(DEBUG_EVENT, "CMD received: %s", cs->respdata); + if (cid) + gig_dbg(DEBUG_EVENT, "CID: %d", cid); - abort = 1; - curarg = 0; - while (curarg < params) { - next = (tail + 1) % MAX_EVENTS; - if (unlikely(next == head)) { - dev_err(cs->dev, "event queue full\n"); - break; - } + switch (rt->type) { + case RT_NOTHING: + /* check parameter separator */ + if (*eoc) + goto bad_param; /* extra parameter */ - event = cs->events + tail; - event->at_state = NULL; - event->cid = cid; - event->ptr = NULL; - event->arg = NULL; - tail = next; + add_cid_event(cs, cid, rt->resp_code, NULL, 0); + break; - if (rawstring) { - resp_code = RSP_STRING; - param_type = RT_STRING; - } else { - for (rt = resp_type; rt->response; ++rt) - if (!strcmp(argv[curarg], rt->response)) + case RT_RING: + /* check parameter separator */ + if (!*eoc) + eoc = NULL; /* no parameter */ + else if (*eoc++ != ',') + goto bad_param; + + add_cid_event(cs, 0, rt->resp_code, NULL, cid); + + /* process parameters as individual responses */ + while (eoc) { + /* look up parameter type */ + psep = NULL; + for (rt = resp_type; rt->response; ++rt) { + psep = skip_prefix(eoc, rt->response); + if (psep) break; + } - if (!rt->response) { - event->type = RSP_NONE; - gig_dbg(DEBUG_EVENT, - "unknown modem response: '%s'\n", - argv[curarg]); - break; + /* all legal parameters are of type RT_STRING */ + if (!psep || rt->type != RT_STRING) { + dev_warn(cs->dev, + "illegal RING parameter: '%s'\n", + eoc); + return; } - resp_code = rt->resp_code; - param_type = rt->type; - ++curarg; - } + /* skip parameter value separator */ + if (*psep++ != '=') + goto bad_param; - event->type = resp_code; + /* look up end of parameter */ + eoc = strchr(psep, ','); + if (eoc) + *eoc++ = 0; - switch (param_type) { - case RT_NOTHING: - break; - case RT_RING: - if (!cid) { - dev_err(cs->dev, - "received RING without CID!\n"); - event->type = RSP_INVAL; - abort = 1; - } else { - event->cid = 0; - event->parameter = cid; - abort = 0; - } + /* retrieve parameter value */ + ptr = kstrdup(psep, GFP_ATOMIC); + + /* queue event */ + add_cid_event(cs, cid, rt->resp_code, ptr, 0); + } + break; + + case RT_ZSAU: + /* check parameter separator */ + if (!*eoc) { + /* no parameter */ + add_cid_event(cs, cid, rt->resp_code, NULL, ZSAU_NONE); break; - case RT_ZSAU: - if (curarg >= params) { - event->parameter = ZSAU_NONE; + } + if (*eoc++ != '=') + goto bad_param; + + /* look up parameter value */ + for (zr = zsau_resp; zr->str; ++zr) + if (!strcmp(eoc, zr->str)) 
break; - } - for (zr = zsau_resp; zr->str; ++zr) - if (!strcmp(argv[curarg], zr->str)) - break; - event->parameter = zr->code; - if (!zr->str) - dev_warn(cs->dev, - "%s: unknown parameter %s after ZSAU\n", - __func__, argv[curarg]); - ++curarg; - break; - case RT_STRING: - if (curarg < params) { - event->ptr = kstrdup(argv[curarg], GFP_ATOMIC); - if (!event->ptr) - dev_err(cs->dev, "out of memory\n"); - ++curarg; - } - gig_dbg(DEBUG_EVENT, "string==%s", - event->ptr ? (char *) event->ptr : "NULL"); - break; - case RT_ZCAU: - event->parameter = -1; - if (curarg + 1 < params) { - u8 type, value; - - i = kstrtou8(argv[curarg++], 16, &type); - j = kstrtou8(argv[curarg++], 16, &value); - if (i == 0 && j == 0) - event->parameter = (type << 8) | value; - } else - curarg = params - 1; - break; - case RT_NUMBER: - if (curarg >= params || - kstrtoint(argv[curarg++], 10, &event->parameter)) - event->parameter = -1; - gig_dbg(DEBUG_EVENT, "parameter==%d", event->parameter); - break; + if (!zr->str) + goto bad_param; + + add_cid_event(cs, cid, rt->resp_code, NULL, zr->code); + break; + + case RT_STRING: + /* check parameter separator */ + if (*eoc++ != '=') + goto bad_param; + + /* retrieve parameter value */ + ptr = kstrdup(eoc, GFP_ATOMIC); + + /* queue event */ + add_cid_event(cs, cid, rt->resp_code, ptr, 0); + break; + + case RT_ZCAU: + /* check parameter separators */ + if (*eoc++ != '=') + goto bad_param; + psep = strchr(eoc, ','); + if (!psep) + goto bad_param; + *psep++ = 0; + + /* decode parameter values */ + if (kstrtou8(eoc, 16, &type) || kstrtou8(psep, 16, &value)) { + *--psep = ','; + goto bad_param; } + parameter = (type << 8) | value; - if (resp_code == RSP_ZDLE) - cs->dle = event->parameter; + add_cid_event(cs, cid, rt->resp_code, NULL, parameter); + break; - if (abort) - break; - } + case RT_NUMBER: + /* check parameter separator */ + if (*eoc++ != '=') + goto bad_param; - cs->ev_tail = tail; - spin_unlock_irqrestore(&cs->ev_lock, flags); + /* decode parameter value */ + if (kstrtoint(eoc, 10, ¶meter)) + goto bad_param; + + /* special case ZDLE: set flag before queueing event */ + if (rt->resp_code == RSP_ZDLE) + cs->dle = parameter; - if (curarg != params) - gig_dbg(DEBUG_EVENT, - "invalid number of processed parameters: %d/%d", - curarg, params); + add_cid_event(cs, cid, rt->resp_code, NULL, parameter); + break; + +bad_param: + /* parameter unexpected, incomplete or malformed */ + dev_warn(cs->dev, "bad parameter in response '%s'\n", + cs->respdata); + add_cid_event(cs, cid, rt->resp_code, NULL, -1); + break; + + default: + dev_err(cs->dev, "%s: internal error on '%s'\n", + __func__, cs->respdata); + } } EXPORT_SYMBOL_GPL(gigaset_handle_modem_response); diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c index 6a7447c304ac..358a574d9e8b 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/isdn/icn/icn.c @@ -1609,7 +1609,7 @@ icn_setup(char *line) if (ints[0] > 1) membase = (unsigned long)ints[2]; if (str && *str) { - strcpy(sid, str); + strlcpy(sid, str, sizeof(sid)); icn_id = sid; if ((p = strchr(sid, ','))) { *p++ = 0; diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c index e9f1d8d84613..c53f14a7ce54 100644 --- a/drivers/mmc/core/pwrseq_simple.c +++ b/drivers/mmc/core/pwrseq_simple.c @@ -124,7 +124,7 @@ int mmc_pwrseq_simple_alloc(struct mmc_host *host, struct device *dev) PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) { ret = PTR_ERR(pwrseq->reset_gpios[i]); - while (--i) + while (i--) gpiod_put(pwrseq->reset_gpios[i]); goto clk_put; diff --git 
a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 5b76a173cd95..5897d8d8fa5a 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -526,6 +526,7 @@ config MTD_NAND_SUNXI config MTD_NAND_HISI504 tristate "Support for NAND controller on Hisilicon SoC Hip04" + depends on HAS_DMA help Enables support for NAND controller on Hisilicon SoC Hip04. diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 96b0b1d27df1..10b1f7a4fe50 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) nand_writel(info, NDCR, ndcr | int_mask); } +static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len) +{ + if (info->ecc_bch) { + int timeout; + + /* + * According to the datasheet, when reading from NDDB + * with BCH enabled, after each 32 bytes reads, we + * have to make sure that the NDSR.RDDREQ bit is set. + * + * Drain the FIFO 8 32 bits reads at a time, and skip + * the polling on the last read. + */ + while (len > 8) { + __raw_readsl(info->mmio_base + NDDB, data, 8); + + for (timeout = 0; + !(nand_readl(info, NDSR) & NDSR_RDDREQ); + timeout++) { + if (timeout >= 5) { + dev_err(&info->pdev->dev, + "Timeout on RDDREQ while draining the FIFO\n"); + return; + } + + mdelay(1); + } + + data += 32; + len -= 8; + } + } + + __raw_readsl(info->mmio_base + NDDB, data, len); +} + static void handle_data_pio(struct pxa3xx_nand_info *info) { unsigned int do_bytes = min(info->data_size, info->chunk_size); @@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) DIV_ROUND_UP(info->oob_size, 4)); break; case STATE_PIO_READING: - __raw_readsl(info->mmio_base + NDDB, - info->data_buff + info->data_buff_pos, - DIV_ROUND_UP(do_bytes, 4)); + drain_fifo(info, + info->data_buff + info->data_buff_pos, + DIV_ROUND_UP(do_bytes, 4)); if (info->oob_size > 0) - __raw_readsl(info->mmio_base + NDDB, - info->oob_buff + info->oob_buff_pos, - DIV_ROUND_UP(info->oob_size, 4)); + drain_fifo(info, + info->oob_buff + info->oob_buff_pos, + DIV_ROUND_UP(info->oob_size, 4)); break; default: dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, @@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev) int ret, irq, cs; pdata = dev_get_platdata(&pdev->dev); + if (pdata->num_cs <= 0) + return -ENODEV; info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + sizeof(*host)) * pdata->num_cs, GFP_KERNEL); if (!info) diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 98d73aab52fe..58808f651452 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -131,7 +131,7 @@ config CAN_RCAR config CAN_XILINXCAN tristate "Xilinx CAN" - depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST + depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST depends on COMMON_CLK && HAS_IOMEM ---help--- Xilinx CAN driver. 
This driver supports both soft AXI CAN IP and diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c index b1e8851d3cc4..866e5e12fdd2 100644 --- a/drivers/net/can/cc770/cc770_platform.c +++ b/drivers/net/can/cc770/cc770_platform.c @@ -254,7 +254,7 @@ static int cc770_platform_remove(struct platform_device *pdev) return 0; } -static struct of_device_id cc770_platform_table[] = { +static const struct of_device_id cc770_platform_table[] = { {.compatible = "bosch,cc770"}, /* CC770 from Bosch */ {.compatible = "intc,82527"}, /* AN82527 from Intel CP */ {}, diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index fed1bbd0b0d2..e3d7e22a4fa0 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1725,7 +1725,7 @@ static int grcan_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id grcan_match[] = { +static const struct of_device_id grcan_match[] = { {.name = "GAISLER_GRCAN"}, {.name = "01_03d"}, {.name = "GAISLER_GRHCAN"}, diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index ad024e60ba8c..c7427bdd3a4b 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -43,7 +43,7 @@ struct mpc5xxx_can_data { }; #ifdef CONFIG_PPC_MPC52xx -static struct of_device_id mpc52xx_cdm_ids[] = { +static const struct of_device_id mpc52xx_cdm_ids[] = { { .compatible = "fsl,mpc5200-cdm", }, {} }; diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index 93115250eaf5..0552ed46a206 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -242,7 +242,7 @@ static int sp_remove(struct platform_device *pdev) return 0; } -static struct of_device_id sp_of_table[] = { +static const struct of_device_id sp_of_table[] = { {.compatible = "nxp,sja1000"}, {}, }; diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index a316fa4b91ab..e97a08ce0b90 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -14,6 +14,7 @@ * Copyright (C) 2015 Valeo S.A. 
*/ +#include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/completion.h> #include <linux/module.h> @@ -467,10 +468,11 @@ struct kvaser_usb { struct kvaser_usb_net_priv { struct can_priv can; - atomic_t active_tx_urbs; - struct usb_anchor tx_submitted; + spinlock_t tx_contexts_lock; + int active_tx_contexts; struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; + struct usb_anchor tx_submitted; struct completion start_comp, stop_comp; struct kvaser_usb *dev; @@ -694,6 +696,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, struct kvaser_usb_net_priv *priv; struct sk_buff *skb; struct can_frame *cf; + unsigned long flags; u8 channel, tid; channel = msg->u.tx_acknowledge_header.channel; @@ -737,12 +740,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, stats->tx_packets++; stats->tx_bytes += context->dlc; - can_get_echo_skb(priv->netdev, context->echo_index); - context->echo_index = MAX_TX_URBS; - atomic_dec(&priv->active_tx_urbs); + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + can_get_echo_skb(priv->netdev, context->echo_index); + context->echo_index = MAX_TX_URBS; + --priv->active_tx_contexts; netif_wake_queue(priv->netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); } static void kvaser_usb_simple_msg_callback(struct urb *urb) @@ -803,17 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, return 0; } -static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) -{ - int i; - - usb_kill_anchored_urbs(&priv->tx_submitted); - atomic_set(&priv->active_tx_urbs, 0); - - for (i = 0; i < MAX_TX_URBS; i++) - priv->tx_contexts[i].echo_index = MAX_TX_URBS; -} - static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, const struct kvaser_usb_error_summary *es, struct can_frame *cf) @@ -1515,6 +1510,24 @@ error: return err; } +static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) +{ + int i; + + priv->active_tx_contexts = 0; + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; +} + +/* This method might sleep. Do not call it in the atomic context + * of URB completions. 
+ */ +static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) +{ + usb_kill_anchored_urbs(&priv->tx_submitted); + kvaser_usb_reset_tx_urb_contexts(priv); +} + static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) { int i; @@ -1634,6 +1647,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, struct kvaser_msg *msg; int i, err, ret = NETDEV_TX_OK; u8 *msg_tx_can_flags = NULL; /* GCC */ + unsigned long flags; if (can_dropped_invalid_skb(netdev, skb)) return NETDEV_TX_OK; @@ -1687,12 +1701,21 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_RTR_FLAG) *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; + spin_lock_irqsave(&priv->tx_contexts_lock, flags); for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { context = &priv->tx_contexts[i]; + + context->echo_index = i; + can_put_echo_skb(skb, netdev, context->echo_index); + ++priv->active_tx_contexts; + if (priv->active_tx_contexts >= MAX_TX_URBS) + netif_stop_queue(netdev); + break; } } + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); /* This should never happen; it implies a flow control bug */ if (!context) { @@ -1704,7 +1727,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, } context->priv = priv; - context->echo_index = i; context->dlc = cf->can_dlc; msg->u.tx_can.tid = context->echo_index; @@ -1716,18 +1738,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, kvaser_usb_write_bulk_callback, context); usb_anchor_urb(urb, &priv->tx_submitted); - can_put_echo_skb(skb, netdev, context->echo_index); - - atomic_inc(&priv->active_tx_urbs); - - if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) - netif_stop_queue(netdev); - err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + can_free_echo_skb(netdev, context->echo_index); + context->echo_index = MAX_TX_URBS; + --priv->active_tx_contexts; + netif_wake_queue(netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); - atomic_dec(&priv->active_tx_urbs); usb_unanchor_urb(urb); stats->tx_dropped++; @@ -1854,7 +1875,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf, struct kvaser_usb *dev = usb_get_intfdata(intf); struct net_device *netdev; struct kvaser_usb_net_priv *priv; - int i, err; + int err; err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); if (err) @@ -1868,19 +1889,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf, priv = netdev_priv(netdev); + init_usb_anchor(&priv->tx_submitted); init_completion(&priv->start_comp); init_completion(&priv->stop_comp); - init_usb_anchor(&priv->tx_submitted); - atomic_set(&priv->active_tx_urbs, 0); - - for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) - priv->tx_contexts[i].echo_index = MAX_TX_URBS; - priv->dev = dev; priv->netdev = netdev; priv->channel = channel; + spin_lock_init(&priv->tx_contexts_lock); + kvaser_usb_reset_tx_urb_contexts(priv); + priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = CAN_USB_CLOCK; priv->can.bittiming_const = &kvaser_usb_bittiming_const; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 6c6764312285..6bddfe062b51 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1185,7 +1185,7 @@ static int xcan_remove(struct platform_device *pdev) } /* Match table for OF platform binding */ -static struct of_device_id xcan_of_match[] = { +static const struct of_device_id xcan_of_match[] = { { 
.compatible = "xlnx,zynq-can-1.0", }, { .compatible = "xlnx,axi-can-1.00.a", }, { /* end of list */ }, diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 48e62a34f7f2..18550c7ebe6f 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -7,7 +7,7 @@ config NET_DSA_MV88E6XXX config NET_DSA_MV88E6060 tristate "Marvell 88E6060 ethernet switch chip support" - select NET_DSA + depends on NET_DSA select NET_DSA_TAG_TRAILER ---help--- This enables support for the Marvell 88E6060 ethernet switch @@ -19,7 +19,7 @@ config NET_DSA_MV88E6XXX_NEED_PPU config NET_DSA_MV88E6131 tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support" - select NET_DSA + depends on NET_DSA select NET_DSA_MV88E6XXX select NET_DSA_MV88E6XXX_NEED_PPU select NET_DSA_TAG_DSA @@ -29,7 +29,7 @@ config NET_DSA_MV88E6131 config NET_DSA_MV88E6123_61_65 tristate "Marvell 88E6123/6161/6165 ethernet switch chip support" - select NET_DSA + depends on NET_DSA select NET_DSA_MV88E6XXX select NET_DSA_TAG_EDSA ---help--- @@ -38,7 +38,7 @@ config NET_DSA_MV88E6123_61_65 config NET_DSA_MV88E6171 tristate "Marvell 88E6171/6172 ethernet switch chip support" - select NET_DSA + depends on NET_DSA select NET_DSA_MV88E6XXX select NET_DSA_TAG_EDSA ---help--- @@ -47,7 +47,7 @@ config NET_DSA_MV88E6171 config NET_DSA_MV88E6352 tristate "Marvell 88E6176/88E6352 ethernet switch chip support" - select NET_DSA + depends on NET_DSA select NET_DSA_MV88E6XXX select NET_DSA_TAG_EDSA ---help--- @@ -56,8 +56,7 @@ config NET_DSA_MV88E6352 config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" - depends on HAS_IOMEM - select NET_DSA + depends on HAS_IOMEM && NET_DSA select NET_DSA_TAG_BRCM select FIXED_PHY select BCM7XXX_PHY diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 2b8bfeeee9cf..ae89de7deb13 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1588,7 +1588,7 @@ static int greth_of_remove(struct platform_device *of_dev) return 0; } -static struct of_device_id greth_of_match[] = { +static const struct of_device_id greth_of_match[] = { { .name = "GAISLER_ETHMAC", }, diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index fd9296a5014d..79ea35869e1e 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -89,7 +89,7 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); #define TXQUEUESTOP_THRESHHOLD 2 -static struct of_device_id altera_tse_ids[]; +static const struct of_device_id altera_tse_ids[]; static inline u32 tse_tx_avail(struct altera_tse_private *priv) { @@ -1576,7 +1576,7 @@ static const struct altera_dmaops altera_dtype_msgdma = { .start_rxdma = msgdma_start_rxdma, }; -static struct of_device_id altera_tse_ids[] = { +static const struct of_device_id altera_tse_ids[] = { { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, }, { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, }, { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, }, diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 8eb37e0194b5..bc8b04f42882 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) { struct pcnet32_private *lp; int i, media; - int fdx, mii, fset, dxsuflo; + int fdx, mii, fset, 
dxsuflo, sram; int chip_version; char *chipname; struct net_device *dev; @@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) } /* initialize variables */ - fdx = mii = fset = dxsuflo = 0; + fdx = mii = fset = dxsuflo = sram = 0; chip_version = (chip_version >> 12) & 0xffff; switch (chip_version) { @@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) chipname = "PCnet/FAST III 79C973"; /* PCI */ fdx = 1; mii = 1; + sram = 1; break; case 0x2626: chipname = "PCnet/Home 79C978"; /* PCI */ @@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) chipname = "PCnet/FAST III 79C975"; /* PCI */ fdx = 1; mii = 1; + sram = 1; break; case 0x2628: chipname = "PCnet/PRO 79C976"; @@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) dxsuflo = 1; } + /* + * The Am79C973/Am79C975 controllers come with 12K of SRAM + * which we can use for the Tx/Rx buffers but most importantly, + * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid + * Tx fifo underflows. + */ + if (sram) { + /* + * The SRAM is being configured in two steps. First we + * set the SRAM size in the BCR25:SRAM_SIZE bits. According + * to the datasheet, each bit corresponds to a 512-byte + * page so we can have at most 24 pages. The SRAM_SIZE + * holds the value of the upper 8 bits of the 16-bit SRAM size. + * The low 8-bits start at 0x00 and end at 0xff. So the + * address range is from 0x0000 up to 0x17ff. Therefore, + * the SRAM_SIZE is set to 0x17. The next step is to set + * the BCR26:SRAM_BND midway through so the Tx and Rx + * buffers can share the SRAM equally. + */ + a->write_bcr(ioaddr, 25, 0x17); + a->write_bcr(ioaddr, 26, 0xc); + /* And finally enable the NOUFLO bit */ + a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11)); + } + dev = alloc_etherdev(sizeof(*lp)); if (!dev) { ret = -ENOMEM; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 29a09271b64a..34c28aac767f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -365,6 +365,8 @@ #define MAC_HWF0R_TXCOESEL_WIDTH 1 #define MAC_HWF0R_VLHASH_INDEX 4 #define MAC_HWF0R_VLHASH_WIDTH 1 +#define MAC_HWF1R_ADDR64_INDEX 14 +#define MAC_HWF1R_ADDR64_WIDTH 2 #define MAC_HWF1R_ADVTHWORD_INDEX 13 #define MAC_HWF1R_ADVTHWORD_WIDTH 1 #define MAC_HWF1R_DBGMEMA_INDEX 19 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 400757b49872..80dd7a92f357 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1068,7 +1068,7 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) rdesc->desc3 = 0; /* Make sure ownership is written to the descriptor */ - wmb(); + dma_wmb(); } static void xgbe_tx_desc_init(struct xgbe_channel *channel) @@ -1124,12 +1124,12 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata) * is written to the descriptor(s) before setting the OWN bit * for the descriptor */ - wmb(); + dma_wmb(); XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); /* Make sure ownership is written to the descriptor */ - wmb(); + dma_wmb(); } static void xgbe_rx_desc_init(struct xgbe_channel *channel) @@ -1358,18 +1358,20 @@ static void xgbe_tx_start_xmit(struct xgbe_channel *channel, struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_ring_data *rdata; + /* Make sure everything is written 
before the register write */ + wmb(); + /* Issue a poll command to Tx DMA by writing address * of next immediate free descriptor */ rdata = XGBE_GET_DESC_DATA(ring, ring->cur); XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, lower_32_bits(rdata->rdesc_dma)); - /* Start the Tx coalescing timer */ + /* Start the Tx timer */ if (pdata->tx_usecs && !channel->tx_timer_active) { channel->tx_timer_active = 1; - hrtimer_start(&channel->tx_timer, - ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC), - HRTIMER_MODE_REL); + mod_timer(&channel->tx_timer, + jiffies + usecs_to_jiffies(pdata->tx_usecs)); } ring->tx.xmit_more = 0; @@ -1565,7 +1567,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) * is written to the descriptor(s) before setting the OWN bit * for the first descriptor */ - wmb(); + dma_wmb(); /* Set OWN bit for the first descriptor */ rdata = XGBE_GET_DESC_DATA(ring, start_index); @@ -1577,7 +1579,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) #endif /* Make sure ownership is written to the descriptor */ - wmb(); + dma_wmb(); ring->cur = cur_index + 1; if (!packet->skb->xmit_more || @@ -1613,7 +1615,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel) return 1; /* Make sure descriptor fields are read after reading the OWN bit */ - rmb(); + dma_rmb(); #ifdef XGMAC_ENABLE_RX_DESC_DUMP xgbe_dump_rx_desc(ring, rdesc, ring->cur); @@ -2004,7 +2006,8 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size); - netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n", + netdev_notice(pdata->netdev, + "%d Tx hardware queues, %d byte fifo per queue\n", pdata->tx_q_count, ((fifo_size + 1) * 256)); } @@ -2019,7 +2022,8 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); - netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n", + netdev_notice(pdata->netdev, + "%d Rx hardware queues, %d byte fifo per queue\n", pdata->rx_q_count, ((fifo_size + 1) * 256)); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 885b02b5be07..347fe2419a18 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -411,11 +411,9 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data) return IRQ_HANDLED; } -static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer) +static void xgbe_tx_timer(unsigned long data) { - struct xgbe_channel *channel = container_of(timer, - struct xgbe_channel, - tx_timer); + struct xgbe_channel *channel = (struct xgbe_channel *)data; struct xgbe_prv_data *pdata = channel->pdata; struct napi_struct *napi; @@ -437,8 +435,6 @@ static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer) channel->tx_timer_active = 0; DBGPR("<--xgbe_tx_timer\n"); - - return HRTIMER_NORESTART; } static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata) @@ -454,9 +450,8 @@ static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata) break; DBGPR(" %s adding tx timer\n", channel->name); - hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - channel->tx_timer.function = xgbe_tx_timer; + setup_timer(&channel->tx_timer, xgbe_tx_timer, + (unsigned long)channel); } DBGPR("<--xgbe_init_tx_timers\n"); @@ -475,8 +470,7 @@ static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata) break; DBGPR(" %s deleting tx timer\n", 
channel->name); - channel->tx_timer_active = 0; - hrtimer_cancel(&channel->tx_timer); + del_timer_sync(&channel->tx_timer); } DBGPR("<--xgbe_stop_tx_timers\n"); @@ -519,6 +513,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) RXFIFOSIZE); hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TXFIFOSIZE); + hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64); hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); @@ -553,6 +548,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) break; } + /* Translate the address width setting into actual number */ + switch (hw_feat->dma_width) { + case 0: + hw_feat->dma_width = 32; + break; + case 1: + hw_feat->dma_width = 40; + break; + case 2: + hw_feat->dma_width = 48; + break; + default: + hw_feat->dma_width = 32; + } + /* The Queue, Channel and TC counts are zero based so increment them * to get the actual number */ @@ -692,6 +702,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) DBGPR("-->xgbe_init_rx_coalesce\n"); pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS); + pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS; pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES; hw_if->config_rx_coalesce(pdata); @@ -1800,6 +1811,9 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel) ring->dirty++; } + /* Make sure everything is written before the register write */ + wmb(); + /* Update the Rx Tail Pointer Register with address of * the last cleaned entry */ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1); @@ -1807,16 +1821,15 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel) lower_32_bits(rdata->rdesc_dma)); } -static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, +static struct sk_buff *xgbe_create_skb(struct napi_struct *napi, struct xgbe_ring_data *rdata, unsigned int *len) { - struct net_device *netdev = pdata->netdev; struct sk_buff *skb; u8 *packet; unsigned int copy_len; - skb = netdev_alloc_skb_ip_align(netdev, rdata->rx.hdr.dma_len); + skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); if (!skb) return NULL; @@ -1863,7 +1876,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel) /* Make sure descriptor fields are read after reading the OWN * bit */ - rmb(); + dma_rmb(); #ifdef XGMAC_ENABLE_TX_DESC_DUMP xgbe_dump_tx_desc(ring, ring->dirty, 1, 0); @@ -1986,7 +1999,7 @@ read_again: rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); - skb = xgbe_create_skb(pdata, rdata, &put_len); + skb = xgbe_create_skb(napi, rdata, &put_len); if (!skb) { error = 1; goto skip_data; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index ebf489351555..b4f6eaaa08f0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -291,7 +291,6 @@ static int xgbe_get_settings(struct net_device *netdev, return -ENODEV; ret = phy_ethtool_gset(pdata->phydev, cmd); - cmd->transceiver = XCVR_EXTERNAL; DBGPR("<--xgbe_get_settings\n"); @@ -378,18 +377,14 @@ static int xgbe_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; - unsigned int riwt; DBGPR("-->xgbe_get_coalesce\n"); memset(ec, 0, sizeof(struct ethtool_coalesce)); - riwt = pdata->rx_riwt; - ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt); + ec->rx_coalesce_usecs = pdata->rx_usecs; 
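A note on the coalescing rework threaded through these xgbe hunks: the per-channel Tx trigger moved from an hrtimer to a plain timer_list, so its resolution is now a jiffy rather than nanoseconds; consistent with that, set_coalesce below starts rejecting ec->tx_coalesce_usecs outright instead of pretending to honor it, and XGMAC_INIT_DMA_TX_USECS grows to 1000. A hedged sketch of the conversion pattern on the timer API used here (struct and function names invented):

	#include <linux/jiffies.h>
	#include <linux/printk.h>
	#include <linux/timer.h>

	struct chan {
		struct timer_list tx_timer;	/* was: struct hrtimer */
		unsigned int tx_usecs;
	};

	static void chan_tx_timer(unsigned long data)
	{
		struct chan *ch = (struct chan *)data;

		pr_debug("tx timer fired for channel %p\n", ch);
		/* ... reap completed Tx descriptors here ... */
	}

	static void chan_arm_tx_timer(struct chan *ch)
	{
		setup_timer(&ch->tx_timer, chan_tx_timer, (unsigned long)ch);
		/* usecs_to_jiffies() rounds up; sub-jiffy values become one jiffy */
		mod_timer(&ch->tx_timer, jiffies + usecs_to_jiffies(ch->tx_usecs));
	}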
ec->rx_max_coalesced_frames = pdata->rx_frames; - ec->tx_coalesce_usecs = pdata->tx_usecs; ec->tx_max_coalesced_frames = pdata->tx_frames; DBGPR("<--xgbe_get_coalesce\n"); @@ -403,13 +398,14 @@ static int xgbe_set_coalesce(struct net_device *netdev, struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; unsigned int rx_frames, rx_riwt, rx_usecs; - unsigned int tx_frames, tx_usecs; + unsigned int tx_frames; DBGPR("-->xgbe_set_coalesce\n"); /* Check for not supported parameters */ if ((ec->rx_coalesce_usecs_irq) || (ec->rx_max_coalesced_frames_irq) || + (ec->tx_coalesce_usecs) || (ec->tx_coalesce_usecs_irq) || (ec->tx_max_coalesced_frames_irq) || (ec->stats_block_coalesce_usecs) || @@ -439,17 +435,17 @@ static int xgbe_set_coalesce(struct net_device *netdev, } rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs); + rx_usecs = ec->rx_coalesce_usecs; rx_frames = ec->rx_max_coalesced_frames; /* Use smallest possible value if conversion resulted in zero */ - if (ec->rx_coalesce_usecs && !rx_riwt) + if (rx_usecs && !rx_riwt) rx_riwt = 1; /* Check the bounds of values for Rx */ if (rx_riwt > XGMAC_MAX_DMA_RIWT) { - rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT); netdev_alert(netdev, "rx-usec is limited to %d usecs\n", - rx_usecs); + hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT)); return -EINVAL; } if (rx_frames > pdata->rx_desc_count) { @@ -458,7 +454,6 @@ static int xgbe_set_coalesce(struct net_device *netdev, return -EINVAL; } - tx_usecs = ec->tx_coalesce_usecs; tx_frames = ec->tx_max_coalesced_frames; /* Check the bounds of values for Tx */ @@ -469,10 +464,10 @@ static int xgbe_set_coalesce(struct net_device *netdev, } pdata->rx_riwt = rx_riwt; + pdata->rx_usecs = rx_usecs; pdata->rx_frames = rx_frames; hw_if->config_rx_coalesce(pdata); - pdata->tx_usecs = tx_usecs; pdata->tx_frames = tx_frames; hw_if->config_tx_coalesce(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 32dd65137051..2e4c22d94a6b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -374,15 +374,6 @@ static int xgbe_probe(struct platform_device *pdev) pdata->awcache = XGBE_DMA_SYS_AWCACHE; } - /* Set the DMA mask */ - if (!dev->dma_mask) - dev->dma_mask = &dev->coherent_dma_mask; - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); - if (ret) { - dev_err(dev, "dma_set_mask_and_coherent failed\n"); - goto err_io; - } - /* Get the device interrupt */ ret = platform_get_irq(pdev, 0); if (ret < 0) { @@ -409,6 +400,16 @@ static int xgbe_probe(struct platform_device *pdev) /* Set default configuration data */ xgbe_default_config(pdata); + /* Set the DMA mask */ + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + ret = dma_set_mask_and_coherent(dev, + DMA_BIT_MASK(pdata->hw_feat.dma_width)); + if (ret) { + dev_err(dev, "dma_set_mask_and_coherent failed\n"); + goto err_io; + } + /* Calculate the number of Tx and Rx rings to be created * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set * the number of Tx queues to the number of Tx channels diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 1eea3e5a5d08..dd742426eb04 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -222,7 +222,7 @@ ((_idx) & ((_ring)->rdesc_count - 1))) /* Default coalescing parameters */ -#define XGMAC_INIT_DMA_TX_USECS 50 +#define XGMAC_INIT_DMA_TX_USECS 1000 #define 
XGMAC_INIT_DMA_TX_FRAMES 25 #define XGMAC_MAX_DMA_RIWT 0xff @@ -410,7 +410,7 @@ struct xgbe_channel { unsigned int saved_ier; unsigned int tx_timer_active; - struct hrtimer tx_timer; + struct timer_list tx_timer; struct xgbe_ring *tx_ring; struct xgbe_ring *rx_ring; @@ -632,6 +632,7 @@ struct xgbe_hw_features { unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ unsigned int adv_ts_hi; /* Advance Timestamping High Word */ + unsigned int dma_width; /* DMA width */ unsigned int dcb; /* DCB Feature */ unsigned int sph; /* Split Header Feature */ unsigned int tso; /* TCP Segmentation Offload */ @@ -715,6 +716,7 @@ struct xgbe_prv_data { /* Rx coalescing settings */ unsigned int rx_riwt; + unsigned int rx_usecs; unsigned int rx_frames; /* Current Rx buffer size */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index ec45f3256f0e..d9bc89d69266 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -97,6 +97,8 @@ enum xgene_enet_rm { #define QCOHERENT BIT(4) #define RECOMBBUF BIT(27) +#define MAC_OFFSET 0x30 + #define BLOCK_ETH_CSR_OFFSET 0x2000 #define BLOCK_ETH_RING_IF_OFFSET 0x9000 #define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 635a83be7e5e..6146a993a136 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -645,9 +645,11 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) struct device *dev = ndev_to_dev(ndev); struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; struct xgene_enet_desc_ring *buf_pool = NULL; - u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM; - u8 bp_bufnum = START_BP_BUFNUM; - u16 ring_id, ring_num = START_RING_NUM; + u8 cpu_bufnum = pdata->cpu_bufnum; + u8 eth_bufnum = pdata->eth_bufnum; + u8 bp_bufnum = pdata->bp_bufnum; + u16 ring_num = pdata->ring_num; + u16 ring_id; int ret; /* allocate rx descriptor ring */ @@ -752,6 +754,22 @@ static const struct net_device_ops xgene_ndev_ops = { .ndo_set_mac_address = xgene_enet_set_mac_address, }; +static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata) +{ + u32 id = 0; + int ret; + + ret = device_property_read_u32(dev, "port-id", &id); + if (!ret && id > 1) { + dev_err(dev, "Incorrect port-id specified\n"); + return -ENODEV; + } + + pdata->port_id = id; + + return 0; +} + static int xgene_get_mac_address(struct device *dev, unsigned char *addr) { @@ -843,6 +861,10 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) } pdata->rx_irq = ret; + ret = xgene_get_port_id(dev, pdata); + if (ret) + return ret; + if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN) eth_hw_addr_random(ndev); @@ -866,13 +888,13 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) pdata->clk = NULL; } - base_addr = pdata->base_addr; + base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET); pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII || pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { - pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET; + pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET; pdata->mcx_mac_csr_addr = 
base_addr + BLOCK_ETH_MAC_CSR_OFFSET; } else { pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; @@ -935,6 +957,24 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) pdata->rm = RM0; break; } + + switch (pdata->port_id) { + case 0: + pdata->cpu_bufnum = START_CPU_BUFNUM_0; + pdata->eth_bufnum = START_ETH_BUFNUM_0; + pdata->bp_bufnum = START_BP_BUFNUM_0; + pdata->ring_num = START_RING_NUM_0; + break; + case 1: + pdata->cpu_bufnum = START_CPU_BUFNUM_1; + pdata->eth_bufnum = START_ETH_BUFNUM_1; + pdata->bp_bufnum = START_BP_BUFNUM_1; + pdata->ring_num = START_RING_NUM_1; + break; + default: + break; + } + } static int xgene_enet_probe(struct platform_device *pdev) @@ -1033,7 +1073,7 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); #endif #ifdef CONFIG_OF -static struct of_device_id xgene_enet_of_match[] = { +static const struct of_device_id xgene_enet_of_match[] = { {.compatible = "apm,xgene-enet",}, {.compatible = "apm,xgene1-sgenet",}, {.compatible = "apm,xgene1-xgenet",}, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index c2d465c3db66..b93ed21a157f 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -41,9 +41,15 @@ #define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN) #define NUM_PKT_BUF 64 #define NUM_BUFPOOL 32 -#define START_ETH_BUFNUM 2 -#define START_BP_BUFNUM 0x22 -#define START_RING_NUM 8 + +#define START_CPU_BUFNUM_0 0 +#define START_ETH_BUFNUM_0 2 +#define START_BP_BUFNUM_0 0x22 +#define START_RING_NUM_0 8 +#define START_CPU_BUFNUM_1 12 +#define START_ETH_BUFNUM_1 10 +#define START_BP_BUFNUM_1 0x2A +#define START_RING_NUM_1 264 #define PHY_POLL_LINK_ON (10 * HZ) #define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) @@ -125,6 +131,11 @@ struct xgene_enet_pdata { struct xgene_mac_ops *mac_ops; struct xgene_port_ops *port_ops; struct delayed_work link_work; + u32 port_id; + u8 cpu_bufnum; + u8 eth_bufnum; + u8 bp_bufnum; + u16 ring_num; }; struct xgene_indirect_ctl { diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index f5d4f68c288c..f27fb6f2a93b 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -226,6 +226,7 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *p) static void xgene_sgmac_init(struct xgene_enet_pdata *p) { u32 data, loop = 10; + u32 offset = p->port_id * 4; xgene_sgmac_reset(p); @@ -272,9 +273,9 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p) xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0); /* Bypass traffic gating */ - xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0); + xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0); xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX); - xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR, RESUME_RX0); + xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0); } static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set) @@ -330,13 +331,14 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, u32 dst_ring_num, u16 bufpool_id) { u32 data, fpsel; + u32 offset = p->port_id * MAC_OFFSET; data = CFG_CLE_BYPASS_EN0; - xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR, data); + xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data); fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20; data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel); - 
xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR, data); + xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data); } static void xgene_enet_shutdown(struct xgene_enet_pdata *p) diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index daae0e016253..2f98846e2d89 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -1621,7 +1621,7 @@ static int bmac_remove(struct macio_dev *mdev) return 0; } -static struct of_device_id bmac_match[] = +static const struct of_device_id bmac_match[] = { { .name = "bmac", diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 7fcaf0da42a8..a18948286682 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -984,7 +984,7 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) return IRQ_HANDLED; } -static struct of_device_id mace_match[] = +static const struct of_device_id mace_match[] = { { .name = "mace", diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index ee4fdfe65e9e..a6f9142b9048 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -142,7 +142,7 @@ config BNX2X_SRIOV config BGMAC tristate "BCMA bus GBit core support" - depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX + depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X) select PHYLIB ---help--- This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 0469f72c6e7e..fa8f9e147c34 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -14,6 +14,7 @@ #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/phy.h> +#include <linux/phy_fixed.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <bcm47xx_nvram.h> @@ -114,53 +115,91 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac, bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl); } +static void +bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring, + int i, int len, u32 ctl0) +{ + struct bgmac_slot_info *slot; + struct bgmac_dma_desc *dma_desc; + u32 ctl1; + + if (i == ring->num_slots - 1) + ctl0 |= BGMAC_DESC_CTL0_EOT; + + ctl1 = len & BGMAC_DESC_CTL1_LEN; + + slot = &ring->slots[i]; + dma_desc = &ring->cpu_base[i]; + dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr)); + dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr)); + dma_desc->ctl0 = cpu_to_le32(ctl0); + dma_desc->ctl1 = cpu_to_le32(ctl1); +} + static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, struct bgmac_dma_ring *ring, struct sk_buff *skb) { struct device *dma_dev = bgmac->core->dma_dev; struct net_device *net_dev = bgmac->net_dev; - struct bgmac_dma_desc *dma_desc; - struct bgmac_slot_info *slot; - u32 ctl0, ctl1; + struct bgmac_slot_info *slot = &ring->slots[ring->end]; int free_slots; + int nr_frags; + u32 flags; + int index = ring->end; + int i; if (skb->len > BGMAC_DESC_CTL1_LEN) { bgmac_err(bgmac, "Too long skb (%d)\n", skb->len); - goto err_stop_drop; + goto err_drop; } + if (skb->ip_summed == CHECKSUM_PARTIAL) + skb_checksum_help(skb); + + nr_frags = skb_shinfo(skb)->nr_frags; + if (ring->start <= ring->end) free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS; else free_slots = ring->start - ring->end; - if (free_slots == 1) { + + if (free_slots <= nr_frags + 1) { bgmac_err(bgmac, "TX ring is full, 
queue should be stopped!\n"); netif_stop_queue(net_dev); return NETDEV_TX_BUSY; } - slot = &ring->slots[ring->end]; - slot->skb = skb; - slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len, + slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(dma_dev, slot->dma_addr)) { - bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n", - ring->mmio_base); - goto err_stop_drop; - } + if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr))) + goto err_dma_head; - ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF; - if (ring->end == ring->num_slots - 1) - ctl0 |= BGMAC_DESC_CTL0_EOT; - ctl1 = skb->len & BGMAC_DESC_CTL1_LEN; + flags = BGMAC_DESC_CTL0_SOF; + if (!nr_frags) + flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC; - dma_desc = ring->cpu_base; - dma_desc += ring->end; - dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr)); - dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr)); - dma_desc->ctl0 = cpu_to_le32(ctl0); - dma_desc->ctl1 = cpu_to_le32(ctl1); + bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags); + flags = 0; + + for (i = 0; i < nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + int len = skb_frag_size(frag); + + index = (index + 1) % BGMAC_TX_RING_SLOTS; + slot = &ring->slots[index]; + slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0, + len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr))) + goto err_dma; + + if (i == nr_frags - 1) + flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC; + + bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags); + } + + slot->skb = skb; netdev_sent_queue(net_dev, skb->len); @@ -169,20 +208,35 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, /* Increase ring->end to point empty slot. We tell hardware the first * slot it should *not* read. */ - if (++ring->end >= BGMAC_TX_RING_SLOTS) - ring->end = 0; + ring->end = (index + 1) % BGMAC_TX_RING_SLOTS; bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX, ring->index_base + ring->end * sizeof(struct bgmac_dma_desc)); - /* Always keep one slot free to allow detecting bugged calls. */ - if (--free_slots == 1) + free_slots -= nr_frags + 1; + if (free_slots < 8) netif_stop_queue(net_dev); return NETDEV_TX_OK; -err_stop_drop: - netif_stop_queue(net_dev); +err_dma: + dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb), + DMA_TO_DEVICE); + + while (i > 0) { + int index = (ring->end + i) % BGMAC_TX_RING_SLOTS; + struct bgmac_slot_info *slot = &ring->slots[index]; + u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1); + int len = ctl1 & BGMAC_DESC_CTL1_LEN; + + dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE); + } + +err_dma_head: + bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n", + ring->mmio_base); + +err_drop: dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -204,32 +258,45 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) while (ring->start != empty_slot) { struct bgmac_slot_info *slot = &ring->slots[ring->start]; + u32 ctl1 = le32_to_cpu(ring->cpu_base[ring->start].ctl1); + int len = ctl1 & BGMAC_DESC_CTL1_LEN; - if (slot->skb) { + if (!slot->dma_addr) { + bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! 
End of ring: %d\n", + ring->start, ring->end); + goto next; + } + + if (ctl1 & BGMAC_DESC_CTL0_SOF) /* Unmap no longer used buffer */ - dma_unmap_single(dma_dev, slot->dma_addr, - slot->skb->len, DMA_TO_DEVICE); - slot->dma_addr = 0; + dma_unmap_single(dma_dev, slot->dma_addr, len, + DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, slot->dma_addr, len, + DMA_TO_DEVICE); + if (slot->skb) { bytes_compl += slot->skb->len; pkts_compl++; /* Free memory! :) */ dev_kfree_skb(slot->skb); slot->skb = NULL; - } else { - bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n", - ring->start, ring->end); } +next: + slot->dma_addr = 0; if (++ring->start >= BGMAC_TX_RING_SLOTS) ring->start = 0; freed = true; } + if (!pkts_compl) + return; + netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl); - if (freed && netif_queue_stopped(bgmac->net_dev)) + if (netif_queue_stopped(bgmac->net_dev)) netif_wake_queue(bgmac->net_dev); } @@ -275,31 +342,31 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, struct bgmac_slot_info *slot) { struct device *dma_dev = bgmac->core->dma_dev; - struct sk_buff *skb; dma_addr_t dma_addr; struct bgmac_rx_header *rx; + void *buf; /* Alloc skb */ - skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); - if (!skb) + buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE); + if (!buf) return -ENOMEM; /* Poison - if everything goes fine, hardware will overwrite it */ - rx = (struct bgmac_rx_header *)skb->data; + rx = buf; rx->len = cpu_to_le16(0xdead); rx->flags = cpu_to_le16(0xbeef); /* Map skb for the DMA */ - dma_addr = dma_map_single(dma_dev, skb->data, - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + dma_addr = dma_map_single(dma_dev, buf, BGMAC_RX_BUF_SIZE, + DMA_FROM_DEVICE); if (dma_mapping_error(dma_dev, dma_addr)) { bgmac_err(bgmac, "DMA mapping error\n"); - dev_kfree_skb(skb); + put_page(virt_to_head_page(buf)); return -ENOMEM; } /* Update the slot */ - slot->skb = skb; + slot->buf = buf; slot->dma_addr = dma_addr; return 0; @@ -342,8 +409,9 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, while (ring->start != ring->end) { struct device *dma_dev = bgmac->core->dma_dev; struct bgmac_slot_info *slot = &ring->slots[ring->start]; - struct sk_buff *skb = slot->skb; - struct bgmac_rx_header *rx; + struct bgmac_rx_header *rx = slot->buf; + struct sk_buff *skb; + void *buf = slot->buf; u16 len, flags; /* Unmap buffer to make it accessible to the CPU */ @@ -351,7 +419,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); /* Get info from the header */ - rx = (struct bgmac_rx_header *)skb->data; len = le16_to_cpu(rx->len); flags = le16_to_cpu(rx->flags); @@ -392,12 +459,13 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, dma_unmap_single(dma_dev, old_dma_addr, BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); skb_put(skb, BGMAC_RX_FRAME_OFFSET + len); skb_pull(skb, BGMAC_RX_FRAME_OFFSET); skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, bgmac->net_dev); - netif_receive_skb(skb); + napi_gro_receive(&bgmac->napi, skb); handled++; } while (0); @@ -433,40 +501,79 @@ static bool bgmac_dma_unaligned(struct bgmac *bgmac, return false; } -static void bgmac_dma_ring_free(struct bgmac *bgmac, - struct bgmac_dma_ring *ring) +static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) { struct device *dma_dev = bgmac->core->dma_dev; + struct 
bgmac_dma_desc *dma_desc = ring->cpu_base; struct bgmac_slot_info *slot; - int size; int i; for (i = 0; i < ring->num_slots; i++) { + int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN; + slot = &ring->slots[i]; - if (slot->skb) { - if (slot->dma_addr) - dma_unmap_single(dma_dev, slot->dma_addr, - slot->skb->len, DMA_TO_DEVICE); - dev_kfree_skb(slot->skb); - } + dev_kfree_skb(slot->skb); + + if (!slot->dma_addr) + continue; + + if (slot->skb) + dma_unmap_single(dma_dev, slot->dma_addr, + len, DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, slot->dma_addr, + len, DMA_TO_DEVICE); } +} - if (ring->cpu_base) { - /* Free ring of descriptors */ - size = ring->num_slots * sizeof(struct bgmac_dma_desc); - dma_free_coherent(dma_dev, size, ring->cpu_base, - ring->dma_base); +static void bgmac_dma_rx_ring_free(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_slot_info *slot; + int i; + + for (i = 0; i < ring->num_slots; i++) { + slot = &ring->slots[i]; + if (!slot->buf) + continue; + + if (slot->dma_addr) + dma_unmap_single(dma_dev, slot->dma_addr, + BGMAC_RX_BUF_SIZE, + DMA_FROM_DEVICE); + put_page(virt_to_head_page(slot->buf)); } } +static void bgmac_dma_ring_desc_free(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + struct device *dma_dev = bgmac->core->dma_dev; + int size; + + if (!ring->cpu_base) + return; + + /* Free ring of descriptors */ + size = ring->num_slots * sizeof(struct bgmac_dma_desc); + dma_free_coherent(dma_dev, size, ring->cpu_base, + ring->dma_base); +} + static void bgmac_dma_free(struct bgmac *bgmac) { int i; - for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) - bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]); - for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) - bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]); + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { + bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]); + bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i]); + } + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) { + bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]); + bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i]); + } } static int bgmac_dma_alloc(struct bgmac *bgmac) @@ -1330,13 +1437,46 @@ static void bgmac_adjust_link(struct net_device *net_dev) } } +static int bgmac_fixed_phy_register(struct bgmac *bgmac) +{ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + }; + struct phy_device *phy_dev; + int err; + + phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phy_dev || IS_ERR(phy_dev)) { + bgmac_err(bgmac, "Failed to register fixed PHY device\n"); + return -ENODEV; + } + + err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, + PHY_INTERFACE_MODE_MII); + if (err) { + bgmac_err(bgmac, "Connecting PHY failed\n"); + return err; + } + + bgmac->phy_dev = phy_dev; + + return err; +} + static int bgmac_mii_register(struct bgmac *bgmac) { + struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; struct mii_bus *mii_bus; struct phy_device *phy_dev; char bus_id[MII_BUS_ID_SIZE + 3]; int i, err = 0; + if (ci->id == BCMA_CHIP_ID_BCM4707 || + ci->id == BCMA_CHIP_ID_BCM53018) + return bgmac_fixed_phy_register(bgmac); + mii_bus = mdiobus_alloc(); if (!mii_bus) return -ENOMEM; @@ -1517,6 +1657,10 @@ static int bgmac_probe(struct bcma_device *core) goto err_dma_free; } + net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + net_dev->hw_features = net_dev->features; + net_dev->vlan_features = net_dev->features; + err = 
register_netdev(bgmac->net_dev); if (err) { bgmac_err(bgmac, "Cannot register net device\n"); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 89fa5bc69c51..3ad965fe7fcc 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -345,8 +345,8 @@ #define BGMAC_DESC_CTL0_EOT 0x10000000 /* End of ring */ #define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */ -#define BGMAC_DESC_CTL0_SOF 0x40000000 /* Start of frame */ -#define BGMAC_DESC_CTL0_EOF 0x80000000 /* End of frame */ +#define BGMAC_DESC_CTL0_EOF 0x40000000 /* End of frame */ +#define BGMAC_DESC_CTL0_SOF 0x80000000 /* Start of frame */ #define BGMAC_DESC_CTL1_LEN 0x00001FFF #define BGMAC_PHY_NOREGS 0x1E @@ -362,6 +362,8 @@ #define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */ #define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */ #define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE) +#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define BGMAC_BFL_ENETROBO 0x0010 /* has ephy roboswitch spi */ #define BGMAC_BFL_ENETADM 0x0080 /* has ADMtek switch */ @@ -383,7 +385,10 @@ #define ETHER_MAX_LEN 1518 struct bgmac_slot_info { - struct sk_buff *skb; + union { + struct sk_buff *skb; + void *buf; + }; dma_addr_t dma_addr; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2ee12b54239b..9677431c582a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12769,7 +12769,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; - if (!CHIP_IS_E1x(bp)) { + if (!chip_is_e1x) { dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; dev->hw_enc_features = diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 275be56fd324..12956b143b11 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -197,6 +197,14 @@ enum dma_reg { DMA_PRIORITY_0, DMA_PRIORITY_1, DMA_PRIORITY_2, + DMA_INDEX2RING_0, + DMA_INDEX2RING_1, + DMA_INDEX2RING_2, + DMA_INDEX2RING_3, + DMA_INDEX2RING_4, + DMA_INDEX2RING_5, + DMA_INDEX2RING_6, + DMA_INDEX2RING_7, }; static const u8 bcmgenet_dma_regs_v3plus[] = { @@ -208,6 +216,14 @@ static const u8 bcmgenet_dma_regs_v3plus[] = { [DMA_PRIORITY_0] = 0x30, [DMA_PRIORITY_1] = 0x34, [DMA_PRIORITY_2] = 0x38, + [DMA_INDEX2RING_0] = 0x70, + [DMA_INDEX2RING_1] = 0x74, + [DMA_INDEX2RING_2] = 0x78, + [DMA_INDEX2RING_3] = 0x7C, + [DMA_INDEX2RING_4] = 0x80, + [DMA_INDEX2RING_5] = 0x84, + [DMA_INDEX2RING_6] = 0x88, + [DMA_INDEX2RING_7] = 0x8C, }; static const u8 bcmgenet_dma_regs_v2[] = { @@ -1130,11 +1146,6 @@ static int bcmgenet_xmit_single(struct net_device *dev, dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); - /* Decrement total BD count and advance our write pointer */ - ring->free_bds -= 1; - ring->prod_index += 1; - ring->prod_index &= DMA_P_INDEX_MASK; - return 0; } @@ -1173,11 +1184,6 @@ static int bcmgenet_xmit_frag(struct net_device *dev, (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); - - 
ring->free_bds -= 1; - ring->prod_index += 1; - ring->prod_index &= DMA_P_INDEX_MASK; - return 0; } @@ -1321,51 +1327,65 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); - /* we kept a software copy of how much we should advance the TDMA - * producer index, now write it down to the hardware - */ - bcmgenet_tdma_ring_writel(priv, ring->index, - ring->prod_index, TDMA_PROD_INDEX); + /* Decrement total BD count and advance our write pointer */ + ring->free_bds -= nr_frags + 1; + ring->prod_index += nr_frags + 1; + ring->prod_index &= DMA_P_INDEX_MASK; if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) netif_tx_stop_queue(txq); + if (!skb->xmit_more || netif_xmit_stopped(txq)) + /* Packets are ready, update producer index */ + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); out: spin_unlock_irqrestore(&ring->lock, flags); return ret; } - -static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb) +static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, + struct enet_cb *cb) { struct device *kdev = &priv->pdev->dev; struct sk_buff *skb; + struct sk_buff *rx_skb; dma_addr_t mapping; - int ret; + /* Allocate a new Rx skb */ skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); - if (!skb) - return -ENOMEM; + if (!skb) { + priv->mib.alloc_rx_buff_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb allocation failed\n", __func__); + return NULL; + } - /* a caller did not release this control block */ - WARN_ON(cb->skb != NULL); - cb->skb = skb; - mapping = dma_map_single(kdev, skb->data, - priv->rx_buf_len, DMA_FROM_DEVICE); - ret = dma_mapping_error(kdev, mapping); - if (ret) { + /* DMA-map the new Rx skb */ + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { priv->mib.rx_dma_failed++; - bcmgenet_free_cb(cb); + dev_kfree_skb_any(skb); netif_err(priv, rx_err, priv->dev, - "%s DMA map failed\n", __func__); - return ret; + "%s: Rx skb DMA mapping failed\n", __func__); + return NULL; } + /* Grab the current Rx skb from the ring and DMA-unmap it */ + rx_skb = cb->skb; + if (likely(rx_skb)) + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), + priv->rx_buf_len, DMA_FROM_DEVICE); + + /* Put the new Rx skb on the ring */ + cb->skb = skb; dma_unmap_addr_set(cb, dma_addr, mapping); dmadesc_set_addr(priv, cb->bd_addr, mapping); - return 0; + /* Return the current Rx skb to caller */ + return rx_skb; } /* bcmgenet_desc_rx - descriptor based rx process. 
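The reworked bcmgenet_rx_refill() above inverts the old order of operations: it allocates and DMA-maps the replacement skb first, and only on success unmaps the ring's current buffer, parks the new one in the slot, and returns the old skb to the caller. On allocation failure the slot keeps its valid mapped buffer, so the ring can never develop an unfilled hole; the incoming packet is simply dropped and counted, as the receive loop in the next hunk shows. The shape of the swap, as a minimal sketch with invented names:

	/* Swap-refill: a ring slot always owns a valid, mapped buffer. */
	struct rx_slot {
		void *buf;
	};

	static void *refill_slot(struct rx_slot *s, void *(*alloc_buf)(void))
	{
		void *nbuf = alloc_buf();	/* obtain the replacement first */
		void *old;

		if (!nbuf)
			return NULL;	/* slot untouched; caller drops the packet */

		old = s->buf;		/* take the just-received buffer ... */
		s->buf = nbuf;		/* ... and leave a fresh one in the ring */
		return old;		/* caller builds its skb from 'old' */
	}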
@@ -1381,12 +1401,30 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, struct sk_buff *skb; u32 dma_length_status; unsigned long dma_flag; - int len, err; + int len; unsigned int rxpktprocessed = 0, rxpkttoprocess; unsigned int p_index; + unsigned int discards; unsigned int chksum_ok = 0; p_index = bcmgenet_rdma_ring_readl(priv, index, RDMA_PROD_INDEX); + + discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & + DMA_P_INDEX_DISCARD_CNT_MASK; + if (discards > ring->old_discards) { + discards = discards - ring->old_discards; + dev->stats.rx_missed_errors += discards; + dev->stats.rx_errors += discards; + ring->old_discards += discards; + + /* Clear HW register when we reach 75% of maximum 0xFFFF */ + if (ring->old_discards >= 0xC000) { + ring->old_discards = 0; + bcmgenet_rdma_ring_writel(priv, index, 0, + RDMA_PROD_INDEX); + } + } + p_index &= DMA_P_INDEX_MASK; if (likely(p_index >= ring->c_index)) @@ -1401,26 +1439,14 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, while ((rxpktprocessed < rxpkttoprocess) && (rxpktprocessed < budget)) { cb = &priv->rx_cbs[ring->read_ptr]; - skb = cb->skb; + skb = bcmgenet_rx_refill(priv, cb); - /* We do not have a backing SKB, so we do not have a - * corresponding DMA mapping for this incoming packet since - * bcmgenet_rx_refill always either has both skb and mapping or - * none. - */ if (unlikely(!skb)) { dev->stats.rx_dropped++; dev->stats.rx_errors++; - goto refill; + goto next; } - /* Unmap the packet contents such that we can use the - * RSV from the 64 bytes descriptor when enabled and save - * a 32-bits register read - */ - dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), - priv->rx_buf_len, DMA_FROM_DEVICE); - if (!priv->desc_64b_en) { dma_length_status = dmadesc_get_length_status(priv, cb->bd_addr); @@ -1447,10 +1473,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, "dropping fragmented packet!\n"); dev->stats.rx_dropped++; dev->stats.rx_errors++; - dev_kfree_skb_any(cb->skb); - cb->skb = NULL; - goto refill; + dev_kfree_skb_any(skb); + goto next; } + /* report errors */ if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | DMA_RX_OV | @@ -1469,11 +1495,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, dev->stats.rx_length_errors++; dev->stats.rx_dropped++; dev->stats.rx_errors++; - - /* discard the packet and advance consumer index.*/ - dev_kfree_skb_any(cb->skb); - cb->skb = NULL; - goto refill; + dev_kfree_skb_any(skb); + goto next; } /* error packet */ chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && @@ -1506,17 +1529,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, /* Notify kernel */ napi_gro_receive(&priv->napi, skb); - cb->skb = NULL; netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); - /* refill RX path on the current control block */ -refill: - err = bcmgenet_rx_refill(priv, cb); - if (err) { - priv->mib.alloc_rx_buff_failed++; - netif_err(priv, rx_err, dev, "Rx refill failed\n"); - } - +next: rxpktprocessed++; if (likely(ring->read_ptr < ring->end_ptr)) ring->read_ptr++; @@ -1535,7 +1550,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, struct bcmgenet_rx_ring *ring) { struct enet_cb *cb; - int ret = 0; + struct sk_buff *skb; int i; netif_dbg(priv, hw, priv->dev, "%s\n", __func__); @@ -1543,15 +1558,14 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, /* loop here for each buffer needing assign */ for (i = 0; i < ring->size; i++) { cb = ring->cbs + i; - if (cb->skb) - continue; - - ret = 
bcmgenet_rx_refill(priv, cb); - if (ret) - break; + skb = bcmgenet_rx_refill(priv, cb); + if (skb) + dev_kfree_skb_any(skb); + if (!cb->skb) + return -ENOMEM; } - return ret; + return 0; } static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) @@ -2285,6 +2299,160 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) bcmgenet_tdma_writel(priv, reg, DMA_CTRL); } +static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv, + u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + return !!(reg & (1 << (f_index % 32))); +} + +static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg |= (1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, + u32 f_index, u32 rx_queue) +{ + u32 offset; + u32 reg; + + offset = f_index / 8; + reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); + reg &= ~(0xF << (4 * (f_index % 8))); + reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); + bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); +} + +static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, + u32 f_index, u32 f_length) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_LEN_V3PLUS + + ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * + sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg &= ~(0xFF << (8 * (f_index % 4))); + reg |= ((f_length & 0xFF) << (8 * (f_index % 4))); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv) +{ + u32 f_index; + + for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++) + if (!bcmgenet_hfb_is_filter_enabled(priv, f_index)) + return f_index; + + return -ENOMEM; +} + +/* bcmgenet_hfb_add_filter + * + * Add new filter to Hardware Filter Block to match and direct Rx traffic to + * desired Rx queue. 
+ * + * f_data is an array of unsigned 32-bit integers where each 32-bit integer + * provides filter data for 2 bytes (4 nibbles) of Rx frame: + * + * bits 31:20 - unused + * bit 19 - nibble 0 match enable + * bit 18 - nibble 1 match enable + * bit 17 - nibble 2 match enable + * bit 16 - nibble 3 match enable + * bits 15:12 - nibble 0 data + * bits 11:8 - nibble 1 data + * bits 7:4 - nibble 2 data + * bits 3:0 - nibble 3 data + * + * Example: + * In order to match: + * - Ethernet frame type = 0x0800 (IP) + * - IP version field = 4 + * - IP protocol field = 0x11 (UDP) + * + * The following filter is needed: + * u32 hfb_filter_ipv4_udp[] = { + * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000, + * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000, + * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011, + * }; + * + * To add the filter to HFB and direct the traffic to Rx queue 0, call: + * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp, + * ARRAY_SIZE(hfb_filter_ipv4_udp), 0); + */ +int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data, + u32 f_length, u32 rx_queue) +{ + int f_index; + u32 i; + + f_index = bcmgenet_hfb_find_unused_filter(priv); + if (f_index < 0) + return -ENOMEM; + + if (f_length > priv->hw_params->hfb_filter_size) + return -EINVAL; + + for (i = 0; i < f_length; i++) + bcmgenet_hfb_writel(priv, f_data[i], + (f_index * priv->hw_params->hfb_filter_size + i) * + sizeof(u32)); + + bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); + bcmgenet_hfb_enable_filter(priv, f_index); + bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL); + + return 0; +} + +/* bcmgenet_hfb_clear + * + * Clear Hardware Filter Block and disable all filtering. 
+ */ +static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) +{ + u32 i; + + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); + + for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) + bcmgenet_rdma_writel(priv, 0x0, i); + + for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) + bcmgenet_hfb_reg_writel(priv, 0x0, + HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); + + for (i = 0; i < priv->hw_params->hfb_filter_cnt * + priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); +} + +static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + bcmgenet_hfb_clear(priv); +} + static void bcmgenet_netif_start(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -2350,6 +2518,9 @@ static int bcmgenet_open(struct net_device *dev) /* Always enable ring 16 - descriptor ring */ bcmgenet_enable_dma(priv, dma_ctrl); + /* HFB init */ + bcmgenet_hfb_init(priv); + ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, dev->name, priv); if (ret < 0) { @@ -2594,6 +2765,7 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { .bp_in_en_shift = 17, .bp_in_mask = 0x1ffff, .hfb_filter_cnt = 48, + .hfb_filter_size = 128, .qtag_mask = 0x3F, .tbuf_offset = 0x0600, .hfb_offset = 0x8000, @@ -2611,6 +2783,7 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { .bp_in_en_shift = 17, .bp_in_mask = 0x1ffff, .hfb_filter_cnt = 48, + .hfb_filter_size = 128, .qtag_mask = 0x3F, .tbuf_offset = 0x0600, .hfb_offset = 0x8000, diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 17443db8dc53..1ea838946318 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -509,6 +509,7 @@ struct bcmgenet_hw_params { u8 bp_in_en_shift; u32 bp_in_mask; u8 hfb_filter_cnt; + u8 hfb_filter_size; u8 qtag_mask; u16 tbuf_offset; u32 hfb_offset; @@ -548,6 +549,7 @@ struct bcmgenet_rx_ring { unsigned int read_ptr; /* Rx ring read pointer */ unsigned int cb_ptr; /* Rx ring initial CB ptr */ unsigned int end_ptr; /* Rx ring end CB ptr */ + unsigned int old_discards; }; /* device context */ diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index f00be585f661..a0a04b3638e6 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -293,10 +293,13 @@ static void macb_handle_link_change(struct net_device *dev) spin_unlock_irqrestore(&bp->lock, flags); - macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); - if (status_change) { if (phydev->link) { + /* Update the TX clock rate if and only if the link is + * up and there has been a link change. + */ + macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); + netif_carrier_on(dev); netdev_info(dev, "link up (%d/%s)\n", phydev->speed, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 4af8a9fd75ae..dd4b2da6e468 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -124,7 +124,7 @@ struct filter_entry { /* Macros needed to support the PCI Device ID Table ... 
*/ #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ - static struct pci_device_id cxgb4_pci_tbl[] = { + static const struct pci_device_id cxgb4_pci_tbl[] = { #define CH_PCI_DEVICE_ID_FUNCTION 0x4 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 1498d078c319..afbe1682ff48 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1120,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, } /* Installed successfully, update the cached header too. */ - memcpy(card_fw, fs_fw, sizeof(*card_fw)); + *card_fw = *fs_fw; card_fw_usable = 1; *reset = 0; /* already reset as part of load_fw */ } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index ddfb5b846045..1a9a6f334d2d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -60,8 +60,6 @@ * -- Used to finish the definition of the PCI ID Table. Note that we * -- will be adding a trailing semi-colon (";") here. */ -#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN - #ifndef CH_PCI_DEVICE_ID_FUNCTION #error CH_PCI_DEVICE_ID_FUNCTION not defined! #endif @@ -154,8 +152,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; -#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */ - #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 122e2964e63b..1d893b0b7ddf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -3034,7 +3034,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) /* Macros needed to support the PCI Device ID Table ... */ #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ - static struct pci_device_id cxgb4vf_pci_tbl[] = { + static const struct pci_device_id cxgb4vf_pci_tbl[] = { #define CH_PCI_DEVICE_ID_FUNCTION 0x8 #define CH_PCI_ID_TABLE_ENTRY(devid) \ diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 3b42556f7f8d..ed41559bae77 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev) (unsigned int)tp->rx_ring[i].buffer1, (unsigned int)tp->rx_ring[i].buffer2, buf[0], buf[1], buf[2]); - for (j = 0; buf[j] != 0xee && j < 1600; j++) + for (j = 0; ((j < 1600) && buf[j] != 0xee); j++) if (j < 100) pr_cont(" %02x", buf[j]); pr_cont(" j=%d\n", j); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 996bbc6a244f..eb39673ed6a6 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -362,6 +362,7 @@ struct be_vf_cfg { u16 vlan_tag; u32 tx_rate; u32 plink_tracking; + u32 privileges; }; enum vf_state { @@ -468,6 +469,7 @@ struct be_adapter { u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ u8 __iomem *db; /* Door Bell */ + u8 __iomem *pcicfg; /* On SH,BEx only. 
Shadow of PCI config space */ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ struct be_dma_mem mbox_mem; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index dc278391a391..fb140faeafb1 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1849,15 +1849,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, { int num_eqs, i = 0; - if (lancer_chip(adapter) && num > 8) { - while (num) { - num_eqs = min(num, 8); - __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); - i += num_eqs; - num -= num_eqs; - } - } else { - __be_cmd_modify_eqd(adapter, set_eqd, num); + while (num) { + num_eqs = min(num, 8); + __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); + i += num_eqs; + num -= num_eqs; } return 0; @@ -1865,7 +1861,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, /* Uses synchronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num) + u32 num, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_vlan_config *req; @@ -1883,6 +1879,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); + req->hdr.domain = domain; req->interface_id = if_id; req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 53e903f37247..1ec22300e254 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -2266,7 +2266,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, int be_cmd_get_fw_ver(struct be_adapter *adapter); int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num); + u32 num, u32 domain); int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 5652b005947f..d8df78b6554d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1261,7 +1261,7 @@ static int be_vid_config(struct be_adapter *adapter) for_each_set_bit(i, adapter->vids, VLAN_N_VID) vids[num++] = cpu_to_le16(i); - status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); + status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0); if (status) { dev_err(dev, "Setting HW VLAN filtering failed\n"); /* Set to VLAN promisc mode as setting VLAN filter failed */ @@ -1470,11 +1470,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf, return 0; } +static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan) +{ + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + u16 vids[BE_NUM_VLANS_SUPPORTED]; + int vf_if_id = vf_cfg->if_handle; + int status; + + /* Enable Transparent VLAN Tagging */ + status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0); + if (status) + return status; + + /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */ + vids[0] = 0; + status =
be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1); + if (!status) + dev_info(&adapter->pdev->dev, + "Cleared guest VLANs on VF%d", vf); + + /* After TVT is enabled, disallow VFs to program VLAN filters */ + if (vf_cfg->privileges & BE_PRIV_FILTMGMT) { + status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges & + ~BE_PRIV_FILTMGMT, vf + 1); + if (!status) + vf_cfg->privileges &= ~BE_PRIV_FILTMGMT; + } + return 0; +} + +static int be_clear_vf_tvt(struct be_adapter *adapter, int vf) +{ + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + struct device *dev = &adapter->pdev->dev; + int status; + + /* Reset Transparent VLAN Tagging. */ + status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1, + vf_cfg->if_handle, 0); + if (status) + return status; + + /* Allow VFs to program VLAN filtering */ + if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges | + BE_PRIV_FILTMGMT, vf + 1); + if (!status) { + vf_cfg->privileges |= BE_PRIV_FILTMGMT; + dev_info(dev, "VF%d: FILTMGMT priv enabled", vf); + } + } + + dev_info(dev, + "Disable/re-enable i/f in VM to clear Transparent VLAN tag"); + return 0; +} + static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { struct be_adapter *adapter = netdev_priv(netdev); struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; - int status = 0; + int status; if (!sriov_enabled(adapter)) return -EPERM; @@ -1484,24 +1540,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) if (vlan || qos) { vlan |= qos << VLAN_PRIO_SHIFT; - if (vf_cfg->vlan_tag != vlan) - status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, - vf_cfg->if_handle, 0); + status = be_set_vf_tvt(adapter, vf, vlan); } else { - /* Reset Transparent Vlan Tagging. 
*/ - status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, - vf + 1, vf_cfg->if_handle, 0); + status = be_clear_vf_tvt(adapter, vf); } if (status) { dev_err(&adapter->pdev->dev, - "VLAN %d config on VF %d failed : %#x\n", vlan, - vf, status); + "VLAN %d config on VF %d failed : %#x\n", vlan, vf, + status); return be_cmd_status(status); } vf_cfg->vlan_tag = vlan; - return 0; } @@ -2868,14 +2919,12 @@ void be_detect_error(struct be_adapter *adapter) } } } else { - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_LOW, &ue_lo); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_HIGH, &ue_hi); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); + ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW); + ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH); + ue_lo_mask = ioread32(adapter->pcicfg + + PCICFG_UE_STATUS_LOW_MASK); + ue_hi_mask = ioread32(adapter->pcicfg + + PCICFG_UE_STATUS_HI_MASK); ue_lo = (ue_lo & ~ue_lo_mask); ue_hi = (ue_hi & ~ue_hi_mask); @@ -3480,7 +3529,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, u32 cap_flags, u32 vf) { u32 en_flags; - int status; en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | @@ -3488,10 +3536,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, en_flags &= cap_flags; - status = be_cmd_if_create(adapter, cap_flags, en_flags, - if_handle, vf); - - return status; + return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); } static int be_vfs_if_create(struct be_adapter *adapter) @@ -3510,8 +3555,13 @@ static int be_vfs_if_create(struct be_adapter *adapter) status = be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, vf + 1); - if (!status) + if (!status) { cap_flags = res.if_cap_flags; + /* Prevent VFs from enabling VLAN promiscuous + * mode + */ + cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } } status = be_if_create(adapter, &vf_cfg->if_handle, @@ -3545,7 +3595,6 @@ static int be_vf_setup(struct be_adapter *adapter) struct device *dev = &adapter->pdev->dev; struct be_vf_cfg *vf_cfg; int status, old_vfs, vf; - u32 privileges; old_vfs = pci_num_vf(adapter->pdev); @@ -3575,15 +3624,18 @@ static int be_vf_setup(struct be_adapter *adapter) for_all_vfs(adapter, vf_cfg, vf) { /* Allow VFs to program MAC/VLAN filters */ - status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); - if (!status && !(privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges, + vf + 1); + if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { status = be_cmd_set_fn_privileges(adapter, - privileges | + vf_cfg->privileges | BE_PRIV_FILTMGMT, vf + 1); - if (!status) + if (!status) { + vf_cfg->privileges |= BE_PRIV_FILTMGMT; dev_info(dev, "VF%d has FILTMGMT privilege\n", vf); + } } /* Allow full available bandwidth */ @@ -5154,6 +5206,7 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) static int be_map_pci_bars(struct be_adapter *adapter) { + struct pci_dev *pdev = adapter->pdev; u8 __iomem *addr; u32 sli_intf; @@ -5163,21 +5216,33 @@ static int be_map_pci_bars(struct be_adapter *adapter) adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ?
1 : 0; if (BEx_chip(adapter) && be_physfn(adapter)) { - adapter->csr = pci_iomap(adapter->pdev, 2, 0); + adapter->csr = pci_iomap(pdev, 2, 0); if (!adapter->csr) return -ENOMEM; } - addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); + addr = pci_iomap(pdev, db_bar(adapter), 0); if (!addr) goto pci_map_err; adapter->db = addr; + if (skyhawk_chip(adapter) || BEx_chip(adapter)) { + if (be_physfn(adapter)) { + /* PCICFG is the 2nd BAR in BE2 */ + addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0); + if (!addr) + goto pci_map_err; + adapter->pcicfg = addr; + } else { + adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; + } + } + be_roce_map_pci_bars(adapter); return 0; pci_map_err: - dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); + dev_err(&pdev->dev, "Error in mapping PCI BARs\n"); be_unmap_pci_bars(adapter); return -ENOMEM; } diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index f88cfaa359e7..442410cd2ca4 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1299,7 +1299,7 @@ static int ethoc_resume(struct platform_device *pdev) # define ethoc_resume NULL #endif -static struct of_device_id ethoc_match[] = { +static const struct of_device_id ethoc_match[] = { { .compatible = "opencores,ethoc", }, {}, }; diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index ba84c4a9ce32..25e3425729d0 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -58,14 +58,12 @@ source "drivers/net/ethernet/freescale/fs_enet/Kconfig" config FSL_PQ_MDIO tristate "Freescale PQ MDIO" - depends on FSL_SOC select PHYLIB ---help--- This driver supports the MDIO bus used by the gianfar and UCC drivers. config FSL_XGMAC_MDIO tristate "Freescale XGMAC MDIO" - depends on FSL_SOC select PHYLIB select OF_MDIO ---help--- diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 99492b7e3713..78e1ce09b1ab 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1189,13 +1189,12 @@ static void fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) { struct fec_enet_private *fep; - struct bufdesc *bdp, *bdp_t; + struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; struct fec_enet_priv_tx_q *txq; struct netdev_queue *nq; int index = 0; - int i, bdnum; int entries_free; fep = netdev_priv(ndev); @@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) if (bdp == txq->cur_tx) break; - bdp_t = bdp; - bdnum = 1; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); - skb = txq->tx_skbuff[index]; - while (!skb) { - bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id); - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); - skb = txq->tx_skbuff[index]; - bdnum++; - } - if (skb_shinfo(skb)->nr_frags && - (status = bdp_t->cbd_sc) & BD_ENET_TX_READY) - break; + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); - for (i = 0; i < bdnum; i++) { - if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - bdp->cbd_datlen, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; - if (i < bdnum - 1) - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); - } + skb = txq->tx_skbuff[index]; txq->tx_skbuff[index] = NULL; + if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + bdp->cbd_datlen, DMA_TO_DEVICE); + bdp->cbd_bufaddr = 0; + if (!skb) { + bdp = 
fec_enet_get_nextdesc(bdp, fep, queue_id); + continue; + } /* Check for errors. */ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | @@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) vlan_packet_rcvd = true; - skb_copy_to_linear_data_offset(skb, VLAN_HLEN, - data, (2 * ETH_ALEN)); + memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); skb_pull(skb, VLAN_HLEN); } diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index f495796248db..afe7f39cdd7c 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -1057,7 +1057,7 @@ static int mpc52xx_fec_of_resume(struct platform_device *op) } #endif -static struct of_device_id mpc52xx_fec_match[] = { +static const struct of_device_id mpc52xx_fec_match[] = { { .compatible = "fsl,mpc5200b-fec", }, { .compatible = "fsl,mpc5200-fec", }, { .compatible = "mpc5200-fec", }, diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index e0528900db02..1e647beaf989 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -134,7 +134,7 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of) return 0; } -static struct of_device_id mpc52xx_fec_mdio_match[] = { +static const struct of_device_id mpc52xx_fec_mdio_match[] = { { .compatible = "fsl,mpc5200b-mdio", }, { .compatible = "fsl,mpc5200-mdio", }, { .compatible = "mpc5200b-fec-phy", }, diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index a17628769a1f..9b3639eae676 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -916,7 +916,7 @@ static const struct net_device_ops fs_enet_netdev_ops = { #endif }; -static struct of_device_id fs_enet_match[]; +static const struct of_device_id fs_enet_match[]; static int fs_enet_probe(struct platform_device *ofdev) { const struct of_device_id *match; @@ -1082,7 +1082,7 @@ static int fs_enet_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id fs_enet_match[] = { +static const struct of_device_id fs_enet_match[] = { #ifdef CONFIG_FS_ENET_HAS_SCC { .compatible = "fsl,cpm1-scc-enet", diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 1d5617d2d8bd..68a428de0bc0 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -213,7 +213,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id fs_enet_mdio_bb_match[] = { +static const struct of_device_id fs_enet_mdio_bb_match[] = { { .compatible = "fsl,cpm2-mdio-bitbang", }, diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 1648e3582500..2be383e6d258 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -95,7 +95,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, } -static struct of_device_id fs_enet_mdio_fec_match[]; +static const struct of_device_id fs_enet_mdio_fec_match[]; static int fs_enet_mdio_probe(struct platform_device *ofdev) { const struct of_device_id *match; @@ -208,7 +208,7 @@ static int fs_enet_mdio_remove(struct 
platform_device *ofdev) return 0; } -static struct of_device_id fs_enet_mdio_fec_match[] = { +static const struct of_device_id fs_enet_mdio_fec_match[] = { { .compatible = "fsl,pq1-fec-mdio", }, diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index d1a91e344e6b..3c40f6b99224 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -294,7 +294,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) #endif -static struct of_device_id fsl_pq_mdio_match[] = { +static const struct of_device_id fsl_pq_mdio_match[] = { #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) { .compatible = "fsl,gianfar-tbi", diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7bf3682cdf47..4ee080d49bc0 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -158,7 +158,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, { u32 lstatus; - bdp->bufPtr = buf; + bdp->bufPtr = cpu_to_be32(buf); lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) @@ -166,7 +166,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, gfar_wmb(); - bdp->lstatus = lstatus; + bdp->lstatus = cpu_to_be32(lstatus); } static int gfar_init_bds(struct net_device *ndev) @@ -200,7 +200,8 @@ static int gfar_init_bds(struct net_device *ndev) /* Set the last descriptor in the ring to indicate wrap */ txbdp--; - txbdp->status |= TXBD_WRAP; + txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | + TXBD_WRAP); } rfbptr = &regs->rfbptr0; @@ -214,7 +215,7 @@ static int gfar_init_bds(struct net_device *ndev) struct sk_buff *skb = rx_queue->rx_skbuff[j]; if (skb) { - bufaddr = rxbdp->bufPtr; + bufaddr = be32_to_cpu(rxbdp->bufPtr); } else { skb = gfar_new_skb(ndev, &bufaddr); if (!skb) { @@ -696,19 +697,28 @@ static int gfar_parse_group(struct device_node *np, grp->priv = priv; spin_lock_init(&grp->grplock); if (priv->mode == MQ_MG_MODE) { - u32 *rxq_mask, *txq_mask; - rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); - txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); + u32 rxq_mask, txq_mask; + int ret; + + grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + + ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask); + if (!ret) { + grp->rx_bit_map = rxq_mask ? + rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); + } + + ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask); + if (!ret) { + grp->tx_bit_map = txq_mask ? + txq_mask : (DEFAULT_MAPPING >> priv->num_grps); + } if (priv->poll_mode == GFAR_SQ_POLLING) { /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); - } else { /* GFAR_MQ_POLLING */ - grp->rx_bit_map = rxq_mask ? - *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); - grp->tx_bit_map = txq_mask ?
- *txq_mask : (DEFAULT_MAPPING >> priv->num_grps); } } else { grp->rx_bit_map = 0xFF; @@ -769,11 +779,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) struct gfar_private *priv = NULL; struct device_node *np = ofdev->dev.of_node; struct device_node *child = NULL; - const u32 *stash; - const u32 *stash_len; - const u32 *stash_idx; + struct property *stash; + u32 stash_len = 0; + u32 stash_idx = 0; unsigned int num_tx_qs, num_rx_qs; - u32 *tx_queues, *rx_queues; unsigned short mode, poll_mode; if (!np) @@ -787,10 +796,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) poll_mode = GFAR_SQ_POLLING; } - /* parse the num of HW tx and rx queues */ - tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); - rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); - if (mode == SQ_SG_MODE) { num_tx_qs = 1; num_rx_qs = 1; @@ -809,8 +814,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) num_tx_qs = num_grps; /* one txq per int group */ num_rx_qs = num_grps; /* one rxq per int group */ } else { /* GFAR_MQ_POLLING */ - num_tx_qs = tx_queues ? *tx_queues : 1; - num_rx_qs = rx_queues ? *rx_queues : 1; + u32 tx_queues, rx_queues; + int ret; + + /* parse the num of HW tx and rx queues */ + ret = of_property_read_u32(np, "fsl,num_tx_queues", + &tx_queues); + num_tx_qs = ret ? 1 : tx_queues; + + ret = of_property_read_u32(np, "fsl,num_rx_queues", + &rx_queues); + num_rx_qs = ret ? 1 : rx_queues; } } @@ -851,13 +865,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) if (err) goto rx_alloc_failed; + err = of_property_read_string(np, "model", &model); + if (err) { + pr_err("Device model property missing, aborting\n"); + goto rx_alloc_failed; + } + /* Init Rx queue filer rule set linked list */ INIT_LIST_HEAD(&priv->rx_list.list); priv->rx_list.count = 0; mutex_init(&priv->rx_queue_access); - model = of_get_property(np, "model", NULL); - for (i = 0; i < MAXGROUPS; i++) priv->gfargrp[i].regs = NULL; @@ -877,22 +895,22 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) goto err_grp_init; } - stash = of_get_property(np, "bd-stash", NULL); + stash = of_find_property(np, "bd-stash", NULL); if (stash) { priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; priv->bd_stash_en = 1; } - stash_len = of_get_property(np, "rx-stash-len", NULL); + err = of_property_read_u32(np, "rx-stash-len", &stash_len); - if (stash_len) - priv->rx_stash_size = *stash_len; + if (err == 0) + priv->rx_stash_size = stash_len; - stash_idx = of_get_property(np, "rx-stash-idx", NULL); + err = of_property_read_u32(np, "rx-stash-idx", &stash_idx); - if (stash_idx) - priv->rx_stash_index = *stash_idx; + if (err == 0) + priv->rx_stash_index = stash_idx; if (stash_len || stash_idx) priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; @@ -919,15 +937,15 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | FSL_GIANFAR_DEV_HAS_TIMER; - ctype = of_get_property(np, "phy-connection-type", NULL); + err = of_property_read_string(np, "phy-connection-type", &ctype); /* We only care about rgmii-id. 
The rest are autodetected */ - if (ctype && !strcmp(ctype, "rgmii-id")) + if (err == 0 && !strcmp(ctype, "rgmii-id")) priv->interface = PHY_INTERFACE_MODE_RGMII_ID; else priv->interface = PHY_INTERFACE_MODE_MII; - if (of_get_property(np, "fsl,magic-packet", NULL)) + if (of_find_property(np, "fsl,magic-packet", NULL)) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; priv->phy_node = of_parse_phandle(np, "phy-handle", 0); @@ -1884,14 +1902,15 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) if (!tx_queue->tx_skbuff[i]) continue; - dma_unmap_single(priv->dev, txbdp->bufPtr, - txbdp->length, DMA_TO_DEVICE); + dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), + be16_to_cpu(txbdp->length), DMA_TO_DEVICE); txbdp->lstatus = 0; for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) { txbdp++; - dma_unmap_page(priv->dev, txbdp->bufPtr, - txbdp->length, DMA_TO_DEVICE); + dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), + be16_to_cpu(txbdp->length), + DMA_TO_DEVICE); } txbdp++; dev_kfree_skb_any(tx_queue->tx_skbuff[i]); @@ -1911,7 +1930,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) for (i = 0; i < rx_queue->rx_ring_size; i++) { if (rx_queue->rx_skbuff[i]) { - dma_unmap_single(priv->dev, rxbdp->bufPtr, + dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr), priv->rx_buffer_size, DMA_FROM_DEVICE); dev_kfree_skb_any(rx_queue->rx_skbuff[i]); @@ -2167,16 +2186,16 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, */ if (ip_hdr(skb)->protocol == IPPROTO_UDP) { flags |= TXFCB_UDP; - fcb->phcs = udp_hdr(skb)->check; + fcb->phcs = (__force __be16)(udp_hdr(skb)->check); } else - fcb->phcs = tcp_hdr(skb)->check; + fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); /* l3os is the distance between the start of the * frame (skb->data) and the start of the IP hdr. 
* l4os is the distance between the start of the * l3 hdr and the l4 hdr */ - fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length); + fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); fcb->l4os = skb_network_header_len(skb); fcb->flags = flags; @@ -2185,7 +2204,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) { fcb->flags |= TXFCB_VLN; - fcb->vlctl = skb_vlan_tag_get(skb); + fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); } static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, @@ -2298,7 +2317,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_queue->stats.tx_packets++; txbdp = txbdp_start = tx_queue->cur_tx; - lstatus = txbdp->lstatus; + lstatus = be32_to_cpu(txbdp->lstatus); /* Time stamp insertion requires one additional TxBD */ if (unlikely(do_tstamp)) @@ -2306,11 +2325,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_queue->tx_ring_size); if (nr_frags == 0) { - if (unlikely(do_tstamp)) - txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | - TXBD_INTERRUPT); - else + if (unlikely(do_tstamp)) { + u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); + + lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); + } else { lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + } } else { /* Place the fragment addresses and lengths into the TxBDs */ for (i = 0; i < nr_frags; i++) { @@ -2320,7 +2342,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) frag_len = skb_shinfo(skb)->frags[i].size; - lstatus = txbdp->lstatus | frag_len | + lstatus = be32_to_cpu(txbdp->lstatus) | frag_len | BD_LFLAG(TXBD_READY); /* Handle the last BD specially */ @@ -2336,11 +2358,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) goto dma_map_err; /* set the TxBD length and buffer pointer */ - txbdp->bufPtr = bufaddr; - txbdp->lstatus = lstatus; + txbdp->bufPtr = cpu_to_be32(bufaddr); + txbdp->lstatus = cpu_to_be32(lstatus); } - lstatus = txbdp_start->lstatus; + lstatus = be32_to_cpu(txbdp_start->lstatus); } /* Add TxPAL between FCB and frame if required */ @@ -2388,7 +2410,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(dma_mapping_error(priv->dev, bufaddr))) goto dma_map_err; - txbdp_start->bufPtr = bufaddr; + txbdp_start->bufPtr = cpu_to_be32(bufaddr); /* If time stamping is requested one additional TxBD must be set up. The * first TxBD points to the FCB and must have a data length of @@ -2396,9 +2418,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) * the full frame length. 
*/ if (unlikely(do_tstamp)) { - txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len; - txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | - (skb_headlen(skb) - fcb_len); + u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); + + bufaddr = be32_to_cpu(txbdp_start->bufPtr); + bufaddr += fcb_len; + lstatus_ts |= BD_LFLAG(TXBD_READY) | + (skb_headlen(skb) - fcb_len); + + txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); + txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; } else { lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); @@ -2421,7 +2449,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) gfar_wmb(); - txbdp_start->lstatus = lstatus; + txbdp_start->lstatus = cpu_to_be32(lstatus); gfar_wmb(); /* force lstatus write before tx_skbuff */ @@ -2460,13 +2488,14 @@ dma_map_err: if (do_tstamp) txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); for (i = 0; i < nr_frags; i++) { - lstatus = txbdp->lstatus; + lstatus = be32_to_cpu(txbdp->lstatus); if (!(lstatus & BD_LFLAG(TXBD_READY))) break; - txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY); - bufaddr = txbdp->bufPtr; - dma_unmap_page(priv->dev, bufaddr, txbdp->length, + lstatus &= ~BD_LFLAG(TXBD_READY); + txbdp->lstatus = cpu_to_be32(lstatus); + bufaddr = be32_to_cpu(txbdp->bufPtr); + dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), DMA_TO_DEVICE); txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); } @@ -2607,7 +2636,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); - lstatus = lbdp->lstatus; + lstatus = be32_to_cpu(lbdp->lstatus); /* Only clean completed frames */ if ((lstatus & BD_LFLAG(TXBD_READY)) && @@ -2616,11 +2645,12 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { next = next_txbd(bdp, base, tx_ring_size); - buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN; + buflen = be16_to_cpu(next->length) + + GMAC_FCB_LEN + GMAC_TXPAL_LEN; } else - buflen = bdp->length; + buflen = be16_to_cpu(bdp->length); - dma_unmap_single(priv->dev, bdp->bufPtr, + dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), buflen, DMA_TO_DEVICE); if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { @@ -2631,17 +2661,18 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) shhwtstamps.hwtstamp = ns_to_ktime(*ns); skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); skb_tstamp_tx(skb, &shhwtstamps); - bdp->lstatus &= BD_LFLAG(TXBD_WRAP); + gfar_clear_txbd_status(bdp); bdp = next; } - bdp->lstatus &= BD_LFLAG(TXBD_WRAP); + gfar_clear_txbd_status(bdp); bdp = next_txbd(bdp, base, tx_ring_size); for (i = 0; i < frags; i++) { - dma_unmap_page(priv->dev, bdp->bufPtr, - bdp->length, DMA_TO_DEVICE); - bdp->lstatus &= BD_LFLAG(TXBD_WRAP); + dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), + be16_to_cpu(bdp->length), + DMA_TO_DEVICE); + gfar_clear_txbd_status(bdp); bdp = next_txbd(bdp, base, tx_ring_size); } @@ -2798,13 +2829,13 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) * were verified, then we tell the kernel that no * checksumming is necessary. 
Otherwise, it is [FIXME] */ - if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) + if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == + (RXFCB_CIP | RXFCB_CTU)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); } - /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int amount_pull, struct napi_struct *napi) @@ -2846,8 +2877,9 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, * RXFCB_VLN is pseudo randomly set. */ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && - fcb->flags & RXFCB_VLN) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl); + be16_to_cpu(fcb->flags) & RXFCB_VLN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + be16_to_cpu(fcb->vlctl)); /* Send the packet up the stack */ napi_gro_receive(napi, skb); @@ -2874,7 +2906,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; - while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { + while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) { struct sk_buff *newskb; dma_addr_t bufaddr; @@ -2885,21 +2917,22 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; - dma_unmap_single(priv->dev, bdp->bufPtr, + dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), priv->rx_buffer_size, DMA_FROM_DEVICE); - if (unlikely(!(bdp->status & RXBD_ERR) && - bdp->length > priv->rx_buffer_size)) - bdp->status = RXBD_LARGE; + if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) && + be16_to_cpu(bdp->length) > priv->rx_buffer_size)) + bdp->status = cpu_to_be16(RXBD_LARGE); /* We drop the frame if we failed to allocate a new buffer */ - if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || - bdp->status & RXBD_ERR)) { - count_errors(bdp->status, dev); + if (unlikely(!newskb || + !(be16_to_cpu(bdp->status) & RXBD_LAST) || + be16_to_cpu(bdp->status) & RXBD_ERR)) { + count_errors(be16_to_cpu(bdp->status), dev); if (unlikely(!newskb)) { newskb = skb; - bufaddr = bdp->bufPtr; + bufaddr = be32_to_cpu(bdp->bufPtr); } else if (skb) dev_kfree_skb(skb); } else { @@ -2908,7 +2941,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) howmany++; if (likely(skb)) { - pkt_len = bdp->length - ETH_FCS_LEN; + pkt_len = be16_to_cpu(bdp->length) - + ETH_FCS_LEN; /* Remove the FCS from the packet length */ skb_put(skb, pkt_len); rx_queue->stats.rx_bytes += pkt_len; @@ -3560,7 +3594,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) phy_print_status(phydev); } -static struct of_device_id gfar_match[] = +static const struct of_device_id gfar_match[] = { { .type = "network", diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 9e1802400c23..daa1d37de642 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -544,12 +544,12 @@ struct txbd8 { union { struct { - u16 status; /* Status Fields */ - u16 length; /* Buffer length */ + __be16 status; /* Status Fields */ + __be16 length; /* Buffer length */ }; - u32 lstatus; + __be32 lstatus; }; - u32 bufPtr; /* Buffer Pointer */ + __be32 bufPtr; /* Buffer Pointer */ }; struct txfcb { @@ -557,28 +557,28 @@ struct txfcb { u8 ptp; /* Flag to enable tx timestamping */ u8 l4os; /* Level 4 Header Offset */ u8 l3os; /* Level 3 Header Offset */ - u16 phcs; /* 
Pseudo-header Checksum */ - u16 vlctl; /* VLAN control word */ + __be16 phcs; /* Pseudo-header Checksum */ + __be16 vlctl; /* VLAN control word */ }; struct rxbd8 { union { struct { - u16 status; /* Status Fields */ - u16 length; /* Buffer Length */ + __be16 status; /* Status Fields */ + __be16 length; /* Buffer Length */ }; - u32 lstatus; + __be32 lstatus; }; - u32 bufPtr; /* Buffer Pointer */ + __be32 bufPtr; /* Buffer Pointer */ }; struct rxfcb { - u16 flags; + __be16 flags; u8 rq; /* Receive Queue index */ u8 pro; /* Layer 4 Protocol */ u16 reserved; - u16 vlctl; /* VLAN control word */ + __be16 vlctl; /* VLAN control word */ }; struct gianfar_skb_cb { @@ -1287,6 +1287,14 @@ static inline void gfar_wmb(void) #endif } +static inline void gfar_clear_txbd_status(struct txbd8 *bdp) +{ + u32 lstatus = be32_to_cpu(bdp->lstatus); + + lstatus &= BD_LFLAG(TXBD_WRAP); + bdp->lstatus = cpu_to_be32(lstatus); +} + irqreturn_t gfar_receive(int irq, void *dev_id); int startup_gfar(struct net_device *dev); void stop_gfar(struct net_device *dev); diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 16826341a4c9..77353366f33b 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -554,7 +554,7 @@ static int gianfar_ptp_remove(struct platform_device *dev) return 0; } -static struct of_device_id match_table[] = { +static const struct of_device_id match_table[] = { { .compatible = "fsl,etsec-ptp" }, {}, }; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 357e8b576905..bfdccbd58be0 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3930,7 +3930,7 @@ static int ucc_geth_remove(struct platform_device* ofdev) return 0; } -static struct of_device_id ucc_geth_match[] = { +static const struct of_device_id ucc_geth_match[] = { { .type = "network", .compatible = "ucc_geth", diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index 3a83bc2c613c..7b8fe866f603 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -46,17 +46,43 @@ struct tgec_mdio_controller { #define MDIO_DATA(x) (x & 0xffff) #define MDIO_DATA_BSY BIT(31) +struct mdio_fsl_priv { + struct tgec_mdio_controller __iomem *mdio_base; + bool is_little_endian; +}; + +static u32 xgmac_read32(void __iomem *regs, + bool is_little_endian) +{ + if (is_little_endian) + return ioread32(regs); + else + return ioread32be(regs); +} + +static void xgmac_write32(u32 value, + void __iomem *regs, + bool is_little_endian) +{ + if (is_little_endian) + iowrite32(value, regs); + else + iowrite32be(value, regs); +} + /* * Wait until the MDIO bus is free */ static int xgmac_wait_until_free(struct device *dev, - struct tgec_mdio_controller __iomem *regs) + struct tgec_mdio_controller __iomem *regs, + bool is_little_endian) { unsigned int timeout; /* Wait till the bus is free */ timeout = TIMEOUT; - while ((ioread32be(&regs->mdio_stat) & MDIO_STAT_BSY) && timeout) { + while ((xgmac_read32(&regs->mdio_stat, is_little_endian) & + MDIO_STAT_BSY) && timeout) { cpu_relax(); timeout--; } @@ -73,13 +99,15 @@ static int xgmac_wait_until_free(struct device *dev, * Wait till the MDIO read or write operation is complete */ static int xgmac_wait_until_done(struct device *dev, - struct tgec_mdio_controller __iomem *regs) + struct tgec_mdio_controller __iomem *regs, + bool
is_little_endian) { unsigned int timeout; /* Wait till the MDIO write is complete */ timeout = TIMEOUT; - while ((ioread32be(&regs->mdio_data) & MDIO_DATA_BSY) && timeout) { + while ((xgmac_read32(&regs->mdio_stat, is_little_endian) & + MDIO_STAT_BSY) && timeout) { cpu_relax(); timeout--; } @@ -99,12 +127,14 @@ static int xgmac_wait_until_done(struct device *dev, */ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) { - struct tgec_mdio_controller __iomem *regs = bus->priv; + struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; + struct tgec_mdio_controller __iomem *regs = priv->mdio_base; uint16_t dev_addr; u32 mdio_ctl, mdio_stat; int ret; + bool endian = priv->is_little_endian; - mdio_stat = ioread32be(&regs->mdio_stat); + mdio_stat = xgmac_read32(&regs->mdio_stat, endian); if (regnum & MII_ADDR_C45) { /* Clause 45 (ie 10G) */ dev_addr = (regnum >> 16) & 0x1f; @@ -115,29 +145,29 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val mdio_stat &= ~MDIO_STAT_ENC; } - iowrite32be(mdio_stat, &regs->mdio_stat); + xgmac_write32(mdio_stat, &regs->mdio_stat, endian); - ret = xgmac_wait_until_free(&bus->dev, regs); + ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; /* Set the port and dev addr */ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); - iowrite32be(mdio_ctl, &regs->mdio_ctl); + xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian); /* Set the register address */ if (regnum & MII_ADDR_C45) { - iowrite32be(regnum & 0xffff, &regs->mdio_addr); + xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian); - ret = xgmac_wait_until_free(&bus->dev, regs); + ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; } /* Write the value to the register */ - iowrite32be(MDIO_DATA(value), &regs->mdio_data); + xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian); - ret = xgmac_wait_until_done(&bus->dev, regs); + ret = xgmac_wait_until_done(&bus->dev, regs, endian); if (ret) return ret; @@ -151,14 +181,16 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val */ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { - struct tgec_mdio_controller __iomem *regs = bus->priv; + struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; + struct tgec_mdio_controller __iomem *regs = priv->mdio_base; uint16_t dev_addr; uint32_t mdio_stat; uint32_t mdio_ctl; uint16_t value; int ret; + bool endian = priv->is_little_endian; - mdio_stat = ioread32be(&regs->mdio_stat); + mdio_stat = xgmac_read32(&regs->mdio_stat, endian); if (regnum & MII_ADDR_C45) { dev_addr = (regnum >> 16) & 0x1f; mdio_stat |= MDIO_STAT_ENC; @@ -167,41 +199,41 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) - iowrite32be(mdio_stat, &regs->mdio_stat); + xgmac_write32(mdio_stat, &regs->mdio_stat, endian); - ret = xgmac_wait_until_free(&bus->dev, regs); + ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; /* Set the Port and Device Addrs */ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); - iowrite32be(mdio_ctl, &regs->mdio_ctl); + xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian); /* Set the register address */ if (regnum & MII_ADDR_C45) { - iowrite32be(regnum & 0xffff, &regs->mdio_addr); + xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian); - ret = xgmac_wait_until_free(&bus->dev, regs); + ret = xgmac_wait_until_free(&bus->dev, regs, endian); if (ret) return ret; } /* Initiate the read */ - iowrite32be(mdio_ctl |
MDIO_CTL_READ, &regs->mdio_ctl); + xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian); - ret = xgmac_wait_until_done(&bus->dev, regs); + ret = xgmac_wait_until_done(&bus->dev, regs, endian); if (ret) return ret; /* Return all Fs if nothing was there */ - if (ioread32be(&regs->mdio_stat) & MDIO_STAT_RD_ER) { + if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) { dev_err(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); return 0xffff; } - value = ioread32be(&regs->mdio_data) & 0xffff; + value = xgmac_read32(&regs->mdio_data, endian) & 0xffff; dev_dbg(&bus->dev, "read %04x\n", value); return value; @@ -212,6 +244,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; struct resource res; + struct mdio_fsl_priv *priv; int ret; ret = of_address_to_resource(np, 0, &res); @@ -220,7 +253,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) return ret; } - bus = mdiobus_alloc(); + bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv)); if (!bus) return -ENOMEM; @@ -231,12 +264,19 @@ static int xgmac_mdio_probe(struct platform_device *pdev) snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); /* Set the PHY base address */ - bus->priv = of_iomap(np, 0); - if (!bus->priv) { + priv = bus->priv; + priv->mdio_base = of_iomap(np, 0); + if (!priv->mdio_base) { ret = -ENOMEM; goto err_ioremap; } + if (of_get_property(pdev->dev.of_node, + "little-endian", NULL)) + priv->is_little_endian = true; + else + priv->is_little_endian = false; + ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "cannot register MDIO bus\n"); @@ -248,7 +288,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) return 0; err_registration: - iounmap(bus->priv); + iounmap(priv->mdio_base); err_ioremap: mdiobus_free(bus); @@ -267,7 +307,7 @@ static int xgmac_mdio_remove(struct platform_device *pdev) return 0; } -static struct of_device_id xgmac_mdio_match[] = { +static const struct of_device_id xgmac_mdio_match[] = { { .compatible = "fsl,fman-xmdio", }, diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index c05e50759621..291c87036e17 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -103,7 +103,7 @@ static int ehea_probe_adapter(struct platform_device *dev); static int ehea_remove(struct platform_device *dev); -static struct of_device_id ehea_module_device_table[] = { +static const struct of_device_id ehea_module_device_table[] = { { .name = "lhea", .compatible = "IBM,lhea", @@ -116,7 +116,7 @@ static struct of_device_id ehea_module_device_table[] = { }; MODULE_DEVICE_TABLE(of, ehea_module_device_table); -static struct of_device_id ehea_device_table[] = { +static const struct of_device_id ehea_device_table[] = { { .name = "lhea", .compatible = "IBM,lhea", diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 162762d1a12c..8a17b97baa20 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2981,7 +2981,7 @@ static int emac_remove(struct platform_device *ofdev) } /* XXX Features in here should be replaced by properties...
*/ -static struct of_device_id emac_match[] = +static const struct of_device_id emac_match[] = { { .type = "network", diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index dddaab11a4c7..fdb5cdb3cd15 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -753,7 +753,7 @@ static int mal_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id mal_platform_match[] = +static const struct of_device_id mal_platform_match[] = { { .compatible = "ibm,mcmal", diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index 457088fc5b06..206ccbbae7bb 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -305,7 +305,7 @@ static int rgmii_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id rgmii_match[] = +static const struct of_device_id rgmii_match[] = { { .compatible = "ibm,rgmii", diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c index cb18e7f917c6..32cb6c9007c5 100644 --- a/drivers/net/ethernet/ibm/emac/tah.c +++ b/drivers/net/ethernet/ibm/emac/tah.c @@ -148,7 +148,7 @@ static int tah_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id tah_match[] = +static const struct of_device_id tah_match[] = { { .compatible = "ibm,tah", diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index 36409ccb75ea..8727b865ea02 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -295,7 +295,7 @@ static int zmii_remove(struct platform_device *ofdev) return 0; } -static struct of_device_id zmii_match[] = +static const struct of_device_id zmii_match[] = { { .compatible = "ibm,zmii", diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 072426a72745..cd7675ac5bf9 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1136,6 +1136,8 @@ restart_poll: ibmveth_replenish_task(adapter); if (frames_processed < budget) { + napi_complete(napi); + /* We think we are done - reenable interrupts, * then check once more to make sure we are done. 
*/ @@ -1144,8 +1146,6 @@ restart_poll: BUG_ON(lpar_rc != H_SUCCESS); - napi_complete(napi); - if (ibmveth_rxq_pending_buffer(adapter) && napi_reschedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 4be4576d71aa..4e56c3195989 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4084,6 +4084,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) */ set_bit(__E1000_DOWN, &adapter->state); + netif_carrier_off(netdev); + /* disable receives in the hardware */ rctl = er32(RCTL); if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) @@ -4108,8 +4110,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netif_carrier_off(netdev); - spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); spin_unlock(&adapter->stats64_lock); @@ -6874,7 +6874,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_hw_init; if ((adapter->flags & FLAG_IS_ICH) && - (adapter->flags & FLAG_READ_ONLY_NVM)) + (adapter->flags & FLAG_READ_ONLY_NVM) && + (hw->mac.type < e1000_pch_spt)) e1000e_write_protect_nvm_ich8lan(&adapter->hw); hw->mac.ops.get_bus_info(&adapter->hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c1eaab532c15..bc87968098d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5896,6 +5896,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) case i40e_aqc_opc_send_msg_to_peer: dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); break; + case i40e_aqc_opc_nvm_erase: + case i40e_aqc_opc_nvm_update: + i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); + break; default: dev_info(&pf->pdev->dev, "ARQ Error: Unknown event 0x%04x received\n", diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f366b3b96d03..0e07545ccc97 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1776,6 +1776,7 @@ void igb_down(struct igb_adapter *adapter) wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ + netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); /* disable transmits in the hardware */ @@ -1797,12 +1798,9 @@ void igb_down(struct igb_adapter *adapter) } } - del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netif_carrier_off(netdev); - /* record the stats before reset*/ spin_lock(&adapter->stats64_lock); igb_update_stats(adapter, &adapter->stats64); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index c17ea4b8f84d..95af14e139d7 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1519,6 +1519,7 @@ void igbvf_down(struct igbvf_adapter *adapter) rxdctl = er32(RXDCTL(0)); ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + netif_carrier_off(netdev); netif_stop_queue(netdev); /* disable transmits in the hardware */ @@ -1535,8 +1536,6 @@ void igbvf_down(struct igbvf_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); - netif_carrier_off(netdev); - /* record the stats before reset*/ igbvf_update_stats(adapter); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c 
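The ibmveth hunk above moves napi_complete() ahead of the interrupt re-enable, and the e1000e/igb/igbvf hunks hoist netif_carrier_off() to the top of their *_down() paths. A minimal sketch of the NAPI ordering these fixes rely on (hypothetical mydev_* helpers, not code from this series):

	static int mydev_poll(struct napi_struct *napi, int budget)
	{
		struct mydev_priv *priv = container_of(napi, struct mydev_priv, napi);
		int frames = mydev_clean_rx(priv, budget);

		if (frames < budget) {
			/* Mark NAPI complete *before* unmasking the device
			 * IRQ; done the other way around, an interrupt that
			 * fires in the window sees NAPI still scheduled and
			 * its work is silently lost.
			 */
			napi_complete(napi);
			mydev_enable_irq(priv);

			/* Close the race: frames may have arrived while the
			 * IRQ was still masked, so reschedule if pending.
			 */
			if (mydev_rx_pending(priv) && napi_reschedule(napi))
				mydev_disable_irq(priv);
		}
		return frames;
	}

mydev_clean_rx(), mydev_rx_pending() and the IRQ mask/unmask helpers are assumed driver-local primitives.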
b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 11a1bdbe3fd9..31f91459312f 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -285,6 +285,8 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) /* prevent the interrupt handler from restarting watchdog */ set_bit(__IXGB_DOWN, &adapter->flags); + netif_carrier_off(netdev); + napi_disable(&adapter->napi); /* waiting for NAPI to complete can re-enable interrupts */ ixgb_irq_disable(adapter); @@ -298,7 +300,6 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) adapter->link_speed = 0; adapter->link_duplex = 0; - netif_carrier_off(netdev); netif_stop_queue(netdev); ixgb_reset(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 7dcbbec09a70..7068e9c3691d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -613,7 +613,6 @@ struct ixgbe_adapter { #define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) #define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) #define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7) #define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) #define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9) #define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index c5c97b483d7c..824a7ab79ab6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -171,17 +171,21 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * * Starts the hardware using the generic start_hw function. - * Disables relaxed ordering Then set pcie completion timeout + * Disables relaxed ordering for archs other than SPARC + * Then set pcie completion timeout * **/ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) { +#ifndef CONFIG_SPARC u32 regval; u32 i; +#endif s32 ret_val; ret_val = ixgbe_start_hw_generic(hw); +#ifndef CONFIG_SPARC /* Disable relaxed ordering */ for (i = 0; ((i < hw->mac.max_tx_queues) && (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { @@ -197,7 +201,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } - +#endif if (ret_val) return ret_val; @@ -1193,6 +1197,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, .prot_autoc_write = &prot_autoc_write_generic, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_82598 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index cf55a0df877b..e0c363948bf4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1977,7 +1977,10 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) */ hw->mac.ops.disable_rx_buff(hw); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + if (regval & IXGBE_RXCTRL_RXEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); hw->mac.ops.enable_rx_buff(hw); @@ -2336,6 +2339,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, .prot_autoc_read = &prot_autoc_read_82599, .prot_autoc_write = &prot_autoc_write_82599, + 
.enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_82599 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 9c66babd4edd..06d8f3cfa099 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -312,7 +312,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) { u32 i; - u32 regval; /* Clear the rate limiters */ for (i = 0; i < hw->mac.max_tx_queues; i++) { @@ -321,20 +320,25 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); +#ifndef CONFIG_SPARC /* Disable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { + u32 regval; + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); } for (i = 0; i < hw->mac.max_rx_queues; i++) { + u32 regval; + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } - +#endif return 0; } @@ -703,7 +707,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) hw->adapter_stopped = true; /* Disable the receive unit */ - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0); + hw->mac.ops.disable_rx(hw); /* Clear interrupt mask to stop interrupts from being generated */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); @@ -2639,7 +2643,10 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) { - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + if (regval & IXGBE_RXCTRL_RXEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); return 0; } @@ -3850,3 +3857,44 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) return 0; } +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) +{ + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + if (hw->mac.type != ixgbe_mac_82598EB) { + u32 pfdtxgswc; + + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + } + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } +} + +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) +{ + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); + + if (hw->mac.type != ixgbe_mac_82598EB) { + if (hw->mac.set_lben) { + u32 pfdtxgswc; + + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = false; + } + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 8cfadcb2676e..f21f8a165ec4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -130,6 +130,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); +void ixgbe_enable_rx_generic(struct ixgbe_hw 
*hw); #define IXGBE_FAILED_READ_REG 0xffffffffU #define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e5be0dd508de..02ffb3081b11 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1637,9 +1637,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) /* shut down the DMA engines now so they can be reinitialized later */ /* first Rx */ - reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - reg_ctl &= ~IXGBE_RXCTRL_RXEN; - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); + hw->mac.ops.disable_rx(hw); ixgbe_disable_rx_queue(adapter, rx_ring); /* now Tx */ @@ -1670,6 +1668,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) { struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct ixgbe_hw *hw = &adapter->hw; u32 rctl, reg_data; int ret_val; int err; @@ -1713,14 +1712,16 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) goto err_nomem; } - rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); + hw->mac.ops.disable_rx(hw); ixgbe_configure_rx_ring(adapter, rx_ring); - rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; + rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + rctl |= IXGBE_RXCTRL_DMBYPS; IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); + hw->mac.ops.enable_rx(hw); + return 0; err_nomem: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 21aea7e7f03f..395dc6bb5d82 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1619,14 +1619,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { - struct ixgbe_adapter *adapter = q_vector->adapter; - if (ixgbe_qv_busy_polling(q_vector)) netif_receive_skb(skb); - else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - napi_gro_receive(&q_vector->napi, skb); else - netif_rx(skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -3705,8 +3701,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) u32 rxctrl, rfctl; /* disable receives while setting up the descriptors */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + hw->mac.ops.disable_rx(hw); ixgbe_setup_psrtype(adapter); ixgbe_setup_rdrxctl(adapter); @@ -3731,6 +3726,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); /* disable drop enable for 82598 parts */ if (hw->mac.type == ixgbe_mac_82598EB) rxctrl |= IXGBE_RXCTRL_DMBYPS; @@ -5014,7 +5010,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *upper; struct list_head *iter; - u32 rxctrl; int i; /* signal that we are down to the interrupt handler */ @@ -5022,8 +5017,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) return; /* do nothing if already down */ /* disable receives */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + hw->mac.ops.disable_rx(hw); /* disable all enabled rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) @@ 
-6174,7 +6168,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) /* Cause software interrupt to ensure rings are cleaned */ ixgbe_irq_rearm_queues(adapter, eics); - } /** @@ -7507,14 +7500,9 @@ static void ixgbe_netpoll(struct net_device *netdev) if (test_bit(__IXGBE_DOWN, &adapter->state)) return; - adapter->flags |= IXGBE_FLAG_IN_NETPOLL; - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - for (i = 0; i < adapter->num_q_vectors; i++) - ixgbe_msix_clean_rings(0, adapter->q_vector[i]); - } else { - ixgbe_intr(adapter->pdev->irq, netdev); - } - adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; + /* loop through and schedule all active queues */ + for (i = 0; i < adapter->num_q_vectors; i++) + ixgbe_msix_clean_rings(0, adapter->q_vector[i]); } #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 8451f9a7cbd8..c3ddc944f1e9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2462,8 +2462,8 @@ struct ixgbe_hic_read_shadow_ram { struct ixgbe_hic_write_shadow_ram { union ixgbe_hic_hdr2 hdr; - u32 address; - u16 length; + __be32 address; + __be16 length; u16 pad2; u16 data; u16 pad3; @@ -3067,6 +3067,8 @@ struct ixgbe_mac_operations { s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + void (*disable_rx)(struct ixgbe_hw *hw); + void (*enable_rx)(struct ixgbe_hw *hw); void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); /* DMA Coalescing */ @@ -3137,6 +3139,7 @@ struct ixgbe_mac_info { u8 flags; u8 san_mac_rar_index; struct ixgbe_thermal_sensor_data thermal_sensor_data; + bool set_lben; }; struct ixgbe_phy_info { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 49395420c9b3..f5f948d08b43 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -820,6 +820,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, .prot_autoc_write = &prot_autoc_write_generic, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_X540 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 50bf81908dd6..58a3155af7cd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -557,6 +557,47 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) return status; } +/** ixgbe_disable_rx_x550 - Disable RX unit + * + * Disables the Rx DMA unit for x550 + **/ +static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +{ + u32 rxctrl, pfdtxgswc; + s32 status; + struct ixgbe_hic_disable_rxen fw_cmd; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + + fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; + fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; + fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + fw_cmd.port_number = (u8)hw->bus.lan_id; + + status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + 
sizeof(struct ixgbe_hic_disable_rxen), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* If we fail - disable RX using register write */ + if (status) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } + } +} + /** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash * @hw: pointer to hardware structure * @@ -1306,8 +1347,8 @@ mac_reset_top: * @enable: enable or disable switch for Ethertype anti-spoofing * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing **/ -void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, - int vf) +static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf) { int vf_target_reg = vf >> 3; int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; @@ -1366,6 +1407,8 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, .init_thermal_sensor_thresh = NULL, \ .prot_autoc_read = &prot_autoc_read_generic, \ .prot_autoc_write = &prot_autoc_write_generic, \ + .enable_rx = &ixgbe_enable_rx_generic, \ + .disable_rx = &ixgbe_disable_rx_x550, \ static struct ixgbe_mac_operations mac_ops_X550 = { X550_COMMON_MAC diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 7412d378b77b..770e21a64388 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
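ixgbe_disable_rx_x550() above prefers a host-interface (firmware) command and falls back to a direct register write only when the command fails. A condensed sketch of that pattern, using hypothetical dev_*/fw_stop_rx() names rather than the ixgbe API, and omitting the set_lben loopback bookkeeping:

	static void dev_disable_rx(struct dev_hw *hw)
	{
		u32 ctrl = dev_read_reg(hw, RX_CTRL);

		if (!(ctrl & RX_CTRL_EN))
			return;		/* receiver already disabled */

		/* Preferred path: let the management firmware quiesce the
		 * receiver so it can coordinate with its own traffic.
		 */
		if (fw_stop_rx(hw) == 0)
			return;

		/* Firmware absent or timed out: clear the enable bit
		 * directly, as the generic implementation does.
		 */
		dev_write_reg(hw, RX_CTRL, ctrl & ~RX_CTRL_EN);
	}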
@@ -29,138 +28,138 @@ #define _IXGBEVF_DEFINES_H_ /* Device IDs */ -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 -#define IXGBE_VF_IRQ_CLEAR_MASK 7 -#define IXGBE_VF_MAX_TX_QUEUES 8 -#define IXGBE_VF_MAX_RX_QUEUES 8 +#define IXGBE_VF_IRQ_CLEAR_MASK 7 +#define IXGBE_VF_MAX_TX_QUEUES 8 +#define IXGBE_VF_MAX_RX_QUEUES 8 /* DCB define */ #define IXGBE_VF_MAX_TRAFFIC_CLASS 8 /* Link speed */ typedef u32 ixgbe_link_speed; -#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 -#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 #define IXGBE_LINK_SPEED_100_FULL 0x0008 -#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define IXGBE_LINKS_UP 0x40000000 -#define IXGBE_LINKS_SPEED_82599 0x30000000 -#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 -#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 -#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 /* Interrupt Vector Allocation Registers */ -#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ -#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ /* Receive Config masks */ -#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ -#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ -#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ -#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 /* DCA Control */ #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ /* PSRTYPE bit definitions */ -#define IXGBE_PSRTYPE_TCPHDR 0x00000010 -#define IXGBE_PSRTYPE_UDPHDR 0x00000020 -#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 -#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 -#define IXGBE_PSRTYPE_L2HDR 0x00001000 +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 /* SRRCTL bit definitions */ -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ -#define IXGBE_SRRCTL_RDMTS_SHIFT 22 -#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 -#define IXGBE_SRRCTL_DROP_EN 0x10000000 -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 /* Receive Descriptor bit definitions */ -#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ -#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ -#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ -#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ -#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ -#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ -#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ -#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ -#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ -#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ -#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ -#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ -#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ -#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ -#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ -#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ -#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ -#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ -#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ -#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ -#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ -#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ -#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ -#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ -#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ -#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define IXGBE_RXD_PRI_SHIFT 13 -#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define IXGBE_RXD_CFI_SHIFT 12 - -#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ 
-#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ -#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ -#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ -#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ -#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ -#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ -#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ -#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ - -#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F -#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 -#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 -#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 -#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 -#define IXGBE_RXDADV_RSCCNT_SHIFT 17 -#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 -#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 -#define IXGBE_RXDADV_SPH 0x8000 +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ 
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ IXGBE_RXD_ERR_CE | \ @@ -176,16 +175,16 @@ typedef u32 ixgbe_link_speed; IXGBE_RXDADV_ERR_OSE | \ IXGBE_RXDADV_ERR_USE) -#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ -#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor ext (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { @@ -241,44 +240,44 @@ struct ixgbe_adv_tx_context_desc { }; /* Adv Transmit Descriptor Config Masks */ -#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ -#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ -#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ -#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ -#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ -#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ -#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 
0x00000800 /* L4 Packet TYPE of TCP */ -#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ -#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ -#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ -#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ /* Interrupt register bitmasks */ -#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 #define IXGBE_MAX_EITR 0x00000FF8 #define IXGBE_MIN_EITR 8 /* Error Codes */ -#define IXGBE_ERR_INVALID_MAC_ADDR -1 -#define IXGBE_ERR_RESET_FAILED -2 -#define IXGBE_ERR_INVALID_ARGUMENT -3 +#define IXGBE_ERR_INVALID_MAC_ADDR -1 +#define IXGBE_ERR_RESET_FAILED -2 +#define IXGBE_ERR_INVALID_ARGUMENT -3 /* Transmit Config masks */ #define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index cc0e5b7ff041..e83c85bf2602 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -100,6 +99,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Link test (on/offline)" }; + #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) static int ixgbevf_get_settings(struct net_device *netdev, @@ -120,6 +120,7 @@ static int ixgbevf_get_settings(struct net_device *netdev, if (link_up) { __u32 speed = SPEED_10000; + switch (link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: speed = SPEED_10000; @@ -145,12 +146,14 @@ static int ixgbevf_get_settings(struct net_device *netdev, static u32 ixgbevf_get_msglevel(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -185,7 +188,8 @@ static void ixgbevf_get_regs(struct net_device *netdev, /* Interrupt */ /* don't read EICR because it can clear interrupt causes, instead - * read EICS which is a shadow but doesn't clear EICR */ + * read EICS which is a shadow but doesn't clear EICR + */ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); @@ -390,21 +394,21 @@ clear_reset: static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) { - switch (stringset) { - case ETH_SS_TEST: - return IXGBE_TEST_LEN; - case ETH_SS_STATS: - return IXGBE_GLOBAL_STATS_LEN; - default: - return -EINVAL; - } + switch (stringset) { + case ETH_SS_TEST: + return IXGBE_TEST_LEN; + case ETH_SS_STATS: + return IXGBE_GLOBAL_STATS_LEN; + default: + return -EINVAL; + } } static void ixgbevf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - char *base = (char *) adapter; + char *base = (char *)adapter; int i; #ifdef BP_EXTENDED_STATS u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, @@ -594,8 +598,7 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) } test = reg_test_vf; - /* - * Perform the register test, looping through the test table + /* Perform the register test, looping through the test table * until we either fail or reach the null entry. 
*/ while (test->reg) { @@ -617,8 +620,8 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) break; case WRITE_NO_TEST: ixgbe_write_reg(&adapter->hw, - test->reg + (i * 0x40), - test->write); + test->reg + (i * 0x40), + test->write); break; case TABLE32_TEST: b = reg_pattern_test(adapter, data, @@ -670,7 +673,8 @@ static void ixgbevf_diag_test(struct net_device *netdev, hw_dbg(&adapter->hw, "offline testing starting\n"); /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ + * interfere with test result + */ if (ixgbevf_link_test(adapter, &data[1])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -724,7 +728,7 @@ static int ixgbevf_get_coalesce(struct net_device *netdev, else ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; - /* if in mixed tx/rx queues per vector mode, report only rx settings */ + /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) return 0; @@ -745,12 +749,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, int num_vectors, i; u16 tx_itr_param, rx_itr_param; - /* don't accept tx specific changes if we've got mixed RxTx vectors */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count - && ec->tx_coalesce_usecs) + /* don't accept Tx specific changes if we've got mixed RxTx vectors */ + if (adapter->q_vector[0]->tx.count && + adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs) return -EINVAL; - if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) return -EINVAL; @@ -765,7 +768,6 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, else rx_itr_param = adapter->rx_itr_setting; - if (ec->tx_coalesce_usecs > 1) adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; else @@ -781,10 +783,10 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, for (i = 0; i < num_vectors; i++) { q_vector = adapter->q_vector[i]; if (q_vector->tx.count && !q_vector->rx.count) - /* tx only */ + /* Tx only */ q_vector->itr = tx_itr_param; else - /* rx only or mixed */ + /* Rx only or mixed */ q_vector->itr = rx_itr_param; ixgbevf_write_eitr(q_vector); } @@ -793,22 +795,22 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, } static const struct ethtool_ops ixgbevf_ethtool_ops = { - .get_settings = ixgbevf_get_settings, - .get_drvinfo = ixgbevf_get_drvinfo, - .get_regs_len = ixgbevf_get_regs_len, - .get_regs = ixgbevf_get_regs, - .nway_reset = ixgbevf_nway_reset, - .get_link = ethtool_op_get_link, - .get_ringparam = ixgbevf_get_ringparam, - .set_ringparam = ixgbevf_set_ringparam, - .get_msglevel = ixgbevf_get_msglevel, - .set_msglevel = ixgbevf_set_msglevel, - .self_test = ixgbevf_diag_test, - .get_sset_count = ixgbevf_get_sset_count, - .get_strings = ixgbevf_get_strings, - .get_ethtool_stats = ixgbevf_get_ethtool_stats, - .get_coalesce = ixgbevf_get_coalesce, - .set_coalesce = ixgbevf_set_coalesce, + .get_settings = ixgbevf_get_settings, + .get_drvinfo = ixgbevf_get_drvinfo, + .get_regs_len = ixgbevf_get_regs_len, + .get_regs = ixgbevf_get_regs, + .nway_reset = ixgbevf_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = ixgbevf_get_ringparam, + .set_ringparam = ixgbevf_set_ringparam, + .get_msglevel = ixgbevf_get_msglevel, + .set_msglevel = ixgbevf_set_msglevel, + .self_test = ixgbevf_diag_test, + .get_sset_count = ixgbevf_get_sset_count, + .get_strings = ixgbevf_get_strings, + .get_ethtool_stats = 
ixgbevf_get_ethtool_stats, + .get_coalesce = ixgbevf_get_coalesce, + .set_coalesce = ixgbevf_set_coalesce, }; void ixgbevf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 3a9b356dff01..bc939a1fcb3c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -51,7 +50,8 @@ #define DESC_NEEDED (MAX_SKB_FRAGS + 4) /* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ + * so a DMA handle can be stored along with the buffer + */ struct ixgbevf_tx_buffer { union ixgbe_adv_tx_desc *next_to_watch; unsigned long time_stamp; @@ -132,9 +132,10 @@ struct ixgbevf_ring { u8 __iomem *tail; struct sk_buff *skb; - u16 reg_idx; /* holds the special value that gets the hardware register - * offset associated with this ring, which is different - * for DCB and RSS modes */ + /* holds the special value that gets the hardware register offset + * associated with this ring, which is different for DCB and RSS modes + */ + u16 reg_idx; int queue_index; /* needed for multiqueue queue management */ }; @@ -143,21 +144,21 @@ struct ixgbevf_ring { #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES -#define IXGBEVF_MAX_RSS_QUEUES 2 +#define IXGBEVF_MAX_RSS_QUEUES 2 -#define IXGBEVF_DEFAULT_TXD 1024 -#define IXGBEVF_DEFAULT_RXD 512 -#define IXGBEVF_MAX_TXD 4096 -#define IXGBEVF_MIN_TXD 64 -#define IXGBEVF_MAX_RXD 4096 -#define IXGBEVF_MIN_RXD 64 +#define IXGBEVF_DEFAULT_TXD 1024 +#define IXGBEVF_DEFAULT_RXD 512 +#define IXGBEVF_MAX_TXD 4096 +#define IXGBEVF_MIN_TXD 64 +#define IXGBEVF_MAX_RXD 4096 +#define IXGBEVF_MIN_RXD 64 /* Supported Rx Buffer Sizes */ -#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_2048 2048 +#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ +#define IXGBEVF_RXBUFFER_2048 2048 -#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 -#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 +#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 +#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -186,10 +187,11 @@ struct ixgbevf_ring_container { */ struct ixgbevf_q_vector { struct ixgbevf_adapter *adapter; - u16 v_idx; /* index of q_vector within array, also used for - * finding the bit in EICR and friends that - * represents the vector for this ring */ - u16 itr; /* Interrupt throttle rate written to EITR */ + /* index of q_vector within array, also used for finding the bit in + * EICR and friends that represents the vector for this ring + */ + u16 v_idx; + u16 itr; /* Interrupt throttle rate written to EITR */ 
struct napi_struct napi; struct ixgbevf_ring_container rx, tx; char name[IFNAMSIZ + 9]; @@ -199,19 +201,21 @@ struct ixgbevf_q_vector { #define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */ #define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */ #define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */ -#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) -#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) +#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) +#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) #define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ #define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ -#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD) -#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD) +#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \ + IXGBEVF_QV_STATE_POLL_YIELD) +#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \ + IXGBEVF_QV_STATE_POLL_YIELD) spinlock_t lock; #endif /* CONFIG_NET_RX_BUSY_POLL */ }; + #ifdef CONFIG_NET_RX_BUSY_POLL static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) { - spin_lock_init(&q_vector->lock); q_vector->state = IXGBEVF_QV_STATE_IDLE; } @@ -220,6 +224,7 @@ static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) { int rc = true; + spin_lock_bh(&q_vector->lock); if (q_vector->state & IXGBEVF_QV_LOCKED) { WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); @@ -240,6 +245,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) { int rc = false; + spin_lock_bh(&q_vector->lock); WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_NAPI_YIELD)); @@ -256,6 +262,7 @@ static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) { int rc = true; + spin_lock_bh(&q_vector->lock); if ((q_vector->state & IXGBEVF_QV_LOCKED)) { q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; @@ -275,6 +282,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) { int rc = false; + spin_lock_bh(&q_vector->lock); WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); @@ -297,6 +305,7 @@ static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) { int rc = true; + spin_lock_bh(&q_vector->lock); if (q_vector->state & IXGBEVF_QV_OWNED) rc = false; @@ -307,8 +316,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) #endif /* CONFIG_NET_RX_BUSY_POLL */ -/* - * microsecond values for various ITR rates shifted by 2 to fit itr register +/* microsecond values for various ITR rates shifted by 2 to fit itr register * with the first 3 bits reserved 0 */ #define IXGBE_MIN_RSC_ITR 24 @@ -345,22 +353,22 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) writel(value, ring->tail); } -#define IXGBEVF_RX_DESC(R, i) \ +#define IXGBEVF_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) -#define IXGBEVF_TX_DESC(R, i) \ +#define IXGBEVF_TX_DESC(R, i) \ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) -#define IXGBEVF_TX_CTXTDESC(R, i) 
\ +#define IXGBEVF_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) #define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ -#define OTHER_VECTOR 1 -#define NON_Q_VECTORS (OTHER_VECTOR) +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) -#define MAX_MSIX_Q_VECTORS 2 +#define MAX_MSIX_Q_VECTORS 2 -#define MIN_MSIX_Q_VECTORS 1 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) /* board specific private data structure */ struct ixgbevf_adapter { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 4186981e562d..4ee15adb3bd9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -25,7 +24,6 @@ *******************************************************************************/ - /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code ******************************************************************************/ @@ -170,12 +168,13 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) * @direction: 0 for Rx, 1 for Tx, -1 for other causes * @queue: queue to map the corresponding interrupt to * @msix_vector: the vector to map to the corresponding queue - */ + **/ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, u8 queue, u8 msix_vector) { u32 ivar, index; struct ixgbe_hw *hw = &adapter->hw; + if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -184,7 +183,7 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, ivar |= msix_vector; IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); } else { - /* tx or rx causes */ + /* Tx or Rx causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); @@ -458,11 +457,12 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } -/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum +/** + * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containing ring specific data * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified - */ + **/ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -492,7 +492,8 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, skb->ip_summed = CHECKSUM_UNNECESSARY; } -/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor +/** + * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated @@ -500,7 +501,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, * This function checks the ring, descriptor, and packet information in * order to populate the checksum, VLAN, protocol, and other fields within * the skb. - */ + **/ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -647,7 +648,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, } } -/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail +/** + * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being adjusted * @@ -657,7 +659,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, * that allow for significant optimizations versus the standard function. * As a result we can do things like drop a frag and maintain an accurate * truesize for the skb. - */ + **/ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, struct sk_buff *skb) { @@ -686,7 +688,8 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, skb->tail += pull_len; } -/* ixgbevf_cleanup_headers - Correct corrupted or empty headers +/** + * ixgbevf_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed @@ -702,7 +705,7 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, * it is large enough to qualify as a valid Ethernet frame. 
* * Returns true if an error was encountered and skb was freed. - */ + **/ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -729,12 +732,13 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, return false; } -/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring +/** + * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * * Synchronizes page for reuse by the adapter - */ + **/ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *old_buff) { @@ -764,7 +768,8 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; } -/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff +/** + * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @rx_desc: descriptor containing length of buffer written by hardware @@ -777,7 +782,7 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) * * The function will then update the page offset if necessary and return * true if the buffer can be reused by the adapter. - */ + **/ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, union ixgbe_adv_rx_desc *rx_desc, @@ -958,7 +963,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, * source pruning. */ if ((skb->pkt_type == PACKET_BROADCAST || - skb->pkt_type == PACKET_MULTICAST) && + skb->pkt_type == PACKET_MULTICAST) && ether_addr_equal(rx_ring->netdev->dev_addr, eth_hdr(skb)->h_source)) { dev_kfree_skb_irq(skb); @@ -1016,7 +1021,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) #endif /* attempt to distribute budget to each queue fairly, but don't allow - * the budget to go below 1 because we'll exit polling */ + * the budget to go below 1 because we'll exit polling + */ if (q_vector->rx.count > 1) per_ring_budget = max(budget/q_vector->rx.count, 1); else @@ -1049,7 +1055,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) /** * ixgbevf_write_eitr - write VTEITR register in hardware specific way * @q_vector: structure containing interrupt and ring information - */ + **/ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) { struct ixgbevf_adapter *adapter = q_vector->adapter; @@ -1057,8 +1063,7 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) int v_idx = q_vector->v_idx; u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; - /* - * set the WDIS bit to not clear the timer bits and cause an + /* set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt */ itr_reg |= IXGBE_EITR_CNT_WDIS; @@ -1115,12 +1120,12 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; adapter->eims_enable_mask = 0; - /* - * Populate the IVAR table and set the ITR values to the + /* Populate the IVAR table and set the ITR values to the * corresponding register. 
*/ for (v_idx = 0; v_idx < q_vectors; v_idx++) { struct ixgbevf_ring *ring; + q_vector = adapter->q_vector[v_idx]; ixgbevf_for_each_ring(ring, q_vector->rx) @@ -1130,13 +1135,13 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); if (q_vector->tx.ring && !q_vector->rx.ring) { - /* tx only vector */ + /* Tx only vector */ if (adapter->tx_itr_setting == 1) q_vector->itr = IXGBE_10K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { - /* rx or rx/tx vector */ + /* Rx or Rx/Tx vector */ if (adapter->rx_itr_setting == 1) q_vector->itr = IXGBE_20K_ITR; else @@ -1167,13 +1172,13 @@ enum latency_range { * @q_vector: structure containing interrupt and ring information * @ring_container: structure containing ring performance data * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. **/ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring_container *ring_container) @@ -1187,7 +1192,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, if (packets == 0) return; - /* simple throttlerate management + /* simple throttle rate management * 0-20MB/s lowest (100000 ints/s) * 20-100MB/s low (20000 ints/s) * 100-1249MB/s bulk (8000 ints/s) @@ -1330,8 +1335,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - /* - * The ideal configuration... + /* The ideal configuration... * We have enough vectors to map one per queue. */ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { @@ -1343,8 +1347,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) goto out; } - /* - * If we don't have enough vectors for a 1-to-1 + /* If we don't have enough vectors for a 1-to-1 * mapping, we'll have to group them so there are * multiple queues per vector. 
*/ @@ -1406,8 +1409,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) q_vector->name, q_vector); if (err) { hw_dbg(&adapter->hw, - "request_irq failed for MSIX interrupt " - "Error: %d\n", err); + "request_irq failed for MSIX interrupt Error: %d\n", + err); goto free_queue_irqs; } } @@ -1415,8 +1418,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) err = request_irq(adapter->msix_entries[vector].vector, &ixgbevf_msix_other, 0, netdev->name, adapter); if (err) { - hw_dbg(&adapter->hw, - "request_irq for msix_other failed: %d\n", err); + hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", + err); goto free_queue_irqs; } @@ -1448,6 +1451,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) for (i = 0; i < q_vectors; i++) { struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; + q_vector->rx.ring = NULL; q_vector->tx.ring = NULL; q_vector->rx.count = 0; @@ -1469,8 +1473,7 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) err = ixgbevf_request_msix_irqs(adapter); if (err) - hw_dbg(&adapter->hw, - "request_irq failed, Error %d\n", err); + hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); return err; } @@ -1659,7 +1662,7 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, /* write value back with RXDCTL.ENABLE bit cleared */ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); - /* the hardware may take up to 100us to really disable the rx queue */ + /* the hardware may take up to 100us to really disable the Rx queue */ do { udelay(10); rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); @@ -1786,7 +1789,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); /* Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring */ + * the Base and Length of the Rx Descriptor Ring + */ for (i = 0; i < adapter->num_rx_queues; i++) ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); } @@ -1858,14 +1862,14 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; + netdev_for_each_uc_addr(ha, netdev) { hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); udelay(200); } } else { - /* - * If the list is empty then send message to PF driver to - * clear all macvlans on this VF. + /* If the list is empty then send message to PF driver to + * clear all MAC VLANs on this VF. */ hw->mac.ops.set_uc_addr(hw, 0, NULL); } @@ -2184,7 +2188,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) return; /* do nothing if already down */ - /* disable all enabled rx queues */ + /* disable all enabled Rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); @@ -2406,8 +2410,7 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) int err = 0; int vector, v_budget; - /* - * It's easy to be greedy for MSI-X vectors, but it really + /* It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors * than CPU's. So let's be conservative and only ask for * (roughly) the same number of vectors as there are CPU's. 
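The comment closing the hunk above is the whole sizing policy: request roughly one MSI-X vector per online CPU, never more than one per queue, plus one non-queue vector for mailbox/link events. A minimal sketch of that arithmetic (the helper name is hypothetical, and NON_Q_VECTORS == 1 is an assumption for illustration):

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Illustrative sketch, not the driver's exact code: cap the MSI-X
 * request at min(queues, CPUs) and reserve one extra vector for
 * "other" (mailbox/link) causes.
 */
static int example_msix_budget(int num_rx_queues, int num_tx_queues)
{
        int v_budget = min(num_rx_queues + num_tx_queues,
                           (int)num_online_cpus());

        return v_budget + 1;    /* assumed NON_Q_VECTORS == 1 */
}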
@@ -2418,7 +2421,8 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) v_budget += NON_Q_VECTORS; /* A failure in MSI-X entry allocation isn't fatal, but it does - * mean we disable MSI-X capabilities of the adapter. */ + * mean we disable MSI-X capabilities of the adapter. + */ adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) { @@ -2544,8 +2548,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) err = ixgbevf_alloc_q_vectors(adapter); if (err) { - hw_dbg(&adapter->hw, "Unable to allocate memory for queue " - "vectors\n"); + hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); goto err_alloc_q_vectors; } @@ -2555,8 +2558,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) goto err_alloc_queues; } - hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " - "Tx Queue count = %u\n", + hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); @@ -2600,7 +2602,6 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) /** * ixgbevf_sw_init - Initialize general software structures - * (struct ixgbevf_adapter) * @adapter: board private structure to initialize * * ixgbevf_sw_init initializes the Adapter private data structure. @@ -2615,7 +2616,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) int err; /* PCI config space info */ - hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->revision_id = pdev->revision; @@ -2686,8 +2686,8 @@ out: { \ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ - u64 current_counter = (current_counter_msb << 32) | \ - current_counter_lsb; \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ if (current_counter < last_counter) \ counter += 0x1000000000LL; \ last_counter = current_counter; \ @@ -2758,14 +2758,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) ixgbevf_reinit_locked(adapter); } -/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts - * @adapter - pointer to the device adapter structure +/** + * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter: pointer to the device adapter structure * * This function serves two purposes. First it strobes the interrupt lines * in order to make certain interrupts are occurring. Secondly it sets the * bits needed to check for TX hangs. As a result we should immediately * determine if a hang has occurred. 
- */ + **/ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -2783,7 +2784,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) set_check_for_tx_hang(adapter->tx_ring[i]); } - /* get one bit for every active tx/rx interrupt vector */ + /* get one bit for every active Tx/Rx interrupt vector */ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { struct ixgbevf_q_vector *qv = adapter->q_vector[i]; @@ -2797,7 +2798,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) /** * ixgbevf_watchdog_update_link - update the link status - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure **/ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) { @@ -2825,7 +2826,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) /** * ixgbevf_watchdog_link_is_up - update netif_carrier status and * print link up message - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure **/ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) { @@ -2850,7 +2851,7 @@ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) /** * ixgbevf_watchdog_link_is_down - update netif_carrier status and * print link down message - * @adapter - pointer to the adapter structure + * @adapter: pointer to the adapter structure **/ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) { @@ -2956,7 +2957,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) /** * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) - * @tx_ring: tx descriptor ring (for a specific queue) to setup + * @tx_ring: Tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ @@ -2983,8 +2984,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) err: vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; - hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " - "descriptor ring\n"); + hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } @@ -3006,8 +3006,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; - hw_dbg(&adapter->hw, - "Allocation for Tx Queue %u failed\n", i); + hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); break; } @@ -3016,7 +3015,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) /** * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) - * @rx_ring: rx descriptor ring (for a specific queue) to setup + * @rx_ring: Rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ @@ -3065,8 +3064,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); if (!err) continue; - hw_dbg(&adapter->hw, - "Allocation for Rx Queue %u failed\n", i); + hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); break; } return err; @@ -3136,11 +3134,11 @@ static int ixgbevf_open(struct net_device *netdev) if (hw->adapter_stopped) { ixgbevf_reset(adapter); /* if adapter is still stopped then PF isn't up and - * the vf can't start. */ + * the VF can't start. 
+ */ if (hw->adapter_stopped) { err = IXGBE_ERR_MBX; - pr_err("Unable to start - perhaps the PF Driver isn't " - "up yet\n"); + pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); goto err_setup_reset; } } @@ -3163,8 +3161,7 @@ static int ixgbevf_open(struct net_device *netdev) ixgbevf_configure(adapter); - /* - * Map the Tx/Rx rings to the vectors we were allotted. + /* Map the Tx/Rx rings to the vectors we were allotted. * if request_irq will be called in this function map_rings * must be called *before* up_complete */ @@ -3288,6 +3285,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, if (first->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, @@ -3313,7 +3311,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, *hdr_len += l4len; *hdr_len = skb_transport_offset(skb) + l4len; - /* update gso size and bytecount with header size */ + /* update GSO size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; @@ -3343,6 +3341,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; + switch (first->protocol) { case htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); @@ -3356,8 +3355,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); + "partial checksum but proto=%x!\n", + first->protocol); } break; } @@ -3380,8 +3379,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum but l4 proto=%x!\n", + l4_hdr); } break; } @@ -3405,7 +3404,7 @@ static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); - /* set HW vlan bit if vlan is present */ + /* set HW VLAN bit if VLAN is present */ if (tx_flags & IXGBE_TX_FLAGS_VLAN) cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); @@ -3572,11 +3571,13 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ + * but since that doesn't exist yet, just open code it. + */ smp_mb(); /* We need to check again in a case another CPU has just - * made room available. */ + * made room available. + */ if (likely(ixgbevf_desc_unused(tx_ring) < size)) return -EBUSY; @@ -3615,8 +3616,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_ring = adapter->tx_ring[skb->queue_mapping]; - /* - * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, @@ -3794,8 +3794,7 @@ static int ixgbevf_resume(struct pci_dev *pdev) u32 err; pci_restore_state(pdev); - /* - * pci_restore_state clears dev->state_saved so call + /* pci_restore_state clears dev->state_saved so call * pci_save_state to restore it. 
*/ pci_save_state(pdev); @@ -3930,8 +3929,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_dma; } pci_using_dac = 0; @@ -3962,8 +3960,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); - /* - * call save state here in standalone driver because it relies on + /* call save state here in standalone driver because it relies on * adapter struct to exist, and needs to call netdev_priv */ pci_save_state(pdev); @@ -3978,7 +3975,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ixgbevf_assign_netdev_ops(netdev); - /* Setup hw api */ + /* Setup HW API */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; @@ -3998,11 +3995,11 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXCSUM; + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | @@ -4131,7 +4128,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) * * This function is called after a PCI bus error affecting * this device has been detected. - */ + **/ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -4166,7 +4163,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the ixgbevf_resume routine. - */ + **/ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4194,7 +4191,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the ixgbevf_resume routine. 
- */ + **/ static void ixgbevf_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4214,17 +4211,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = { }; static struct pci_driver ixgbevf_driver = { - .name = ixgbevf_driver_name, - .id_table = ixgbevf_pci_tbl, - .probe = ixgbevf_probe, - .remove = ixgbevf_remove, + .name = ixgbevf_driver_name, + .id_table = ixgbevf_pci_tbl, + .probe = ixgbevf_probe, + .remove = ixgbevf_remove, #ifdef CONFIG_PM /* Power Management Hooks */ - .suspend = ixgbevf_suspend, - .resume = ixgbevf_resume, + .suspend = ixgbevf_suspend, + .resume = ixgbevf_resume, #endif - .shutdown = ixgbevf_shutdown, - .err_handler = &ixgbevf_err_handler + .shutdown = ixgbevf_shutdown, + .err_handler = &ixgbevf_err_handler }; /** @@ -4236,6 +4233,7 @@ static struct pci_driver ixgbevf_driver = { static int __init ixgbevf_init_module(void) { int ret; + pr_info("%s - version %s\n", ixgbevf_driver_string, ixgbevf_driver_version); @@ -4266,6 +4264,7 @@ static void __exit ixgbevf_exit_module(void) char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) { struct ixgbevf_adapter *adapter = hw->back; + return adapter->netdev->name; } diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index d5028ddf4b31..dc68fea4894b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
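The mbx.c hunks below all rest on one primitive: poll the VF mailbox status register with a fixed microsecond delay until the PF raises the expected bit, or give up after mbx->timeout retries. A hedged sketch of that countdown loop, with an illustrative struct and callback standing in for the driver's ixgbe_hw plumbing:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_mbx {
        int timeout;    /* retries left, e.g. IXGBE_VF_MBX_INIT_TIMEOUT */
        int usec;       /* delay per retry, e.g. IXGBE_VF_MBX_INIT_DELAY */
};

/* Illustrative sketch: spin until read_status() reports @bit or the
 * retry budget runs out; the real helpers return IXGBE_ERR_MBX.
 */
static int example_poll_for_bit(struct example_mbx *mbx,
                                u32 (*read_status)(void), u32 bit)
{
        int countdown = mbx->timeout;

        while (countdown && !(read_status() & bit)) {
                countdown--;
                udelay(mbx->usec);
        }

        return countdown ? 0 : -ETIMEDOUT;
}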
@@ -52,10 +51,10 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) } /** - * ixgbevf_poll_for_ack - Wait for message acknowledgement + * ixgbevf_poll_for_ack - Wait for message acknowledgment * @hw: pointer to the HW structure * - * returns 0 if it successfully received a message acknowledgement + * returns 0 if it successfully received a message acknowledgment **/ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) { @@ -213,7 +212,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw) s32 ret_val = IXGBE_ERR_MBX; if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | - IXGBE_VFMAILBOX_RSTI))) { + IXGBE_VFMAILBOX_RSTI))) { ret_val = 0; hw->mbx.stats.rsts++; } @@ -234,7 +233,7 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) /* Take ownership of the buffer */ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); - /* reserve mailbox for vf use */ + /* reserve mailbox for VF use */ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) ret_val = 0; @@ -254,8 +253,7 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) s32 ret_val; u16 i; - - /* lock the mailbox to prevent pf/vf race condition */ + /* lock the mailbox to prevent PF/VF race condition */ ret_val = ixgbevf_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_write; @@ -279,7 +277,7 @@ out_no_write: } /** - * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf + * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -291,7 +289,7 @@ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) s32 ret_val = 0; u16 i; - /* lock the mailbox to prevent pf/vf race condition */ + /* lock the mailbox to prevent PF/VF race condition */ ret_val = ixgbevf_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_read; @@ -311,17 +309,18 @@ out_no_read: } /** - * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox + * ixgbevf_init_mbx_params_vf - set initial values for VF mailbox * @hw: pointer to the HW structure * - * Initializes the hw->mbx struct to correct values for vf mailbox + * Initializes the hw->mbx struct to correct values for VF mailbox */ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout - * value to begin communications */ + * value to begin communications + */ mbx->timeout = 0; mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; @@ -337,13 +336,13 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) } const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { - .init_params = ixgbevf_init_mbx_params_vf, - .read = ixgbevf_read_mbx_vf, - .write = ixgbevf_write_mbx_vf, - .read_posted = ixgbevf_read_posted_mbx, - .write_posted = ixgbevf_write_posted_mbx, - .check_for_msg = ixgbevf_check_for_msg_vf, - .check_for_ack = ixgbevf_check_for_ack_vf, - .check_for_rst = ixgbevf_check_for_rst_vf, + .init_params = ixgbevf_init_mbx_params_vf, + .read = ixgbevf_read_mbx_vf, + .write = ixgbevf_write_mbx_vf, + .read_posted = ixgbevf_read_posted_mbx, + .write_posted = ixgbevf_write_posted_mbx, + .check_for_msg = ixgbevf_check_for_msg_vf, + .check_for_ack = ixgbevf_check_for_ack_vf, + .check_for_rst = ixgbevf_check_for_rst_vf, }; diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 0bc30058ff82..6253e9335cae 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ 
b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -30,56 +29,54 @@ #include "vf.h" -#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 -#define IXGBE_VFMAILBOX 0x002FC -#define IXGBE_VFMBMEM 0x00200 +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 /* Define mailbox register bits */ -#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ -#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) -#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) +#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) +#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) -#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ #define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ -#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFREQ_VF1 
0x00000001 /* bit for VF 1 message */ #define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ -#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ - +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is IXGBE_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - * clear to send requests */ -#define IXGBE_VT_MSGINFO_SHIFT 16 +/* Messages below or'd with this are the ACK */ +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 +#define IXGBE_VT_MSGINFO_SHIFT 16 /* bits 23:16 are used for extra info for certain messages */ -#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) /* definitions to support mailbox API version negotiation */ -/* - * each element denotes a version of the API; existing numbers may not +/* each element denotes a version of the API; existing numbers may not * change; any additions must go at the end */ enum ixgbe_pfvf_api_rev { @@ -91,10 +88,10 @@ enum ixgbe_pfvf_api_rev { }; /* mailbox API, legacy requests */ -#define IXGBE_VF_RESET 0x01 /* VF requests reset */ -#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ /* mailbox API, version 1.0 VF requests */ #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ @@ -105,20 +102,20 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */ /* GET_QUEUES return data indices within the mailbox */ -#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ -#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ -#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ -#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ +#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port VLAN */ +#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ /* length of permanent address message returned from PF */ -#define IXGBE_VF_PERMADDR_MSG_LEN 4 +#define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ -#define IXGBE_VF_MC_TYPE_WORD 3 +#define IXGBE_VF_MC_TYPE_WORD 3 -#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ -#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define
IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ /* forward declaration of the HW struct */ struct ixgbe_hw; diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h index 3e712fd6e695..2764fd16261f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -28,58 +27,58 @@ #ifndef _IXGBEVF_REGS_H_ #define _IXGBEVF_REGS_H_ -#define IXGBE_VFCTRL 0x00000 -#define IXGBE_VFSTATUS 0x00008 -#define IXGBE_VFLINKS 0x00010 -#define IXGBE_VFFRTIMER 0x00048 -#define IXGBE_VFRXMEMWRAP 0x03190 -#define IXGBE_VTEICR 0x00100 -#define IXGBE_VTEICS 0x00104 -#define IXGBE_VTEIMS 0x00108 -#define IXGBE_VTEIMC 0x0010C -#define IXGBE_VTEIAC 0x00110 -#define IXGBE_VTEIAM 0x00114 -#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) -#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) -#define IXGBE_VTIVAR_MISC 0x00140 -#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) -#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) -#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) -#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) -#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) -#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) -#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) -#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) -#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) -#define IXGBE_VFPSRTYPE 0x00300 -#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) -#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) -#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) -#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) -#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) -#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) -#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) -#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) -#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) -#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) -#define IXGBE_VFGPRC 0x0101C -#define IXGBE_VFGPTC 0x0201C -#define IXGBE_VFGORC_LSB 0x01020 -#define IXGBE_VFGORC_MSB 0x01024 -#define IXGBE_VFGOTC_LSB 0x02020 -#define IXGBE_VFGOTC_MSB 0x02024 -#define IXGBE_VFMPRC 0x01034 -#define IXGBE_VFMRQC 0x3000 -#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) -#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) +#define IXGBE_VFCTRL 0x00000 +#define IXGBE_VFSTATUS 0x00008 +#define IXGBE_VFLINKS 0x00010 +#define IXGBE_VFFRTIMER 0x00048 +#define IXGBE_VFRXMEMWRAP 0x03190 +#define IXGBE_VTEICR 0x00100 +#define IXGBE_VTEICS 0x00104 +#define IXGBE_VTEIMS 0x00108 +#define IXGBE_VTEIMC 0x0010C +#define IXGBE_VTEIAC 0x00110 +#define IXGBE_VTEIAM 0x00114 +#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) +#define IXGBE_VTIVAR_MISC 0x00140 +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) +#define 
IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) +#define IXGBE_VFPSRTYPE 0x00300 +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) +#define IXGBE_VFGPRC 0x0101C +#define IXGBE_VFGPTC 0x0201C +#define IXGBE_VFGORC_LSB 0x01020 +#define IXGBE_VFGORC_MSB 0x01024 +#define IXGBE_VFGOTC_LSB 0x02020 +#define IXGBE_VFGOTC_MSB 0x02024 +#define IXGBE_VFMPRC 0x01034 +#define IXGBE_VFMRQC 0x3000 +#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) +#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) /* VFMRQC bits */ -#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ -#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 -#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) +#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) #endif /* _IXGBEVF_REGS_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index f510a5822f90..2614fd328e47 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
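The vf.c hunks that follow replace open-coded memcpy(..., ETH_ALEN) calls with ether_addr_copy(). The helper copies exactly six bytes and requires both pointers to be at least 16-bit aligned, which lets the compiler emit two fixed-size stores instead of a generic copy. A sketch of the idea, mirroring the shape of the include/linux/etherdevice.h helper on platforms with efficient unaligned access (not a verbatim copy):

#include <linux/types.h>

/* Illustrative sketch: move a MAC address as one 32-bit and one
 * 16-bit store; callers must guarantee u16 alignment of both buffers.
 */
static inline void example_mac_copy(u8 *dst, const u8 *src)
{
        *(u32 *)dst = *(const u32 *)src;
        *(u16 *)(dst + 4) = *(const u16 *)(src + 4);
}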
@@ -102,9 +101,10 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) mdelay(10); - /* set our "perm_addr" based on info provided by PF */ - /* also set up the mc_filter_type which is piggy backed - * on the mac address in word 3 */ + /* set our "perm_addr" based on info provided by PF + * also set up the mc_filter_type which is piggy backed + * on the mac address in word 3 + */ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); if (ret_val) return ret_val; @@ -117,7 +117,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_INVALID_MAC_ADDR; - memcpy(hw->mac.perm_addr, addr, ETH_ALEN); + ether_addr_copy(hw->mac.perm_addr, addr); hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; return 0; @@ -138,8 +138,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) u32 reg_val; u16 i; - /* - * Set the adapter_stopped flag so other driver functions stop touching + /* Set the adapter_stopped flag so other driver functions stop touching * the hardware */ hw->adapter_stopped = true; @@ -182,7 +181,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) * * Extracts the 12 bits, from a multicast address, to determine which * bit-vector to set in the multicast table. The hardware uses 12 bits, from - * incoming rx multicast addresses, to determine the bit-vector to check in + * incoming Rx multicast addresses, to determine the bit-vector to check in * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type. @@ -220,7 +219,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) **/ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) { - memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); + ether_addr_copy(mac_addr, hw->mac.perm_addr); return 0; } @@ -233,8 +232,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) s32 ret_val; memset(msgbuf, 0, sizeof(msgbuf)); - /* - * If index is one then this is the start of a new list and needs + /* If index is one then this is the start of a new list and needs * indication to the PF so it can do its own list management. * If it is zero then that tells the PF to just clear all of * this VF's macvlans and there is no new list.
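For the 12-bit MTA hash that the ixgbevf_mta_vector() comment above describes, here is a sketch of the extraction for one MO setting. The window shown (high bits of the address) is an assumption for illustration; the other three MO settings slide this 12-bit window, and only the low 12 bits ever index the table:

#include <linux/types.h>

/* Illustrative sketch: build the multicast-table bit-vector index
 * from the top two bytes of the MAC address, masked to 12 bits.
 */
static u32 example_mta_vector(const u8 *mc_addr)
{
        u32 vector = (mc_addr[4] >> 4) | ((u32)mc_addr[5] << 4);

        return vector & 0xFFF;
}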
@@ -242,7 +240,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; msgbuf[0] |= IXGBE_VF_SET_MACVLAN; if (addr) - memcpy(msg_addr, addr, ETH_ALEN); + ether_addr_copy(msg_addr, addr); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) @@ -275,7 +273,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, memset(msgbuf, 0, sizeof(msgbuf)); msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; - memcpy(msg_addr, addr, ETH_ALEN); + ether_addr_copy(msg_addr, addr); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) @@ -292,7 +290,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, } static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, - u32 *msg, u16 size) + u32 *msg, u16 size) { struct ixgbe_mbx_info *mbx = &hw->mbx; u32 retmsg[IXGBE_VFMAILBOX_SIZE]; @@ -348,7 +346,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, } /** - * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address + * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID * @vind: unused by VF drivers @@ -462,7 +460,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, } /* if the read failed it could just be a mailbox collision, best wait - * until we are called again and don't report an error */ + * until we are called again and don't report an error + */ if (mbx->ops.read(hw, &in_msg, 1)) goto out; @@ -480,7 +479,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, } /* if we passed all the tests above then the link is up and we no - * longer need to check for link */ + * longer need to check for link + */ mac->get_link_status = false; out: @@ -561,8 +561,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; - /* - * if we we didn't get an ACK there must have been + /* if we didn't get an ACK there must have been * some sort of mailbox error so we should treat it * as such */ @@ -595,17 +594,17 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, } static const struct ixgbe_mac_operations ixgbevf_mac_ops = { - .init_hw = ixgbevf_init_hw_vf, - .reset_hw = ixgbevf_reset_hw_vf, - .start_hw = ixgbevf_start_hw_vf, - .get_mac_addr = ixgbevf_get_mac_addr_vf, - .stop_adapter = ixgbevf_stop_hw_vf, - .setup_link = ixgbevf_setup_mac_link_vf, - .check_link = ixgbevf_check_mac_link_vf, - .set_rar = ixgbevf_set_rar_vf, - .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, - .set_uc_addr = ixgbevf_set_uc_addr_vf, - .set_vfta = ixgbevf_set_vfta_vf, + .init_hw = ixgbevf_init_hw_vf, + .reset_hw = ixgbevf_reset_hw_vf, + .start_hw = ixgbevf_start_hw_vf, + .get_mac_addr = ixgbevf_get_mac_addr_vf, + .stop_adapter = ixgbevf_stop_hw_vf, + .setup_link = ixgbevf_setup_mac_link_vf, + .check_link = ixgbevf_check_mac_link_vf, + .set_rar = ixgbevf_set_rar_vf, + .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, + .set_uc_addr = ixgbevf_set_uc_addr_vf, + .set_vfta = ixgbevf_set_vfta_vf, }; const struct ixgbevf_info ixgbevf_82599_vf_info = { diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 5b172427f459..6688250da7a1 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -169,7 +168,7 @@ struct ixgbevf_hw_stats { }; struct ixgbevf_info { - enum ixgbe_mac_type mac; + enum ixgbe_mac_type mac; const struct ixgbe_mac_operations *mac_ops; }; @@ -185,23 +184,26 @@ static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) return; writel(value, reg_addr + reg); } + #define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v) u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg); #define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r) static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, - u32 offset, u32 value) + u32 offset, u32 value) { ixgbe_write_reg(hw, reg + (offset << 2), value); } + #define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v) static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, - u32 offset) + u32 offset) { return ixgbevf_read_reg(hw, reg + (offset << 2)); } + #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); @@ -209,4 +211,3 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc); #endif /* __IXGBE_VF_H__ */ - diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index c59ed925adaf..a8339e98ad24 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev) /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->rx_mode_task); - mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); - #ifdef CONFIG_MLX4_EN_VXLAN if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) vxlan_get_rx_port(dev); @@ -2379,6 +2377,33 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, } #endif +static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index]; + struct mlx4_update_qp_params params; + int err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) + return -EOPNOTSUPP; + + /* rate provided to us in Mbps, check if it fits into 12 bits, if not use Gbps */ + if (maxrate >> 12) { + params.rate_unit = MLX4_QP_RATE_LIMIT_GBS; + params.rate_val = maxrate / 1000; + } else if (maxrate) { + params.rate_unit = MLX4_QP_RATE_LIMIT_MBS; + params.rate_val = maxrate; + } else { /* zero serves to revoke the QP rate-limitation */ + params.rate_unit = 0; + params.rate_val = 0; + } + + err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, + &params); + return err; +} + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2410,6 +2435,7 @@ static const struct net_device_ops
mlx4_netdev_ops = { .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, #endif + .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, }; static const struct net_device_ops mlx4_netdev_ops_master = { @@ -2444,6 +2470,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, #endif + .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, }; struct mlx4_en_bond { @@ -2853,6 +2880,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); + mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); + return 0; out: diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 242bcee5d774..4a471f5d1b56 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -144,7 +144,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [19] = "Performance optimized for limited rule configuration flow steering support", [20] = "Recoverable error events support", [21] = "Port Remap support", - [22] = "QCN support" + [22] = "QCN support", + [23] = "QP rate limiting support" }; int i; @@ -697,6 +698,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac +#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc +#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0 +#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2 + dev_cap->flags2 = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -904,6 +909,18 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET); dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); + dev_cap->rl_caps.num_rates = size; + if (dev_cap->rl_caps.num_rates) { + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET); + dev_cap->rl_caps.max_val = size & 0xfff; + dev_cap->rl_caps.max_unit = size >> 14; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET); + dev_cap->rl_caps.min_val = size & 0xfff; + dev_cap->rl_caps.min_unit = size >> 14; + } + MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); if (field32 & (1 << 16)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; @@ -979,6 +996,15 @@ void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->dmfs_high_rate_qpn_base); mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n", dev_cap->dmfs_high_rate_qpn_range); + + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) { + struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps; + + mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n", + rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val, + rl_caps->min_unit, rl_caps->min_val); + } + dump_dev_cap_flags(dev, dev_cap->flags); dump_dev_cap_flags2(dev, dev_cap->flags2); } @@ -1075,6 +1101,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, u64 flags; int err = 0; u8 field; + u16 field16; u32 bmme_flags, field32; int real_port; int slave_port; @@ -1158,6 +1185,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, field &= 0xfe; MLX4_PUT(outbox->buf, field, 
QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + /* turn off QP max-rate limiting for guests */ + field16 = 0; + MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index f44f7f6017ed..863655bd3947 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -127,6 +127,7 @@ struct mlx4_dev_cap { u32 max_counters; u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; + struct mlx4_rate_limit_caps rl_caps; struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; }; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7e487223489a..43aa76775b5f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -489,6 +489,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; } + dev->caps.rl_caps = dev_cap->rl_caps; + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = dev->caps.dmfs_high_rate_qpn_range; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 94553b501c76..8ad241bbcf25 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -453,7 +453,7 @@ struct mlx4_en_port_stats { unsigned long rx_chksum_none; unsigned long rx_chksum_complete; unsigned long tx_chksum_offload; -#define NUM_PORT_STATS 9 +#define NUM_PORT_STATS 10 }; struct mlx4_en_perf_stats { diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index eda29dbbfcd2..69e4462e4ee4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -442,6 +442,11 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN); } + if (attr & MLX4_UPDATE_QP_RATE_LIMIT) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT; + cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val); + } + cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); cmd->qp_mask = cpu_to_be64(qp_mask); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index d43e25914d19..c258f8625aac 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2947,8 +2947,12 @@ static int verify_qp_parameters(struct mlx4_dev *dev, qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; optpar = be32_to_cpu(*(__be32 *) inbox->buf); - if (slave != mlx4_master_func_num(dev)) + if (slave != mlx4_master_func_num(dev)) { qp_ctx->params2 &= ~MLX4_QP_BIT_FPP; + /* setting QP rate-limit is disallowed for VFs */ + if (qp_ctx->rate_limit_params) + return -EPERM; + } switch (qp_type) { case MLX4_QP_ST_RC: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 5394a8486558..350c6297fe5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -697,7 +697,6 @@ err_dbg: debugfs_remove(priv->dbg_root); return err; } -EXPORT_SYMBOL(mlx5_dev_init); static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 
d36599f47af5..7bf9c028d8d7 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -1557,7 +1557,7 @@ static int octeon_mgmt_remove(struct platform_device *pdev) return 0; } -static struct of_device_id octeon_mgmt_match[] = { +static const struct of_device_id octeon_mgmt_match[] = { { .compatible = "cavium,octeon-5750-mix", }, diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 223348d8cc07..c9558e6d57ad 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -30,6 +30,7 @@ #include <linux/if_vlan.h> #include <linux/if_bridge.h> #include <linux/bitops.h> +#include <linux/ctype.h> #include <net/switchdev.h> #include <net/rtnetlink.h> #include <net/ip_fib.h> @@ -1630,6 +1631,53 @@ rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker, return 0; } +struct port_name { + char *buf; + size_t len; +}; + +static int +rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct port_name *name = priv; + struct rocker_tlv *attr; + size_t i, j, len; + char *str; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; + if (!attr) + return -EIO; + + len = min_t(size_t, rocker_tlv_len(attr), name->len); + str = rocker_tlv_data(attr); + + /* make sure name only contains alphanumeric characters */ + for (i = j = 0; i < len; ++i) { + if (isalnum(str[i])) { + name->buf[j] = str[i]; + j++; + } + } + + if (j == 0) + return -EIO; + + name->buf[j] = '\0'; + + return 0; +} + static int rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker, struct rocker_port *rocker_port, @@ -2955,11 +3003,16 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, struct rocker_port *p; struct rocker *rocker = rocker_port->rocker; u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 group_ids[ROCKER_FP_PORTS_MAX]; + u32 *group_ids; u8 group_count = 0; - int err; + int err = 0; int i; + group_ids = kcalloc(rocker->port_count, sizeof(u32), + rocker_op_flags_gfp(flags)); + if (!group_ids) + return -ENOMEM; + /* Adjust the flood group for this VLAN. The flood group * references an L2 interface group for each port in this * VLAN. 
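The rocker_port_vlan_flood_group() hunk above trades the fixed ROCKER_FP_PORTS_MAX stack array for a heap allocation sized from the live port count, so every exit path now has to release the buffer, including the early "no bridged ports in this VLAN" return reworked in the next hunk. A minimal sketch of that allocate/fill/free pattern (the function name is hypothetical and the fill and programming steps are elided):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative sketch: size the group-ID array at runtime and funnel
 * all returns through one label so the array is always freed.
 */
static int example_build_flood_group(unsigned int port_count)
{
        u32 *group_ids;
        u8 group_count = 0;
        int err = 0;

        group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
        if (!group_ids)
                return -ENOMEM;

        /* ... collect one L2 interface group ID per bridged port ... */

        if (group_count == 0)
                goto out;       /* nothing to program, but still free */

        /* ... program the hardware flood group, set err on failure ... */
out:
        kfree(group_ids);
        return err;
}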
@@ -2977,7 +3030,7 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, /* If there are no bridged ports in this VLAN, we're done */ if (group_count == 0) - return 0; + goto no_ports_in_vlan; err = rocker_group_l2_flood(rocker_port, flags, vlan_id, group_count, group_ids, @@ -2986,6 +3039,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, netdev_err(rocker_port->dev, "Error (%d) port VLAN l2 flood group\n", err); +no_ports_in_vlan: + kfree(group_ids); return err; } @@ -4131,8 +4186,42 @@ static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, rocker_port->brport_flags, mask); } -static int rocker_port_switch_parent_id_get(struct net_device *dev, - struct netdev_phys_item_id *psid) +static int rocker_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct port_name name = { .buf = buf, .len = len }; + int err; + + err = rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_phys_name_proc, + &name, false); + + return err ? -EOPNOTSUPP : 0; +} + +static const struct net_device_ops rocker_port_netdev_ops = { + .ndo_open = rocker_port_open, + .ndo_stop = rocker_port_stop, + .ndo_start_xmit = rocker_port_xmit, + .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid, + .ndo_fdb_add = rocker_port_fdb_add, + .ndo_fdb_del = rocker_port_fdb_del, + .ndo_fdb_dump = rocker_port_fdb_dump, + .ndo_bridge_setlink = rocker_port_bridge_setlink, + .ndo_bridge_getlink = rocker_port_bridge_getlink, + .ndo_get_phys_port_name = rocker_port_get_phys_port_name, +}; + +/******************** + * swdev interface + ********************/ + +static int rocker_port_swdev_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) { struct rocker_port *rocker_port = netdev_priv(dev); struct rocker *rocker = rocker_port->rocker; @@ -4142,18 +4231,18 @@ static int rocker_port_switch_parent_id_get(struct net_device *dev, return 0; } -static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state) +static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state) { struct rocker_port *rocker_port = netdev_priv(dev); return rocker_port_stp_update(rocker_port, state); } -static int rocker_port_switch_fib_ipv4_add(struct net_device *dev, - __be32 dst, int dst_len, - struct fib_info *fi, - u8 tos, u8 type, - u32 nlflags, u32 tb_id) +static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev, + __be32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, + u32 nlflags, u32 tb_id) { struct rocker_port *rocker_port = netdev_priv(dev); int flags = 0; @@ -4162,10 +4251,10 @@ static int rocker_port_switch_fib_ipv4_add(struct net_device *dev, fi, tb_id, flags); } -static int rocker_port_switch_fib_ipv4_del(struct net_device *dev, - __be32 dst, int dst_len, - struct fib_info *fi, - u8 tos, u8 type, u32 tb_id) +static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev, + __be32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, u32 tb_id) { struct rocker_port *rocker_port = netdev_priv(dev); int flags = ROCKER_OP_FLAG_REMOVE; @@ -4174,22 +4263,11 @@ static int rocker_port_switch_fib_ipv4_del(struct net_device *dev, fi, tb_id, flags); } -static const struct net_device_ops rocker_port_netdev_ops = { - .ndo_open = rocker_port_open, - .ndo_stop = 
rocker_port_stop, - .ndo_start_xmit = rocker_port_xmit, - .ndo_set_mac_address = rocker_port_set_mac_address, - .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid, - .ndo_fdb_add = rocker_port_fdb_add, - .ndo_fdb_del = rocker_port_fdb_del, - .ndo_fdb_dump = rocker_port_fdb_dump, - .ndo_bridge_setlink = rocker_port_bridge_setlink, - .ndo_bridge_getlink = rocker_port_bridge_getlink, - .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get, - .ndo_switch_port_stp_update = rocker_port_switch_port_stp_update, - .ndo_switch_fib_ipv4_add = rocker_port_switch_fib_ipv4_add, - .ndo_switch_fib_ipv4_del = rocker_port_switch_fib_ipv4_del, +static const struct swdev_ops rocker_port_swdev_ops = { + .swdev_parent_id_get = rocker_port_swdev_parent_id_get, + .swdev_port_stp_update = rocker_port_swdev_port_stp_update, + .swdev_fib_ipv4_add = rocker_port_swdev_fib_ipv4_add, + .swdev_fib_ipv4_del = rocker_port_swdev_fib_ipv4_del, }; /******************** @@ -4544,6 +4622,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port_dev_addr_init(rocker, rocker_port); dev->netdev_ops = &rocker_port_netdev_ops; dev->ethtool_ops = &rocker_port_ethtool_ops; + dev->swdev_ops = &rocker_port_swdev_ops; netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, NAPI_POLL_WEIGHT); netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 51e430d25138..a4e9591d7457 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -158,6 +158,7 @@ enum { ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, ROCKER_TLV_CMD_PORT_SETTINGS_MAX = diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 5d093dc0f5f5..8678e39aba08 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2248,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev) const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; - struct resource *res; + struct resource *res, *ires; unsigned int __iomem *addr; unsigned long irq_flags = SMC_IRQ_FLAGS; - unsigned long irq_resflags; int ret; ndev = alloc_etherdev(sizeof(struct smc_local)); @@ -2343,19 +2342,16 @@ static int smc_drv_probe(struct platform_device *pdev) goto out_free_netdev; } - ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq <= 0) { + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!ires) { ret = -ENODEV; goto out_release_io; } - /* - * If this platform does not specify any special irqflags, or if - * the resource supplies a trigger, override the irqflags with - * the trigger flags from the resource. 
- */ - irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); - if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) - irq_flags = irq_resflags & IRQF_TRIGGER_MASK; + + ndev->irq = ires->start; + + if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) + irq_flags = ires->flags & IRQF_TRIGGER_MASK; ret = smc_request_attrib(pdev, ndev); if (ret) diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index f6a71092e135..631e0afd07d2 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -88,6 +88,7 @@ config TI_CPTS config TI_KEYSTONE_NETCP tristate "TI Keystone NETCP Core Support" select TI_CPSW_ALE + select TI_DAVINCI_MDIO depends on OF depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS ---help--- diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 906e9bc412f5..bbacf5cccec2 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -41,7 +41,10 @@ struct netcp_tx_pipe { struct netcp_device *netcp_device; void *dma_queue; unsigned int dma_queue_id; - u8 dma_psflags; + /* To port for packet forwarded to switch. Used only by ethss */ + u8 switch_to_port; +#define SWITCH_TO_PORT_IN_TAGINFO BIT(0) + u8 flags; void *dma_channel; const char *dma_chan_name; }; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 9f14d8b515c7..43efc3a0cda5 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1098,9 +1098,9 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, struct netcp_tx_pipe *tx_pipe = NULL; struct netcp_hook_list *tx_hook; struct netcp_packet p_info; - u32 packet_info = 0; unsigned int dma_sz; dma_addr_t dma; + u32 tmp = 0; int ret = 0; p_info.netcp = netcp; @@ -1140,20 +1140,27 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp, memmove(p_info.psdata, p_info.psdata + p_info.psdata_len, p_info.psdata_len); set_words(psdata, p_info.psdata_len, psdata); - packet_info |= - (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) << + tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) << KNAV_DMA_DESC_PSLEN_SHIFT; } - packet_info |= KNAV_DMA_DESC_HAS_EPIB | + tmp |= KNAV_DMA_DESC_HAS_EPIB | ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) << - KNAV_DMA_DESC_RETQ_SHIFT) | - ((tx_pipe->dma_psflags & KNAV_DMA_DESC_PSFLAG_MASK) << - KNAV_DMA_DESC_PSFLAG_SHIFT); + KNAV_DMA_DESC_RETQ_SHIFT); - set_words(&packet_info, 1, &desc->packet_info); + if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) { + tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) << + KNAV_DMA_DESC_PSFLAG_SHIFT); + } + + set_words(&tmp, 1, &desc->packet_info); set_words((u32 *)&skb, 1, &desc->pad[0]); + if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { + tmp = tx_pipe->switch_to_port; + set_words((u32 *)&tmp, 1, &desc->tag_info); + } + /* submit packet descriptor */ ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma, &dma_sz); @@ -2127,7 +2134,7 @@ static int netcp_remove(struct platform_device *pdev) return 0; } -static struct of_device_id of_match[] = { +static const struct of_device_id of_match[] = { { .compatible = "ti,netcp-1.0", }, {}, }; diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 84f5ce525750..2bef655279f3 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -40,35 +40,61 @@ #define GBE_MODULE_NAME "netcp-gbe" #define GBE_SS_VERSION_14 0x4ed21104 +#define GBE_SS_REG_INDEX 0 
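One note on the netcp_core.c hunk above, before the ethss register-index defines resume below: the egress port of a transmitted packet now travels in one of two descriptor fields selected by the tx_pipe flags. It is either folded into the PS-flags bits of packet_info (the older ethss behaviour) or written to the separate tag_info word when SWITCH_TO_PORT_IN_TAGINFO is set, which a later hunk enables for 10G and NetCP 1.5 ("use directed to port"). A compact C sketch of that packing decision follows; the mask, shift, and flag values here are placeholders standing in for the real KNAV_DMA_DESC_* constants from the Keystone Navigator headers.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real ones are the KNAV_DMA_DESC_* definitions. */
#define DESC_HAS_EPIB		(1u << 31)
#define DESC_RETQ_MASK		0x3fffu
#define DESC_RETQ_SHIFT		0
#define DESC_PSFLAG_MASK	0xfu
#define DESC_PSFLAG_SHIFT	16
#define TO_PORT_IN_TAGINFO	(1u << 0)  /* mirrors SWITCH_TO_PORT_IN_TAGINFO */

struct fake_desc {
	uint32_t packet_info;
	uint32_t tag_info;
};

/* Mirrors the tail of netcp_tx_submit_skb(): the destination port goes
 * either into packet_info's PS-flag bits (NetCP 1.4 style) or into the
 * separate tag_info word (10G / NetCP 1.5 style).
 */
static void pack_tx_desc(struct fake_desc *d, uint32_t compl_qid,
			 uint8_t to_port, uint8_t flags)
{
	uint32_t tmp = DESC_HAS_EPIB |
		       ((compl_qid & DESC_RETQ_MASK) << DESC_RETQ_SHIFT);

	if (!(flags & TO_PORT_IN_TAGINFO))
		tmp |= (to_port & DESC_PSFLAG_MASK) << DESC_PSFLAG_SHIFT;
	d->packet_info = tmp;

	if (flags & TO_PORT_IN_TAGINFO)
		d->tag_info = to_port;
}

int main(void)
{
	struct fake_desc d = { 0, 0 };

	pack_tx_desc(&d, 645, 2, TO_PORT_IN_TAGINFO);
	printf("packet_info=%08x tag_info=%08x\n",
	       (unsigned)d.packet_info, (unsigned)d.tag_info);
	return 0;
}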
+#define GBE_SGMII34_REG_INDEX 1 +#define GBE_SM_REG_INDEX 2 +/* offset relative to base of GBE_SS_REG_INDEX */ #define GBE13_SGMII_MODULE_OFFSET 0x100 -#define GBE13_SGMII34_MODULE_OFFSET 0x400 -#define GBE13_SWITCH_MODULE_OFFSET 0x800 -#define GBE13_HOST_PORT_OFFSET 0x834 -#define GBE13_SLAVE_PORT_OFFSET 0x860 -#define GBE13_EMAC_OFFSET 0x900 -#define GBE13_SLAVE_PORT2_OFFSET 0xa00 -#define GBE13_HW_STATS_OFFSET 0xb00 -#define GBE13_ALE_OFFSET 0xe00 +/* offset relative to base of GBE_SM_REG_INDEX */ +#define GBE13_HOST_PORT_OFFSET 0x34 +#define GBE13_SLAVE_PORT_OFFSET 0x60 +#define GBE13_EMAC_OFFSET 0x100 +#define GBE13_SLAVE_PORT2_OFFSET 0x200 +#define GBE13_HW_STATS_OFFSET 0x300 +#define GBE13_ALE_OFFSET 0x600 #define GBE13_HOST_PORT_NUM 0 -#define GBE13_NUM_SLAVES 4 -#define GBE13_NUM_ALE_PORTS (GBE13_NUM_SLAVES + 1) #define GBE13_NUM_ALE_ENTRIES 1024 +/* 1G Ethernet NU SS defines */ +#define GBENU_MODULE_NAME "netcp-gbenu" +#define GBE_SS_ID_NU 0x4ee6 +#define GBE_SS_ID_2U 0x4ee8 + +#define IS_SS_ID_MU(d) \ + ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \ + (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)) + +#define IS_SS_ID_NU(d) \ + (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) + +#define GBENU_SS_REG_INDEX 0 +#define GBENU_SM_REG_INDEX 1 +#define GBENU_SGMII_MODULE_OFFSET 0x100 +#define GBENU_HOST_PORT_OFFSET 0x1000 +#define GBENU_SLAVE_PORT_OFFSET 0x2000 +#define GBENU_EMAC_OFFSET 0x2330 +#define GBENU_HW_STATS_OFFSET 0x1a000 +#define GBENU_ALE_OFFSET 0x1e000 +#define GBENU_HOST_PORT_NUM 0 +#define GBENU_NUM_ALE_ENTRIES 1024 + /* 10G Ethernet SS defines */ #define XGBE_MODULE_NAME "netcp-xgbe" #define XGBE_SS_VERSION_10 0x4ee42100 -#define XGBE_SERDES_REG_INDEX 1 +#define XGBE_SS_REG_INDEX 0 +#define XGBE_SM_REG_INDEX 1 +#define XGBE_SERDES_REG_INDEX 2 + +/* offset relative to base of XGBE_SS_REG_INDEX */ #define XGBE10_SGMII_MODULE_OFFSET 0x100 -#define XGBE10_SWITCH_MODULE_OFFSET 0x1000 -#define XGBE10_HOST_PORT_OFFSET 0x1034 -#define XGBE10_SLAVE_PORT_OFFSET 0x1064 -#define XGBE10_EMAC_OFFSET 0x1400 -#define XGBE10_ALE_OFFSET 0x1700 -#define XGBE10_HW_STATS_OFFSET 0x1800 +/* offset relative to base of XGBE_SM_REG_INDEX */ +#define XGBE10_HOST_PORT_OFFSET 0x34 +#define XGBE10_SLAVE_PORT_OFFSET 0x64 +#define XGBE10_EMAC_OFFSET 0x400 +#define XGBE10_ALE_OFFSET 0x700 +#define XGBE10_HW_STATS_OFFSET 0x800 #define XGBE10_HOST_PORT_NUM 0 -#define XGBE10_NUM_SLAVES 2 -#define XGBE10_NUM_ALE_PORTS (XGBE10_NUM_SLAVES + 1) #define XGBE10_NUM_ALE_ENTRIES 1024 #define GBE_TIMER_INTERVAL (HZ / 2) @@ -88,7 +114,7 @@ #define MACSL_FULLDUPLEX BIT(0) #define GBE_CTL_P0_ENABLE BIT(2) -#define GBE_REG_VAL_STAT_ENABLE_ALL 0xff +#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff #define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf #define GBE_STATS_CD_SEL BIT(28) @@ -108,11 +134,20 @@ #define GBE_STATSC_MODULE 2 #define GBE_STATSD_MODULE 3 +#define GBENU_STATS0_MODULE 0 +#define GBENU_STATS1_MODULE 1 +#define GBENU_STATS2_MODULE 2 +#define GBENU_STATS3_MODULE 3 +#define GBENU_STATS4_MODULE 4 +#define GBENU_STATS5_MODULE 5 +#define GBENU_STATS6_MODULE 6 +#define GBENU_STATS7_MODULE 7 +#define GBENU_STATS8_MODULE 8 + #define XGBE_STATS0_MODULE 0 #define XGBE_STATS1_MODULE 1 #define XGBE_STATS2_MODULE 2 -#define MAX_SLAVES GBE13_NUM_SLAVES /* s: 0-based slave_port */ #define SGMII_BASE(s) \ (((s) < 2) ? 
gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) @@ -125,10 +160,14 @@ #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ offsetof(struct gbe##_##rb, rn) +#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ + offsetof(struct gbenu##_##rb, rn) #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ offsetof(struct xgbe##_##rb, rn) #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn) +#define HOST_TX_PRI_MAP_DEFAULT 0x00000000 + struct xgbe_ss_regs { u32 id_ver; u32 synce_count; @@ -258,6 +297,192 @@ struct xgbe_hw_stats { #define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32)) +struct gbenu_ss_regs { + u32 id_ver; + u32 synce_count; /* NU */ + u32 synce_mux; /* NU */ + u32 control; /* 2U */ + u32 __rsvd_0[2]; /* 2U */ + u32 rgmii_status; /* 2U */ + u32 ss_status; /* 2U */ +}; + +struct gbenu_switch_regs { + u32 id_ver; + u32 control; + u32 __rsvd_0[2]; + u32 emcontrol; + u32 stat_port_en; + u32 ptype; /* NU */ + u32 soft_idle; + u32 thru_rate; /* NU */ + u32 gap_thresh; /* NU */ + u32 tx_start_wds; /* NU */ + u32 eee_prescale; /* 2U */ + u32 tx_g_oflow_thresh_set; /* NU */ + u32 tx_g_oflow_thresh_clr; /* NU */ + u32 tx_g_buf_thresh_set_l; /* NU */ + u32 tx_g_buf_thresh_set_h; /* NU */ + u32 tx_g_buf_thresh_clr_l; /* NU */ + u32 tx_g_buf_thresh_clr_h; /* NU */ +}; + +struct gbenu_port_regs { + u32 __rsvd_0; + u32 control; + u32 max_blks; /* 2U */ + u32 mem_align1; + u32 blk_cnt; + u32 port_vlan; + u32 tx_pri_map; /* NU */ + u32 pri_ctl; /* 2U */ + u32 rx_pri_map; + u32 rx_maxlen; + u32 tx_blks_pri; /* NU */ + u32 __rsvd_1; + u32 idle2lpi; /* 2U */ + u32 lpi2idle; /* 2U */ + u32 eee_status; /* 2U */ + u32 __rsvd_2; + u32 __rsvd_3[176]; /* NU: more to add */ + u32 __rsvd_4[2]; + u32 sa_lo; + u32 sa_hi; + u32 ts_ctl; + u32 ts_seq_ltype; + u32 ts_vlan; + u32 ts_ctl_ltype2; + u32 ts_ctl2; +}; + +struct gbenu_host_port_regs { + u32 __rsvd_0; + u32 control; + u32 flow_id_offset; /* 2U */ + u32 __rsvd_1; + u32 blk_cnt; + u32 port_vlan; + u32 tx_pri_map; /* NU */ + u32 pri_ctl; + u32 rx_pri_map; + u32 rx_maxlen; + u32 tx_blks_pri; /* NU */ + u32 __rsvd_2; + u32 idle2lpi; /* 2U */ + u32 lpi2wake; /* 2U */ + u32 eee_status; /* 2U */ + u32 __rsvd_3; + u32 __rsvd_4[184]; /* NU */ + u32 host_blks_pri; /* NU */ +}; + +struct gbenu_emac_regs { + u32 mac_control; + u32 mac_status; + u32 soft_reset; + u32 boff_test; + u32 rx_pause; + u32 __rsvd_0[11]; /* NU */ + u32 tx_pause; + u32 __rsvd_1[11]; /* NU */ + u32 em_control; + u32 tx_gap; +}; + +/* Some hw stat regs are applicable to slave port only. + * This is handled by gbenu_et_stats struct. Also some + * are for SS version NU and some are for 2U. 
+ */ +struct gbenu_hw_stats { + u32 rx_good_frames; + u32 rx_broadcast_frames; + u32 rx_multicast_frames; + u32 rx_pause_frames; /* slave */ + u32 rx_crc_errors; + u32 rx_align_code_errors; /* slave */ + u32 rx_oversized_frames; + u32 rx_jabber_frames; /* slave */ + u32 rx_undersized_frames; + u32 rx_fragments; /* slave */ + u32 ale_drop; + u32 ale_overrun_drop; + u32 rx_bytes; + u32 tx_good_frames; + u32 tx_broadcast_frames; + u32 tx_multicast_frames; + u32 tx_pause_frames; /* slave */ + u32 tx_deferred_frames; /* slave */ + u32 tx_collision_frames; /* slave */ + u32 tx_single_coll_frames; /* slave */ + u32 tx_mult_coll_frames; /* slave */ + u32 tx_excessive_collisions; /* slave */ + u32 tx_late_collisions; /* slave */ + u32 rx_ipg_error; /* slave 10G only */ + u32 tx_carrier_sense_errors; /* slave */ + u32 tx_bytes; + u32 tx_64B_frames; + u32 tx_65_to_127B_frames; + u32 tx_128_to_255B_frames; + u32 tx_256_to_511B_frames; + u32 tx_512_to_1023B_frames; + u32 tx_1024B_frames; + u32 net_bytes; + u32 rx_bottom_fifo_drop; + u32 rx_port_mask_drop; + u32 rx_top_fifo_drop; + u32 ale_rate_limit_drop; + u32 ale_vid_ingress_drop; + u32 ale_da_eq_sa_drop; + u32 __rsvd_0[3]; + u32 ale_unknown_ucast; + u32 ale_unknown_ucast_bytes; + u32 ale_unknown_mcast; + u32 ale_unknown_mcast_bytes; + u32 ale_unknown_bcast; + u32 ale_unknown_bcast_bytes; + u32 ale_pol_match; + u32 ale_pol_match_red; /* NU */ + u32 ale_pol_match_yellow; /* NU */ + u32 __rsvd_1[44]; + u32 tx_mem_protect_err; + /* following NU only */ + u32 tx_pri0; + u32 tx_pri1; + u32 tx_pri2; + u32 tx_pri3; + u32 tx_pri4; + u32 tx_pri5; + u32 tx_pri6; + u32 tx_pri7; + u32 tx_pri0_bcnt; + u32 tx_pri1_bcnt; + u32 tx_pri2_bcnt; + u32 tx_pri3_bcnt; + u32 tx_pri4_bcnt; + u32 tx_pri5_bcnt; + u32 tx_pri6_bcnt; + u32 tx_pri7_bcnt; + u32 tx_pri0_drop; + u32 tx_pri1_drop; + u32 tx_pri2_drop; + u32 tx_pri3_drop; + u32 tx_pri4_drop; + u32 tx_pri5_drop; + u32 tx_pri6_drop; + u32 tx_pri7_drop; + u32 tx_pri0_drop_bcnt; + u32 tx_pri1_drop_bcnt; + u32 tx_pri2_drop_bcnt; + u32 tx_pri3_drop_bcnt; + u32 tx_pri4_drop_bcnt; + u32 tx_pri5_drop_bcnt; + u32 tx_pri6_drop_bcnt; + u32 tx_pri7_drop_bcnt; +}; + +#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32)) +#define GBENU_HW_STATS_REG_MAP_SZ 0x200 + struct gbe_ss_regs { u32 id_ver; u32 synce_count; @@ -316,6 +541,7 @@ struct gbe_port_regs_ofs { u16 ts_vlan; u16 ts_ctl_ltype2; u16 ts_ctl2; + u16 rx_maxlen; /* 2U, NU */ }; struct gbe_host_port_regs { @@ -390,9 +616,7 @@ struct gbe_hw_stats { }; #define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32)) -#define GBE13_NUM_HW_STATS_MOD 2 -#define XGBE10_NUM_HW_STATS_MOD 3 -#define GBE_MAX_HW_STAT_MODS 3 +#define GBE_MAX_HW_STAT_MODS 9 #define GBE_HW_STATS_REG_MAP_SZ 0x100 struct gbe_slave { @@ -420,11 +644,14 @@ struct gbe_priv { u32 ale_entries; u32 ale_ports; bool enable_ale; + u8 max_num_slaves; + u8 max_num_ports; /* max_num_slaves + 1 */ struct netcp_tx_pipe tx_pipe; int host_port; u32 rx_packet_max; u32 ss_version; + u32 stats_en_mask; void __iomem *ss_regs; void __iomem *switch_regs; @@ -475,275 +702,778 @@ struct netcp_ethtool_stat { int offset; }; -#define GBE_STATSA_INFO(field) "GBE_A:"#field, GBE_STATSA_MODULE,\ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ - offsetof(struct gbe_hw_stats, field) +#define GBE_STATSA_INFO(field) \ +{ \ + "GBE_A:"#field, GBE_STATSA_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} -#define GBE_STATSB_INFO(field) "GBE_B:"#field, 
GBE_STATSB_MODULE,\ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ - offsetof(struct gbe_hw_stats, field) +#define GBE_STATSB_INFO(field) \ +{ \ + "GBE_B:"#field, GBE_STATSB_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} -#define GBE_STATSC_INFO(field) "GBE_C:"#field, GBE_STATSC_MODULE,\ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ - offsetof(struct gbe_hw_stats, field) +#define GBE_STATSC_INFO(field) \ +{ \ + "GBE_C:"#field, GBE_STATSC_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} -#define GBE_STATSD_INFO(field) "GBE_D:"#field, GBE_STATSD_MODULE,\ - FIELD_SIZEOF(struct gbe_hw_stats, field), \ - offsetof(struct gbe_hw_stats, field) +#define GBE_STATSD_INFO(field) \ +{ \ + "GBE_D:"#field, GBE_STATSD_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} static const struct netcp_ethtool_stat gbe13_et_stats[] = { /* GBE module A */ - {GBE_STATSA_INFO(rx_good_frames)}, - {GBE_STATSA_INFO(rx_broadcast_frames)}, - {GBE_STATSA_INFO(rx_multicast_frames)}, - {GBE_STATSA_INFO(rx_pause_frames)}, - {GBE_STATSA_INFO(rx_crc_errors)}, - {GBE_STATSA_INFO(rx_align_code_errors)}, - {GBE_STATSA_INFO(rx_oversized_frames)}, - {GBE_STATSA_INFO(rx_jabber_frames)}, - {GBE_STATSA_INFO(rx_undersized_frames)}, - {GBE_STATSA_INFO(rx_fragments)}, - {GBE_STATSA_INFO(rx_bytes)}, - {GBE_STATSA_INFO(tx_good_frames)}, - {GBE_STATSA_INFO(tx_broadcast_frames)}, - {GBE_STATSA_INFO(tx_multicast_frames)}, - {GBE_STATSA_INFO(tx_pause_frames)}, - {GBE_STATSA_INFO(tx_deferred_frames)}, - {GBE_STATSA_INFO(tx_collision_frames)}, - {GBE_STATSA_INFO(tx_single_coll_frames)}, - {GBE_STATSA_INFO(tx_mult_coll_frames)}, - {GBE_STATSA_INFO(tx_excessive_collisions)}, - {GBE_STATSA_INFO(tx_late_collisions)}, - {GBE_STATSA_INFO(tx_underrun)}, - {GBE_STATSA_INFO(tx_carrier_sense_errors)}, - {GBE_STATSA_INFO(tx_bytes)}, - {GBE_STATSA_INFO(tx_64byte_frames)}, - {GBE_STATSA_INFO(tx_65_to_127byte_frames)}, - {GBE_STATSA_INFO(tx_128_to_255byte_frames)}, - {GBE_STATSA_INFO(tx_256_to_511byte_frames)}, - {GBE_STATSA_INFO(tx_512_to_1023byte_frames)}, - {GBE_STATSA_INFO(tx_1024byte_frames)}, - {GBE_STATSA_INFO(net_bytes)}, - {GBE_STATSA_INFO(rx_sof_overruns)}, - {GBE_STATSA_INFO(rx_mof_overruns)}, - {GBE_STATSA_INFO(rx_dma_overruns)}, + GBE_STATSA_INFO(rx_good_frames), + GBE_STATSA_INFO(rx_broadcast_frames), + GBE_STATSA_INFO(rx_multicast_frames), + GBE_STATSA_INFO(rx_pause_frames), + GBE_STATSA_INFO(rx_crc_errors), + GBE_STATSA_INFO(rx_align_code_errors), + GBE_STATSA_INFO(rx_oversized_frames), + GBE_STATSA_INFO(rx_jabber_frames), + GBE_STATSA_INFO(rx_undersized_frames), + GBE_STATSA_INFO(rx_fragments), + GBE_STATSA_INFO(rx_bytes), + GBE_STATSA_INFO(tx_good_frames), + GBE_STATSA_INFO(tx_broadcast_frames), + GBE_STATSA_INFO(tx_multicast_frames), + GBE_STATSA_INFO(tx_pause_frames), + GBE_STATSA_INFO(tx_deferred_frames), + GBE_STATSA_INFO(tx_collision_frames), + GBE_STATSA_INFO(tx_single_coll_frames), + GBE_STATSA_INFO(tx_mult_coll_frames), + GBE_STATSA_INFO(tx_excessive_collisions), + GBE_STATSA_INFO(tx_late_collisions), + GBE_STATSA_INFO(tx_underrun), + GBE_STATSA_INFO(tx_carrier_sense_errors), + GBE_STATSA_INFO(tx_bytes), + GBE_STATSA_INFO(tx_64byte_frames), + GBE_STATSA_INFO(tx_65_to_127byte_frames), + GBE_STATSA_INFO(tx_128_to_255byte_frames), + GBE_STATSA_INFO(tx_256_to_511byte_frames), + GBE_STATSA_INFO(tx_512_to_1023byte_frames), + GBE_STATSA_INFO(tx_1024byte_frames), + GBE_STATSA_INFO(net_bytes), 
+ GBE_STATSA_INFO(rx_sof_overruns), + GBE_STATSA_INFO(rx_mof_overruns), + GBE_STATSA_INFO(rx_dma_overruns), /* GBE module B */ - {GBE_STATSB_INFO(rx_good_frames)}, - {GBE_STATSB_INFO(rx_broadcast_frames)}, - {GBE_STATSB_INFO(rx_multicast_frames)}, - {GBE_STATSB_INFO(rx_pause_frames)}, - {GBE_STATSB_INFO(rx_crc_errors)}, - {GBE_STATSB_INFO(rx_align_code_errors)}, - {GBE_STATSB_INFO(rx_oversized_frames)}, - {GBE_STATSB_INFO(rx_jabber_frames)}, - {GBE_STATSB_INFO(rx_undersized_frames)}, - {GBE_STATSB_INFO(rx_fragments)}, - {GBE_STATSB_INFO(rx_bytes)}, - {GBE_STATSB_INFO(tx_good_frames)}, - {GBE_STATSB_INFO(tx_broadcast_frames)}, - {GBE_STATSB_INFO(tx_multicast_frames)}, - {GBE_STATSB_INFO(tx_pause_frames)}, - {GBE_STATSB_INFO(tx_deferred_frames)}, - {GBE_STATSB_INFO(tx_collision_frames)}, - {GBE_STATSB_INFO(tx_single_coll_frames)}, - {GBE_STATSB_INFO(tx_mult_coll_frames)}, - {GBE_STATSB_INFO(tx_excessive_collisions)}, - {GBE_STATSB_INFO(tx_late_collisions)}, - {GBE_STATSB_INFO(tx_underrun)}, - {GBE_STATSB_INFO(tx_carrier_sense_errors)}, - {GBE_STATSB_INFO(tx_bytes)}, - {GBE_STATSB_INFO(tx_64byte_frames)}, - {GBE_STATSB_INFO(tx_65_to_127byte_frames)}, - {GBE_STATSB_INFO(tx_128_to_255byte_frames)}, - {GBE_STATSB_INFO(tx_256_to_511byte_frames)}, - {GBE_STATSB_INFO(tx_512_to_1023byte_frames)}, - {GBE_STATSB_INFO(tx_1024byte_frames)}, - {GBE_STATSB_INFO(net_bytes)}, - {GBE_STATSB_INFO(rx_sof_overruns)}, - {GBE_STATSB_INFO(rx_mof_overruns)}, - {GBE_STATSB_INFO(rx_dma_overruns)}, + GBE_STATSB_INFO(rx_good_frames), + GBE_STATSB_INFO(rx_broadcast_frames), + GBE_STATSB_INFO(rx_multicast_frames), + GBE_STATSB_INFO(rx_pause_frames), + GBE_STATSB_INFO(rx_crc_errors), + GBE_STATSB_INFO(rx_align_code_errors), + GBE_STATSB_INFO(rx_oversized_frames), + GBE_STATSB_INFO(rx_jabber_frames), + GBE_STATSB_INFO(rx_undersized_frames), + GBE_STATSB_INFO(rx_fragments), + GBE_STATSB_INFO(rx_bytes), + GBE_STATSB_INFO(tx_good_frames), + GBE_STATSB_INFO(tx_broadcast_frames), + GBE_STATSB_INFO(tx_multicast_frames), + GBE_STATSB_INFO(tx_pause_frames), + GBE_STATSB_INFO(tx_deferred_frames), + GBE_STATSB_INFO(tx_collision_frames), + GBE_STATSB_INFO(tx_single_coll_frames), + GBE_STATSB_INFO(tx_mult_coll_frames), + GBE_STATSB_INFO(tx_excessive_collisions), + GBE_STATSB_INFO(tx_late_collisions), + GBE_STATSB_INFO(tx_underrun), + GBE_STATSB_INFO(tx_carrier_sense_errors), + GBE_STATSB_INFO(tx_bytes), + GBE_STATSB_INFO(tx_64byte_frames), + GBE_STATSB_INFO(tx_65_to_127byte_frames), + GBE_STATSB_INFO(tx_128_to_255byte_frames), + GBE_STATSB_INFO(tx_256_to_511byte_frames), + GBE_STATSB_INFO(tx_512_to_1023byte_frames), + GBE_STATSB_INFO(tx_1024byte_frames), + GBE_STATSB_INFO(net_bytes), + GBE_STATSB_INFO(rx_sof_overruns), + GBE_STATSB_INFO(rx_mof_overruns), + GBE_STATSB_INFO(rx_dma_overruns), /* GBE module C */ - {GBE_STATSC_INFO(rx_good_frames)}, - {GBE_STATSC_INFO(rx_broadcast_frames)}, - {GBE_STATSC_INFO(rx_multicast_frames)}, - {GBE_STATSC_INFO(rx_pause_frames)}, - {GBE_STATSC_INFO(rx_crc_errors)}, - {GBE_STATSC_INFO(rx_align_code_errors)}, - {GBE_STATSC_INFO(rx_oversized_frames)}, - {GBE_STATSC_INFO(rx_jabber_frames)}, - {GBE_STATSC_INFO(rx_undersized_frames)}, - {GBE_STATSC_INFO(rx_fragments)}, - {GBE_STATSC_INFO(rx_bytes)}, - {GBE_STATSC_INFO(tx_good_frames)}, - {GBE_STATSC_INFO(tx_broadcast_frames)}, - {GBE_STATSC_INFO(tx_multicast_frames)}, - {GBE_STATSC_INFO(tx_pause_frames)}, - {GBE_STATSC_INFO(tx_deferred_frames)}, - {GBE_STATSC_INFO(tx_collision_frames)}, - {GBE_STATSC_INFO(tx_single_coll_frames)}, - 
{GBE_STATSC_INFO(tx_mult_coll_frames)}, - {GBE_STATSC_INFO(tx_excessive_collisions)}, - {GBE_STATSC_INFO(tx_late_collisions)}, - {GBE_STATSC_INFO(tx_underrun)}, - {GBE_STATSC_INFO(tx_carrier_sense_errors)}, - {GBE_STATSC_INFO(tx_bytes)}, - {GBE_STATSC_INFO(tx_64byte_frames)}, - {GBE_STATSC_INFO(tx_65_to_127byte_frames)}, - {GBE_STATSC_INFO(tx_128_to_255byte_frames)}, - {GBE_STATSC_INFO(tx_256_to_511byte_frames)}, - {GBE_STATSC_INFO(tx_512_to_1023byte_frames)}, - {GBE_STATSC_INFO(tx_1024byte_frames)}, - {GBE_STATSC_INFO(net_bytes)}, - {GBE_STATSC_INFO(rx_sof_overruns)}, - {GBE_STATSC_INFO(rx_mof_overruns)}, - {GBE_STATSC_INFO(rx_dma_overruns)}, + GBE_STATSC_INFO(rx_good_frames), + GBE_STATSC_INFO(rx_broadcast_frames), + GBE_STATSC_INFO(rx_multicast_frames), + GBE_STATSC_INFO(rx_pause_frames), + GBE_STATSC_INFO(rx_crc_errors), + GBE_STATSC_INFO(rx_align_code_errors), + GBE_STATSC_INFO(rx_oversized_frames), + GBE_STATSC_INFO(rx_jabber_frames), + GBE_STATSC_INFO(rx_undersized_frames), + GBE_STATSC_INFO(rx_fragments), + GBE_STATSC_INFO(rx_bytes), + GBE_STATSC_INFO(tx_good_frames), + GBE_STATSC_INFO(tx_broadcast_frames), + GBE_STATSC_INFO(tx_multicast_frames), + GBE_STATSC_INFO(tx_pause_frames), + GBE_STATSC_INFO(tx_deferred_frames), + GBE_STATSC_INFO(tx_collision_frames), + GBE_STATSC_INFO(tx_single_coll_frames), + GBE_STATSC_INFO(tx_mult_coll_frames), + GBE_STATSC_INFO(tx_excessive_collisions), + GBE_STATSC_INFO(tx_late_collisions), + GBE_STATSC_INFO(tx_underrun), + GBE_STATSC_INFO(tx_carrier_sense_errors), + GBE_STATSC_INFO(tx_bytes), + GBE_STATSC_INFO(tx_64byte_frames), + GBE_STATSC_INFO(tx_65_to_127byte_frames), + GBE_STATSC_INFO(tx_128_to_255byte_frames), + GBE_STATSC_INFO(tx_256_to_511byte_frames), + GBE_STATSC_INFO(tx_512_to_1023byte_frames), + GBE_STATSC_INFO(tx_1024byte_frames), + GBE_STATSC_INFO(net_bytes), + GBE_STATSC_INFO(rx_sof_overruns), + GBE_STATSC_INFO(rx_mof_overruns), + GBE_STATSC_INFO(rx_dma_overruns), /* GBE module D */ - {GBE_STATSD_INFO(rx_good_frames)}, - {GBE_STATSD_INFO(rx_broadcast_frames)}, - {GBE_STATSD_INFO(rx_multicast_frames)}, - {GBE_STATSD_INFO(rx_pause_frames)}, - {GBE_STATSD_INFO(rx_crc_errors)}, - {GBE_STATSD_INFO(rx_align_code_errors)}, - {GBE_STATSD_INFO(rx_oversized_frames)}, - {GBE_STATSD_INFO(rx_jabber_frames)}, - {GBE_STATSD_INFO(rx_undersized_frames)}, - {GBE_STATSD_INFO(rx_fragments)}, - {GBE_STATSD_INFO(rx_bytes)}, - {GBE_STATSD_INFO(tx_good_frames)}, - {GBE_STATSD_INFO(tx_broadcast_frames)}, - {GBE_STATSD_INFO(tx_multicast_frames)}, - {GBE_STATSD_INFO(tx_pause_frames)}, - {GBE_STATSD_INFO(tx_deferred_frames)}, - {GBE_STATSD_INFO(tx_collision_frames)}, - {GBE_STATSD_INFO(tx_single_coll_frames)}, - {GBE_STATSD_INFO(tx_mult_coll_frames)}, - {GBE_STATSD_INFO(tx_excessive_collisions)}, - {GBE_STATSD_INFO(tx_late_collisions)}, - {GBE_STATSD_INFO(tx_underrun)}, - {GBE_STATSD_INFO(tx_carrier_sense_errors)}, - {GBE_STATSD_INFO(tx_bytes)}, - {GBE_STATSD_INFO(tx_64byte_frames)}, - {GBE_STATSD_INFO(tx_65_to_127byte_frames)}, - {GBE_STATSD_INFO(tx_128_to_255byte_frames)}, - {GBE_STATSD_INFO(tx_256_to_511byte_frames)}, - {GBE_STATSD_INFO(tx_512_to_1023byte_frames)}, - {GBE_STATSD_INFO(tx_1024byte_frames)}, - {GBE_STATSD_INFO(net_bytes)}, - {GBE_STATSD_INFO(rx_sof_overruns)}, - {GBE_STATSD_INFO(rx_mof_overruns)}, - {GBE_STATSD_INFO(rx_dma_overruns)}, + GBE_STATSD_INFO(rx_good_frames), + GBE_STATSD_INFO(rx_broadcast_frames), + GBE_STATSD_INFO(rx_multicast_frames), + GBE_STATSD_INFO(rx_pause_frames), + GBE_STATSD_INFO(rx_crc_errors), + 
GBE_STATSD_INFO(rx_align_code_errors), + GBE_STATSD_INFO(rx_oversized_frames), + GBE_STATSD_INFO(rx_jabber_frames), + GBE_STATSD_INFO(rx_undersized_frames), + GBE_STATSD_INFO(rx_fragments), + GBE_STATSD_INFO(rx_bytes), + GBE_STATSD_INFO(tx_good_frames), + GBE_STATSD_INFO(tx_broadcast_frames), + GBE_STATSD_INFO(tx_multicast_frames), + GBE_STATSD_INFO(tx_pause_frames), + GBE_STATSD_INFO(tx_deferred_frames), + GBE_STATSD_INFO(tx_collision_frames), + GBE_STATSD_INFO(tx_single_coll_frames), + GBE_STATSD_INFO(tx_mult_coll_frames), + GBE_STATSD_INFO(tx_excessive_collisions), + GBE_STATSD_INFO(tx_late_collisions), + GBE_STATSD_INFO(tx_underrun), + GBE_STATSD_INFO(tx_carrier_sense_errors), + GBE_STATSD_INFO(tx_bytes), + GBE_STATSD_INFO(tx_64byte_frames), + GBE_STATSD_INFO(tx_65_to_127byte_frames), + GBE_STATSD_INFO(tx_128_to_255byte_frames), + GBE_STATSD_INFO(tx_256_to_511byte_frames), + GBE_STATSD_INFO(tx_512_to_1023byte_frames), + GBE_STATSD_INFO(tx_1024byte_frames), + GBE_STATSD_INFO(net_bytes), + GBE_STATSD_INFO(rx_sof_overruns), + GBE_STATSD_INFO(rx_mof_overruns), + GBE_STATSD_INFO(rx_dma_overruns), }; -#define XGBE_STATS0_INFO(field) "GBE_0:"#field, XGBE_STATS0_MODULE, \ - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ - offsetof(struct xgbe_hw_stats, field) +/* This is the size of entries in GBENU_STATS_HOST */ +#define GBENU_ET_STATS_HOST_SIZE 33 + +#define GBENU_STATS_HOST(field) \ +{ \ + "GBE_HOST:"#field, GBENU_STATS0_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} -#define XGBE_STATS1_INFO(field) "GBE_1:"#field, XGBE_STATS1_MODULE, \ - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ - offsetof(struct xgbe_hw_stats, field) +/* This is the size of entries in GBENU_STATS_PORT */ +#define GBENU_ET_STATS_PORT_SIZE 46 -#define XGBE_STATS2_INFO(field) "GBE_2:"#field, XGBE_STATS2_MODULE, \ - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ - offsetof(struct xgbe_hw_stats, field) +#define GBENU_STATS_P1(field) \ +{ \ + "GBE_P1:"#field, GBENU_STATS1_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P2(field) \ +{ \ + "GBE_P2:"#field, GBENU_STATS2_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P3(field) \ +{ \ + "GBE_P3:"#field, GBENU_STATS3_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P4(field) \ +{ \ + "GBE_P4:"#field, GBENU_STATS4_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P5(field) \ +{ \ + "GBE_P5:"#field, GBENU_STATS5_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P6(field) \ +{ \ + "GBE_P6:"#field, GBENU_STATS6_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P7(field) \ +{ \ + "GBE_P7:"#field, GBENU_STATS7_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P8(field) \ +{ \ + "GBE_P8:"#field, GBENU_STATS8_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +static const struct netcp_ethtool_stat gbenu_et_stats[] = { + /* GBENU Host Module */ + GBENU_STATS_HOST(rx_good_frames), + GBENU_STATS_HOST(rx_broadcast_frames), +
GBENU_STATS_HOST(rx_multicast_frames), + GBENU_STATS_HOST(rx_crc_errors), + GBENU_STATS_HOST(rx_oversized_frames), + GBENU_STATS_HOST(rx_undersized_frames), + GBENU_STATS_HOST(ale_drop), + GBENU_STATS_HOST(ale_overrun_drop), + GBENU_STATS_HOST(rx_bytes), + GBENU_STATS_HOST(tx_good_frames), + GBENU_STATS_HOST(tx_broadcast_frames), + GBENU_STATS_HOST(tx_multicast_frames), + GBENU_STATS_HOST(tx_bytes), + GBENU_STATS_HOST(tx_64B_frames), + GBENU_STATS_HOST(tx_65_to_127B_frames), + GBENU_STATS_HOST(tx_128_to_255B_frames), + GBENU_STATS_HOST(tx_256_to_511B_frames), + GBENU_STATS_HOST(tx_512_to_1023B_frames), + GBENU_STATS_HOST(tx_1024B_frames), + GBENU_STATS_HOST(net_bytes), + GBENU_STATS_HOST(rx_bottom_fifo_drop), + GBENU_STATS_HOST(rx_port_mask_drop), + GBENU_STATS_HOST(rx_top_fifo_drop), + GBENU_STATS_HOST(ale_rate_limit_drop), + GBENU_STATS_HOST(ale_vid_ingress_drop), + GBENU_STATS_HOST(ale_da_eq_sa_drop), + GBENU_STATS_HOST(ale_unknown_ucast), + GBENU_STATS_HOST(ale_unknown_ucast_bytes), + GBENU_STATS_HOST(ale_unknown_mcast), + GBENU_STATS_HOST(ale_unknown_mcast_bytes), + GBENU_STATS_HOST(ale_unknown_bcast), + GBENU_STATS_HOST(ale_unknown_bcast_bytes), + GBENU_STATS_HOST(tx_mem_protect_err), + /* GBENU Module 1 */ + GBENU_STATS_P1(rx_good_frames), + GBENU_STATS_P1(rx_broadcast_frames), + GBENU_STATS_P1(rx_multicast_frames), + GBENU_STATS_P1(rx_pause_frames), + GBENU_STATS_P1(rx_crc_errors), + GBENU_STATS_P1(rx_align_code_errors), + GBENU_STATS_P1(rx_oversized_frames), + GBENU_STATS_P1(rx_jabber_frames), + GBENU_STATS_P1(rx_undersized_frames), + GBENU_STATS_P1(rx_fragments), + GBENU_STATS_P1(ale_drop), + GBENU_STATS_P1(ale_overrun_drop), + GBENU_STATS_P1(rx_bytes), + GBENU_STATS_P1(tx_good_frames), + GBENU_STATS_P1(tx_broadcast_frames), + GBENU_STATS_P1(tx_multicast_frames), + GBENU_STATS_P1(tx_pause_frames), + GBENU_STATS_P1(tx_deferred_frames), + GBENU_STATS_P1(tx_collision_frames), + GBENU_STATS_P1(tx_single_coll_frames), + GBENU_STATS_P1(tx_mult_coll_frames), + GBENU_STATS_P1(tx_excessive_collisions), + GBENU_STATS_P1(tx_late_collisions), + GBENU_STATS_P1(rx_ipg_error), + GBENU_STATS_P1(tx_carrier_sense_errors), + GBENU_STATS_P1(tx_bytes), + GBENU_STATS_P1(tx_64B_frames), + GBENU_STATS_P1(tx_65_to_127B_frames), + GBENU_STATS_P1(tx_128_to_255B_frames), + GBENU_STATS_P1(tx_256_to_511B_frames), + GBENU_STATS_P1(tx_512_to_1023B_frames), + GBENU_STATS_P1(tx_1024B_frames), + GBENU_STATS_P1(net_bytes), + GBENU_STATS_P1(rx_bottom_fifo_drop), + GBENU_STATS_P1(rx_port_mask_drop), + GBENU_STATS_P1(rx_top_fifo_drop), + GBENU_STATS_P1(ale_rate_limit_drop), + GBENU_STATS_P1(ale_vid_ingress_drop), + GBENU_STATS_P1(ale_da_eq_sa_drop), + GBENU_STATS_P1(ale_unknown_ucast), + GBENU_STATS_P1(ale_unknown_ucast_bytes), + GBENU_STATS_P1(ale_unknown_mcast), + GBENU_STATS_P1(ale_unknown_mcast_bytes), + GBENU_STATS_P1(ale_unknown_bcast), + GBENU_STATS_P1(ale_unknown_bcast_bytes), + GBENU_STATS_P1(tx_mem_protect_err), + /* GBENU Module 2 */ + GBENU_STATS_P2(rx_good_frames), + GBENU_STATS_P2(rx_broadcast_frames), + GBENU_STATS_P2(rx_multicast_frames), + GBENU_STATS_P2(rx_pause_frames), + GBENU_STATS_P2(rx_crc_errors), + GBENU_STATS_P2(rx_align_code_errors), + GBENU_STATS_P2(rx_oversized_frames), + GBENU_STATS_P2(rx_jabber_frames), + GBENU_STATS_P2(rx_undersized_frames), + GBENU_STATS_P2(rx_fragments), + GBENU_STATS_P2(ale_drop), + GBENU_STATS_P2(ale_overrun_drop), + GBENU_STATS_P2(rx_bytes), + GBENU_STATS_P2(tx_good_frames), + GBENU_STATS_P2(tx_broadcast_frames), + GBENU_STATS_P2(tx_multicast_frames), + 
GBENU_STATS_P2(tx_pause_frames), + GBENU_STATS_P2(tx_deferred_frames), + GBENU_STATS_P2(tx_collision_frames), + GBENU_STATS_P2(tx_single_coll_frames), + GBENU_STATS_P2(tx_mult_coll_frames), + GBENU_STATS_P2(tx_excessive_collisions), + GBENU_STATS_P2(tx_late_collisions), + GBENU_STATS_P2(rx_ipg_error), + GBENU_STATS_P2(tx_carrier_sense_errors), + GBENU_STATS_P2(tx_bytes), + GBENU_STATS_P2(tx_64B_frames), + GBENU_STATS_P2(tx_65_to_127B_frames), + GBENU_STATS_P2(tx_128_to_255B_frames), + GBENU_STATS_P2(tx_256_to_511B_frames), + GBENU_STATS_P2(tx_512_to_1023B_frames), + GBENU_STATS_P2(tx_1024B_frames), + GBENU_STATS_P2(net_bytes), + GBENU_STATS_P2(rx_bottom_fifo_drop), + GBENU_STATS_P2(rx_port_mask_drop), + GBENU_STATS_P2(rx_top_fifo_drop), + GBENU_STATS_P2(ale_rate_limit_drop), + GBENU_STATS_P2(ale_vid_ingress_drop), + GBENU_STATS_P2(ale_da_eq_sa_drop), + GBENU_STATS_P2(ale_unknown_ucast), + GBENU_STATS_P2(ale_unknown_ucast_bytes), + GBENU_STATS_P2(ale_unknown_mcast), + GBENU_STATS_P2(ale_unknown_mcast_bytes), + GBENU_STATS_P2(ale_unknown_bcast), + GBENU_STATS_P2(ale_unknown_bcast_bytes), + GBENU_STATS_P2(tx_mem_protect_err), + /* GBENU Module 3 */ + GBENU_STATS_P3(rx_good_frames), + GBENU_STATS_P3(rx_broadcast_frames), + GBENU_STATS_P3(rx_multicast_frames), + GBENU_STATS_P3(rx_pause_frames), + GBENU_STATS_P3(rx_crc_errors), + GBENU_STATS_P3(rx_align_code_errors), + GBENU_STATS_P3(rx_oversized_frames), + GBENU_STATS_P3(rx_jabber_frames), + GBENU_STATS_P3(rx_undersized_frames), + GBENU_STATS_P3(rx_fragments), + GBENU_STATS_P3(ale_drop), + GBENU_STATS_P3(ale_overrun_drop), + GBENU_STATS_P3(rx_bytes), + GBENU_STATS_P3(tx_good_frames), + GBENU_STATS_P3(tx_broadcast_frames), + GBENU_STATS_P3(tx_multicast_frames), + GBENU_STATS_P3(tx_pause_frames), + GBENU_STATS_P3(tx_deferred_frames), + GBENU_STATS_P3(tx_collision_frames), + GBENU_STATS_P3(tx_single_coll_frames), + GBENU_STATS_P3(tx_mult_coll_frames), + GBENU_STATS_P3(tx_excessive_collisions), + GBENU_STATS_P3(tx_late_collisions), + GBENU_STATS_P3(rx_ipg_error), + GBENU_STATS_P3(tx_carrier_sense_errors), + GBENU_STATS_P3(tx_bytes), + GBENU_STATS_P3(tx_64B_frames), + GBENU_STATS_P3(tx_65_to_127B_frames), + GBENU_STATS_P3(tx_128_to_255B_frames), + GBENU_STATS_P3(tx_256_to_511B_frames), + GBENU_STATS_P3(tx_512_to_1023B_frames), + GBENU_STATS_P3(tx_1024B_frames), + GBENU_STATS_P3(net_bytes), + GBENU_STATS_P3(rx_bottom_fifo_drop), + GBENU_STATS_P3(rx_port_mask_drop), + GBENU_STATS_P3(rx_top_fifo_drop), + GBENU_STATS_P3(ale_rate_limit_drop), + GBENU_STATS_P3(ale_vid_ingress_drop), + GBENU_STATS_P3(ale_da_eq_sa_drop), + GBENU_STATS_P3(ale_unknown_ucast), + GBENU_STATS_P3(ale_unknown_ucast_bytes), + GBENU_STATS_P3(ale_unknown_mcast), + GBENU_STATS_P3(ale_unknown_mcast_bytes), + GBENU_STATS_P3(ale_unknown_bcast), + GBENU_STATS_P3(ale_unknown_bcast_bytes), + GBENU_STATS_P3(tx_mem_protect_err), + /* GBENU Module 4 */ + GBENU_STATS_P4(rx_good_frames), + GBENU_STATS_P4(rx_broadcast_frames), + GBENU_STATS_P4(rx_multicast_frames), + GBENU_STATS_P4(rx_pause_frames), + GBENU_STATS_P4(rx_crc_errors), + GBENU_STATS_P4(rx_align_code_errors), + GBENU_STATS_P4(rx_oversized_frames), + GBENU_STATS_P4(rx_jabber_frames), + GBENU_STATS_P4(rx_undersized_frames), + GBENU_STATS_P4(rx_fragments), + GBENU_STATS_P4(ale_drop), + GBENU_STATS_P4(ale_overrun_drop), + GBENU_STATS_P4(rx_bytes), + GBENU_STATS_P4(tx_good_frames), + GBENU_STATS_P4(tx_broadcast_frames), + GBENU_STATS_P4(tx_multicast_frames), + GBENU_STATS_P4(tx_pause_frames), + GBENU_STATS_P4(tx_deferred_frames), + 
GBENU_STATS_P4(tx_collision_frames), + GBENU_STATS_P4(tx_single_coll_frames), + GBENU_STATS_P4(tx_mult_coll_frames), + GBENU_STATS_P4(tx_excessive_collisions), + GBENU_STATS_P4(tx_late_collisions), + GBENU_STATS_P4(rx_ipg_error), + GBENU_STATS_P4(tx_carrier_sense_errors), + GBENU_STATS_P4(tx_bytes), + GBENU_STATS_P4(tx_64B_frames), + GBENU_STATS_P4(tx_65_to_127B_frames), + GBENU_STATS_P4(tx_128_to_255B_frames), + GBENU_STATS_P4(tx_256_to_511B_frames), + GBENU_STATS_P4(tx_512_to_1023B_frames), + GBENU_STATS_P4(tx_1024B_frames), + GBENU_STATS_P4(net_bytes), + GBENU_STATS_P4(rx_bottom_fifo_drop), + GBENU_STATS_P4(rx_port_mask_drop), + GBENU_STATS_P4(rx_top_fifo_drop), + GBENU_STATS_P4(ale_rate_limit_drop), + GBENU_STATS_P4(ale_vid_ingress_drop), + GBENU_STATS_P4(ale_da_eq_sa_drop), + GBENU_STATS_P4(ale_unknown_ucast), + GBENU_STATS_P4(ale_unknown_ucast_bytes), + GBENU_STATS_P4(ale_unknown_mcast), + GBENU_STATS_P4(ale_unknown_mcast_bytes), + GBENU_STATS_P4(ale_unknown_bcast), + GBENU_STATS_P4(ale_unknown_bcast_bytes), + GBENU_STATS_P4(tx_mem_protect_err), + /* GBENU Module 5 */ + GBENU_STATS_P5(rx_good_frames), + GBENU_STATS_P5(rx_broadcast_frames), + GBENU_STATS_P5(rx_multicast_frames), + GBENU_STATS_P5(rx_pause_frames), + GBENU_STATS_P5(rx_crc_errors), + GBENU_STATS_P5(rx_align_code_errors), + GBENU_STATS_P5(rx_oversized_frames), + GBENU_STATS_P5(rx_jabber_frames), + GBENU_STATS_P5(rx_undersized_frames), + GBENU_STATS_P5(rx_fragments), + GBENU_STATS_P5(ale_drop), + GBENU_STATS_P5(ale_overrun_drop), + GBENU_STATS_P5(rx_bytes), + GBENU_STATS_P5(tx_good_frames), + GBENU_STATS_P5(tx_broadcast_frames), + GBENU_STATS_P5(tx_multicast_frames), + GBENU_STATS_P5(tx_pause_frames), + GBENU_STATS_P5(tx_deferred_frames), + GBENU_STATS_P5(tx_collision_frames), + GBENU_STATS_P5(tx_single_coll_frames), + GBENU_STATS_P5(tx_mult_coll_frames), + GBENU_STATS_P5(tx_excessive_collisions), + GBENU_STATS_P5(tx_late_collisions), + GBENU_STATS_P5(rx_ipg_error), + GBENU_STATS_P5(tx_carrier_sense_errors), + GBENU_STATS_P5(tx_bytes), + GBENU_STATS_P5(tx_64B_frames), + GBENU_STATS_P5(tx_65_to_127B_frames), + GBENU_STATS_P5(tx_128_to_255B_frames), + GBENU_STATS_P5(tx_256_to_511B_frames), + GBENU_STATS_P5(tx_512_to_1023B_frames), + GBENU_STATS_P5(tx_1024B_frames), + GBENU_STATS_P5(net_bytes), + GBENU_STATS_P5(rx_bottom_fifo_drop), + GBENU_STATS_P5(rx_port_mask_drop), + GBENU_STATS_P5(rx_top_fifo_drop), + GBENU_STATS_P5(ale_rate_limit_drop), + GBENU_STATS_P5(ale_vid_ingress_drop), + GBENU_STATS_P5(ale_da_eq_sa_drop), + GBENU_STATS_P5(ale_unknown_ucast), + GBENU_STATS_P5(ale_unknown_ucast_bytes), + GBENU_STATS_P5(ale_unknown_mcast), + GBENU_STATS_P5(ale_unknown_mcast_bytes), + GBENU_STATS_P5(ale_unknown_bcast), + GBENU_STATS_P5(ale_unknown_bcast_bytes), + GBENU_STATS_P5(tx_mem_protect_err), + /* GBENU Module 6 */ + GBENU_STATS_P6(rx_good_frames), + GBENU_STATS_P6(rx_broadcast_frames), + GBENU_STATS_P6(rx_multicast_frames), + GBENU_STATS_P6(rx_pause_frames), + GBENU_STATS_P6(rx_crc_errors), + GBENU_STATS_P6(rx_align_code_errors), + GBENU_STATS_P6(rx_oversized_frames), + GBENU_STATS_P6(rx_jabber_frames), + GBENU_STATS_P6(rx_undersized_frames), + GBENU_STATS_P6(rx_fragments), + GBENU_STATS_P6(ale_drop), + GBENU_STATS_P6(ale_overrun_drop), + GBENU_STATS_P6(rx_bytes), + GBENU_STATS_P6(tx_good_frames), + GBENU_STATS_P6(tx_broadcast_frames), + GBENU_STATS_P6(tx_multicast_frames), + GBENU_STATS_P6(tx_pause_frames), + GBENU_STATS_P6(tx_deferred_frames), + GBENU_STATS_P6(tx_collision_frames), + GBENU_STATS_P6(tx_single_coll_frames), + 
GBENU_STATS_P6(tx_mult_coll_frames), + GBENU_STATS_P6(tx_excessive_collisions), + GBENU_STATS_P6(tx_late_collisions), + GBENU_STATS_P6(rx_ipg_error), + GBENU_STATS_P6(tx_carrier_sense_errors), + GBENU_STATS_P6(tx_bytes), + GBENU_STATS_P6(tx_64B_frames), + GBENU_STATS_P6(tx_65_to_127B_frames), + GBENU_STATS_P6(tx_128_to_255B_frames), + GBENU_STATS_P6(tx_256_to_511B_frames), + GBENU_STATS_P6(tx_512_to_1023B_frames), + GBENU_STATS_P6(tx_1024B_frames), + GBENU_STATS_P6(net_bytes), + GBENU_STATS_P6(rx_bottom_fifo_drop), + GBENU_STATS_P6(rx_port_mask_drop), + GBENU_STATS_P6(rx_top_fifo_drop), + GBENU_STATS_P6(ale_rate_limit_drop), + GBENU_STATS_P6(ale_vid_ingress_drop), + GBENU_STATS_P6(ale_da_eq_sa_drop), + GBENU_STATS_P6(ale_unknown_ucast), + GBENU_STATS_P6(ale_unknown_ucast_bytes), + GBENU_STATS_P6(ale_unknown_mcast), + GBENU_STATS_P6(ale_unknown_mcast_bytes), + GBENU_STATS_P6(ale_unknown_bcast), + GBENU_STATS_P6(ale_unknown_bcast_bytes), + GBENU_STATS_P6(tx_mem_protect_err), + /* GBENU Module 7 */ + GBENU_STATS_P7(rx_good_frames), + GBENU_STATS_P7(rx_broadcast_frames), + GBENU_STATS_P7(rx_multicast_frames), + GBENU_STATS_P7(rx_pause_frames), + GBENU_STATS_P7(rx_crc_errors), + GBENU_STATS_P7(rx_align_code_errors), + GBENU_STATS_P7(rx_oversized_frames), + GBENU_STATS_P7(rx_jabber_frames), + GBENU_STATS_P7(rx_undersized_frames), + GBENU_STATS_P7(rx_fragments), + GBENU_STATS_P7(ale_drop), + GBENU_STATS_P7(ale_overrun_drop), + GBENU_STATS_P7(rx_bytes), + GBENU_STATS_P7(tx_good_frames), + GBENU_STATS_P7(tx_broadcast_frames), + GBENU_STATS_P7(tx_multicast_frames), + GBENU_STATS_P7(tx_pause_frames), + GBENU_STATS_P7(tx_deferred_frames), + GBENU_STATS_P7(tx_collision_frames), + GBENU_STATS_P7(tx_single_coll_frames), + GBENU_STATS_P7(tx_mult_coll_frames), + GBENU_STATS_P7(tx_excessive_collisions), + GBENU_STATS_P7(tx_late_collisions), + GBENU_STATS_P7(rx_ipg_error), + GBENU_STATS_P7(tx_carrier_sense_errors), + GBENU_STATS_P7(tx_bytes), + GBENU_STATS_P7(tx_64B_frames), + GBENU_STATS_P7(tx_65_to_127B_frames), + GBENU_STATS_P7(tx_128_to_255B_frames), + GBENU_STATS_P7(tx_256_to_511B_frames), + GBENU_STATS_P7(tx_512_to_1023B_frames), + GBENU_STATS_P7(tx_1024B_frames), + GBENU_STATS_P7(net_bytes), + GBENU_STATS_P7(rx_bottom_fifo_drop), + GBENU_STATS_P7(rx_port_mask_drop), + GBENU_STATS_P7(rx_top_fifo_drop), + GBENU_STATS_P7(ale_rate_limit_drop), + GBENU_STATS_P7(ale_vid_ingress_drop), + GBENU_STATS_P7(ale_da_eq_sa_drop), + GBENU_STATS_P7(ale_unknown_ucast), + GBENU_STATS_P7(ale_unknown_ucast_bytes), + GBENU_STATS_P7(ale_unknown_mcast), + GBENU_STATS_P7(ale_unknown_mcast_bytes), + GBENU_STATS_P7(ale_unknown_bcast), + GBENU_STATS_P7(ale_unknown_bcast_bytes), + GBENU_STATS_P7(tx_mem_protect_err), + /* GBENU Module 8 */ + GBENU_STATS_P8(rx_good_frames), + GBENU_STATS_P8(rx_broadcast_frames), + GBENU_STATS_P8(rx_multicast_frames), + GBENU_STATS_P8(rx_pause_frames), + GBENU_STATS_P8(rx_crc_errors), + GBENU_STATS_P8(rx_align_code_errors), + GBENU_STATS_P8(rx_oversized_frames), + GBENU_STATS_P8(rx_jabber_frames), + GBENU_STATS_P8(rx_undersized_frames), + GBENU_STATS_P8(rx_fragments), + GBENU_STATS_P8(ale_drop), + GBENU_STATS_P8(ale_overrun_drop), + GBENU_STATS_P8(rx_bytes), + GBENU_STATS_P8(tx_good_frames), + GBENU_STATS_P8(tx_broadcast_frames), + GBENU_STATS_P8(tx_multicast_frames), + GBENU_STATS_P8(tx_pause_frames), + GBENU_STATS_P8(tx_deferred_frames), + GBENU_STATS_P8(tx_collision_frames), + GBENU_STATS_P8(tx_single_coll_frames), + GBENU_STATS_P8(tx_mult_coll_frames), + GBENU_STATS_P8(tx_excessive_collisions), 
+ GBENU_STATS_P8(tx_late_collisions), + GBENU_STATS_P8(rx_ipg_error), + GBENU_STATS_P8(tx_carrier_sense_errors), + GBENU_STATS_P8(tx_bytes), + GBENU_STATS_P8(tx_64B_frames), + GBENU_STATS_P8(tx_65_to_127B_frames), + GBENU_STATS_P8(tx_128_to_255B_frames), + GBENU_STATS_P8(tx_256_to_511B_frames), + GBENU_STATS_P8(tx_512_to_1023B_frames), + GBENU_STATS_P8(tx_1024B_frames), + GBENU_STATS_P8(net_bytes), + GBENU_STATS_P8(rx_bottom_fifo_drop), + GBENU_STATS_P8(rx_port_mask_drop), + GBENU_STATS_P8(rx_top_fifo_drop), + GBENU_STATS_P8(ale_rate_limit_drop), + GBENU_STATS_P8(ale_vid_ingress_drop), + GBENU_STATS_P8(ale_da_eq_sa_drop), + GBENU_STATS_P8(ale_unknown_ucast), + GBENU_STATS_P8(ale_unknown_ucast_bytes), + GBENU_STATS_P8(ale_unknown_mcast), + GBENU_STATS_P8(ale_unknown_mcast_bytes), + GBENU_STATS_P8(ale_unknown_bcast), + GBENU_STATS_P8(ale_unknown_bcast_bytes), + GBENU_STATS_P8(tx_mem_protect_err), +}; + +#define XGBE_STATS0_INFO(field) \ +{ \ + "GBE_0:"#field, XGBE_STATS0_MODULE, \ + FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + offsetof(struct xgbe_hw_stats, field) \ +} + +#define XGBE_STATS1_INFO(field) \ +{ \ + "GBE_1:"#field, XGBE_STATS1_MODULE, \ + FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + offsetof(struct xgbe_hw_stats, field) \ +} + +#define XGBE_STATS2_INFO(field) \ +{ \ + "GBE_2:"#field, XGBE_STATS2_MODULE, \ + FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + offsetof(struct xgbe_hw_stats, field) \ +} static const struct netcp_ethtool_stat xgbe10_et_stats[] = { /* GBE module 0 */ - {XGBE_STATS0_INFO(rx_good_frames)}, - {XGBE_STATS0_INFO(rx_broadcast_frames)}, - {XGBE_STATS0_INFO(rx_multicast_frames)}, - {XGBE_STATS0_INFO(rx_oversized_frames)}, - {XGBE_STATS0_INFO(rx_undersized_frames)}, - {XGBE_STATS0_INFO(overrun_type4)}, - {XGBE_STATS0_INFO(overrun_type5)}, - {XGBE_STATS0_INFO(rx_bytes)}, - {XGBE_STATS0_INFO(tx_good_frames)}, - {XGBE_STATS0_INFO(tx_broadcast_frames)}, - {XGBE_STATS0_INFO(tx_multicast_frames)}, - {XGBE_STATS0_INFO(tx_bytes)}, - {XGBE_STATS0_INFO(tx_64byte_frames)}, - {XGBE_STATS0_INFO(tx_65_to_127byte_frames)}, - {XGBE_STATS0_INFO(tx_128_to_255byte_frames)}, - {XGBE_STATS0_INFO(tx_256_to_511byte_frames)}, - {XGBE_STATS0_INFO(tx_512_to_1023byte_frames)}, - {XGBE_STATS0_INFO(tx_1024byte_frames)}, - {XGBE_STATS0_INFO(net_bytes)}, - {XGBE_STATS0_INFO(rx_sof_overruns)}, - {XGBE_STATS0_INFO(rx_mof_overruns)}, - {XGBE_STATS0_INFO(rx_dma_overruns)}, + XGBE_STATS0_INFO(rx_good_frames), + XGBE_STATS0_INFO(rx_broadcast_frames), + XGBE_STATS0_INFO(rx_multicast_frames), + XGBE_STATS0_INFO(rx_oversized_frames), + XGBE_STATS0_INFO(rx_undersized_frames), + XGBE_STATS0_INFO(overrun_type4), + XGBE_STATS0_INFO(overrun_type5), + XGBE_STATS0_INFO(rx_bytes), + XGBE_STATS0_INFO(tx_good_frames), + XGBE_STATS0_INFO(tx_broadcast_frames), + XGBE_STATS0_INFO(tx_multicast_frames), + XGBE_STATS0_INFO(tx_bytes), + XGBE_STATS0_INFO(tx_64byte_frames), + XGBE_STATS0_INFO(tx_65_to_127byte_frames), + XGBE_STATS0_INFO(tx_128_to_255byte_frames), + XGBE_STATS0_INFO(tx_256_to_511byte_frames), + XGBE_STATS0_INFO(tx_512_to_1023byte_frames), + XGBE_STATS0_INFO(tx_1024byte_frames), + XGBE_STATS0_INFO(net_bytes), + XGBE_STATS0_INFO(rx_sof_overruns), + XGBE_STATS0_INFO(rx_mof_overruns), + XGBE_STATS0_INFO(rx_dma_overruns), /* XGBE module 1 */ - {XGBE_STATS1_INFO(rx_good_frames)}, - {XGBE_STATS1_INFO(rx_broadcast_frames)}, - {XGBE_STATS1_INFO(rx_multicast_frames)}, - {XGBE_STATS1_INFO(rx_pause_frames)}, - {XGBE_STATS1_INFO(rx_crc_errors)}, - {XGBE_STATS1_INFO(rx_align_code_errors)}, - 
{XGBE_STATS1_INFO(rx_oversized_frames)}, - {XGBE_STATS1_INFO(rx_jabber_frames)}, - {XGBE_STATS1_INFO(rx_undersized_frames)}, - {XGBE_STATS1_INFO(rx_fragments)}, - {XGBE_STATS1_INFO(overrun_type4)}, - {XGBE_STATS1_INFO(overrun_type5)}, - {XGBE_STATS1_INFO(rx_bytes)}, - {XGBE_STATS1_INFO(tx_good_frames)}, - {XGBE_STATS1_INFO(tx_broadcast_frames)}, - {XGBE_STATS1_INFO(tx_multicast_frames)}, - {XGBE_STATS1_INFO(tx_pause_frames)}, - {XGBE_STATS1_INFO(tx_deferred_frames)}, - {XGBE_STATS1_INFO(tx_collision_frames)}, - {XGBE_STATS1_INFO(tx_single_coll_frames)}, - {XGBE_STATS1_INFO(tx_mult_coll_frames)}, - {XGBE_STATS1_INFO(tx_excessive_collisions)}, - {XGBE_STATS1_INFO(tx_late_collisions)}, - {XGBE_STATS1_INFO(tx_underrun)}, - {XGBE_STATS1_INFO(tx_carrier_sense_errors)}, - {XGBE_STATS1_INFO(tx_bytes)}, - {XGBE_STATS1_INFO(tx_64byte_frames)}, - {XGBE_STATS1_INFO(tx_65_to_127byte_frames)}, - {XGBE_STATS1_INFO(tx_128_to_255byte_frames)}, - {XGBE_STATS1_INFO(tx_256_to_511byte_frames)}, - {XGBE_STATS1_INFO(tx_512_to_1023byte_frames)}, - {XGBE_STATS1_INFO(tx_1024byte_frames)}, - {XGBE_STATS1_INFO(net_bytes)}, - {XGBE_STATS1_INFO(rx_sof_overruns)}, - {XGBE_STATS1_INFO(rx_mof_overruns)}, - {XGBE_STATS1_INFO(rx_dma_overruns)}, + XGBE_STATS1_INFO(rx_good_frames), + XGBE_STATS1_INFO(rx_broadcast_frames), + XGBE_STATS1_INFO(rx_multicast_frames), + XGBE_STATS1_INFO(rx_pause_frames), + XGBE_STATS1_INFO(rx_crc_errors), + XGBE_STATS1_INFO(rx_align_code_errors), + XGBE_STATS1_INFO(rx_oversized_frames), + XGBE_STATS1_INFO(rx_jabber_frames), + XGBE_STATS1_INFO(rx_undersized_frames), + XGBE_STATS1_INFO(rx_fragments), + XGBE_STATS1_INFO(overrun_type4), + XGBE_STATS1_INFO(overrun_type5), + XGBE_STATS1_INFO(rx_bytes), + XGBE_STATS1_INFO(tx_good_frames), + XGBE_STATS1_INFO(tx_broadcast_frames), + XGBE_STATS1_INFO(tx_multicast_frames), + XGBE_STATS1_INFO(tx_pause_frames), + XGBE_STATS1_INFO(tx_deferred_frames), + XGBE_STATS1_INFO(tx_collision_frames), + XGBE_STATS1_INFO(tx_single_coll_frames), + XGBE_STATS1_INFO(tx_mult_coll_frames), + XGBE_STATS1_INFO(tx_excessive_collisions), + XGBE_STATS1_INFO(tx_late_collisions), + XGBE_STATS1_INFO(tx_underrun), + XGBE_STATS1_INFO(tx_carrier_sense_errors), + XGBE_STATS1_INFO(tx_bytes), + XGBE_STATS1_INFO(tx_64byte_frames), + XGBE_STATS1_INFO(tx_65_to_127byte_frames), + XGBE_STATS1_INFO(tx_128_to_255byte_frames), + XGBE_STATS1_INFO(tx_256_to_511byte_frames), + XGBE_STATS1_INFO(tx_512_to_1023byte_frames), + XGBE_STATS1_INFO(tx_1024byte_frames), + XGBE_STATS1_INFO(net_bytes), + XGBE_STATS1_INFO(rx_sof_overruns), + XGBE_STATS1_INFO(rx_mof_overruns), + XGBE_STATS1_INFO(rx_dma_overruns), /* XGBE module 2 */ - {XGBE_STATS2_INFO(rx_good_frames)}, - {XGBE_STATS2_INFO(rx_broadcast_frames)}, - {XGBE_STATS2_INFO(rx_multicast_frames)}, - {XGBE_STATS2_INFO(rx_pause_frames)}, - {XGBE_STATS2_INFO(rx_crc_errors)}, - {XGBE_STATS2_INFO(rx_align_code_errors)}, - {XGBE_STATS2_INFO(rx_oversized_frames)}, - {XGBE_STATS2_INFO(rx_jabber_frames)}, - {XGBE_STATS2_INFO(rx_undersized_frames)}, - {XGBE_STATS2_INFO(rx_fragments)}, - {XGBE_STATS2_INFO(overrun_type4)}, - {XGBE_STATS2_INFO(overrun_type5)}, - {XGBE_STATS2_INFO(rx_bytes)}, - {XGBE_STATS2_INFO(tx_good_frames)}, - {XGBE_STATS2_INFO(tx_broadcast_frames)}, - {XGBE_STATS2_INFO(tx_multicast_frames)}, - {XGBE_STATS2_INFO(tx_pause_frames)}, - {XGBE_STATS2_INFO(tx_deferred_frames)}, - {XGBE_STATS2_INFO(tx_collision_frames)}, - {XGBE_STATS2_INFO(tx_single_coll_frames)}, - {XGBE_STATS2_INFO(tx_mult_coll_frames)}, - {XGBE_STATS2_INFO(tx_excessive_collisions)}, - 
{XGBE_STATS2_INFO(tx_late_collisions)}, - {XGBE_STATS2_INFO(tx_underrun)}, - {XGBE_STATS2_INFO(tx_carrier_sense_errors)}, - {XGBE_STATS2_INFO(tx_bytes)}, - {XGBE_STATS2_INFO(tx_64byte_frames)}, - {XGBE_STATS2_INFO(tx_65_to_127byte_frames)}, - {XGBE_STATS2_INFO(tx_128_to_255byte_frames)}, - {XGBE_STATS2_INFO(tx_256_to_511byte_frames)}, - {XGBE_STATS2_INFO(tx_512_to_1023byte_frames)}, - {XGBE_STATS2_INFO(tx_1024byte_frames)}, - {XGBE_STATS2_INFO(net_bytes)}, - {XGBE_STATS2_INFO(rx_sof_overruns)}, - {XGBE_STATS2_INFO(rx_mof_overruns)}, - {XGBE_STATS2_INFO(rx_dma_overruns)}, + XGBE_STATS2_INFO(rx_good_frames), + XGBE_STATS2_INFO(rx_broadcast_frames), + XGBE_STATS2_INFO(rx_multicast_frames), + XGBE_STATS2_INFO(rx_pause_frames), + XGBE_STATS2_INFO(rx_crc_errors), + XGBE_STATS2_INFO(rx_align_code_errors), + XGBE_STATS2_INFO(rx_oversized_frames), + XGBE_STATS2_INFO(rx_jabber_frames), + XGBE_STATS2_INFO(rx_undersized_frames), + XGBE_STATS2_INFO(rx_fragments), + XGBE_STATS2_INFO(overrun_type4), + XGBE_STATS2_INFO(overrun_type5), + XGBE_STATS2_INFO(rx_bytes), + XGBE_STATS2_INFO(tx_good_frames), + XGBE_STATS2_INFO(tx_broadcast_frames), + XGBE_STATS2_INFO(tx_multicast_frames), + XGBE_STATS2_INFO(tx_pause_frames), + XGBE_STATS2_INFO(tx_deferred_frames), + XGBE_STATS2_INFO(tx_collision_frames), + XGBE_STATS2_INFO(tx_single_coll_frames), + XGBE_STATS2_INFO(tx_mult_coll_frames), + XGBE_STATS2_INFO(tx_excessive_collisions), + XGBE_STATS2_INFO(tx_late_collisions), + XGBE_STATS2_INFO(tx_underrun), + XGBE_STATS2_INFO(tx_carrier_sense_errors), + XGBE_STATS2_INFO(tx_bytes), + XGBE_STATS2_INFO(tx_64byte_frames), + XGBE_STATS2_INFO(tx_65_to_127byte_frames), + XGBE_STATS2_INFO(tx_128_to_255byte_frames), + XGBE_STATS2_INFO(tx_256_to_511byte_frames), + XGBE_STATS2_INFO(tx_512_to_1023byte_frames), + XGBE_STATS2_INFO(tx_1024byte_frames), + XGBE_STATS2_INFO(net_bytes), + XGBE_STATS2_INFO(rx_sof_overruns), + XGBE_STATS2_INFO(rx_mof_overruns), + XGBE_STATS2_INFO(rx_dma_overruns), }; #define for_each_intf(i, priv) \ @@ -1066,9 +1796,16 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, if (!slave->open) return; - if (!SLAVE_LINK_IS_XGMII(slave)) - sgmii_link_state = netcp_sgmii_get_port_link(SGMII_BASE(sp), - sp); + if (!SLAVE_LINK_IS_XGMII(slave)) { + if (gbe_dev->ss_version == GBE_SS_VERSION_14) + sgmii_link_state = + netcp_sgmii_get_port_link(SGMII_BASE(sp), sp); + else + sgmii_link_state = + netcp_sgmii_get_port_link( + gbe_dev->sgmii_port_regs, sp); + } + phy_link_state = gbe_phy_link_status(slave); link_state = phy_link_state & sgmii_link_state; @@ -1137,6 +1874,7 @@ static int gbe_port_reset(struct gbe_slave *slave) static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, int max_rx_len) { + void __iomem *rx_maxlen_reg; u32 xgmii_mode; if (max_rx_len > NETCP_MAX_FRAME_SIZE) @@ -1150,7 +1888,12 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control)); } - writel(max_rx_len, GBE_REG_ADDR(slave, emac_regs, rx_maxlen)); + if (IS_SS_ID_MU(gbe_dev)) + rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen); + else + rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen); + + writel(max_rx_len, rx_maxlen_reg); writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); } @@ -1242,6 +1985,12 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) static void gbe_init_host_port(struct gbe_priv *priv) { int bypass_en = 1; + + /* Host Tx Pri */ + if (IS_SS_ID_NU(priv)) + 
writel(HOST_TX_PRI_MAP_DEFAULT, + GBE_REG_ADDR(priv, host_port_regs, tx_pri_map)); + /* Max length register */ writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs, rx_maxlen)); @@ -1472,15 +2221,21 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg), GBE_RTL_VERSION(reg), GBE_IDENT(reg)); + /* For 10G and on NetCP 1.5, use directed to port */ + if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev)) + gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO; + if (gbe_dev->enable_ale) - gbe_intf->tx_pipe.dma_psflags = 0; + gbe_intf->tx_pipe.switch_to_port = 0; else - gbe_intf->tx_pipe.dma_psflags = port_num; + gbe_intf->tx_pipe.switch_to_port = port_num; - dev_dbg(gbe_dev->dev, "opened TX channel %s: %p with psflags %d\n", + dev_dbg(gbe_dev->dev, + "opened TX channel %s: %p with to port %d, flags %d\n", gbe_intf->tx_pipe.dma_chan_name, gbe_intf->tx_pipe.dma_channel, - gbe_intf->tx_pipe.dma_psflags); + gbe_intf->tx_pipe.switch_to_port, + gbe_intf->tx_pipe.flags); gbe_slave_stop(gbe_intf); @@ -1491,8 +2246,8 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control)); /* All statistics enabled and STAT AB visible by default */ - writel(GBE_REG_VAL_STAT_ENABLE_ALL, GBE_REG_ADDR(gbe_dev, switch_regs, - stat_port_en)); + writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs, + stat_port_en)); ret = gbe_slave_open(gbe_intf); if (ret) @@ -1529,6 +2284,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, { int port_reg_num; u32 port_reg_ofs, emac_reg_ofs; + u32 port_reg_blk_sz, emac_reg_blk_sz; if (of_property_read_u32(node, "slave-port", &slave->slave_num)) { dev_err(gbe_dev->dev, "missing slave-port parameter\n"); @@ -1560,23 +2316,29 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, } else { port_reg_ofs = GBE13_SLAVE_PORT_OFFSET; } + emac_reg_ofs = GBE13_EMAC_OFFSET; + port_reg_blk_sz = 0x30; + emac_reg_blk_sz = 0x40; + } else if (IS_SS_ID_MU(gbe_dev)) { + port_reg_ofs = GBENU_SLAVE_PORT_OFFSET; + emac_reg_ofs = GBENU_EMAC_OFFSET; + port_reg_blk_sz = 0x1000; + emac_reg_blk_sz = 0x1000; } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET; + emac_reg_ofs = XGBE10_EMAC_OFFSET; + port_reg_blk_sz = 0x30; + emac_reg_blk_sz = 0x40; } else { dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n", gbe_dev->ss_version); return -EINVAL; } - if (gbe_dev->ss_version == GBE_SS_VERSION_14) - emac_reg_ofs = GBE13_EMAC_OFFSET; - else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) - emac_reg_ofs = XGBE10_EMAC_OFFSET; - - slave->port_regs = gbe_dev->ss_regs + port_reg_ofs + - (0x30 * port_reg_num); - slave->emac_regs = gbe_dev->ss_regs + emac_reg_ofs + - (0x40 * slave->slave_num); + slave->port_regs = gbe_dev->switch_regs + port_reg_ofs + + (port_reg_blk_sz * port_reg_num); + slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs + + (emac_reg_blk_sz * slave->slave_num); if (gbe_dev->ss_version == GBE_SS_VERSION_14) { /* Initialize slave port register offsets */ @@ -1595,6 +2357,23 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, GBE_SET_REG_OFS(slave, emac_regs, soft_reset); GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen); + } else if (IS_SS_ID_MU(gbe_dev)) { + /* Initialize slave port register offsets */ + GBENU_SET_REG_OFS(slave, port_regs, port_vlan); + GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map); + GBENU_SET_REG_OFS(slave, 
port_regs, sa_lo); + GBENU_SET_REG_OFS(slave, port_regs, sa_hi); + GBENU_SET_REG_OFS(slave, port_regs, ts_ctl); + GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype); + GBENU_SET_REG_OFS(slave, port_regs, ts_vlan); + GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); + GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2); + GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen); + + /* Initialize EMAC register offsets */ + GBENU_SET_REG_OFS(slave, emac_regs, mac_control); + GBENU_SET_REG_OFS(slave, emac_regs, soft_reset); + } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { /* Initialize slave port register offsets */ XGBE_SET_REG_OFS(slave, port_regs, port_vlan); @@ -1654,6 +2433,8 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, mac_phy_link = true; slave->open = true; + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) + break; } /* of_phy_connect() is needed only for MAC-PHY interface */ @@ -1724,24 +2505,41 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, void __iomem *regs; int ret, i; - ret = of_address_to_resource(node, 0, &res); + ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res); if (ret) { - dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe subsystem regs\n", - node->name); + dev_err(gbe_dev->dev, + "Can't xlate xgbe of node(%s) ss address at %d\n", + node->name, XGBE_SS_REG_INDEX); return ret; } regs = devm_ioremap_resource(gbe_dev->dev, &res); if (IS_ERR(regs)) { - dev_err(gbe_dev->dev, "Failed to map xgbe register base\n"); + dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n"); return PTR_ERR(regs); } gbe_dev->ss_regs = regs; + ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't xlate xgbe of node(%s) sm address at %d\n", + node->name, XGBE_SM_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n"); + return PTR_ERR(regs); + } + gbe_dev->switch_regs = regs; + ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res); if (ret) { - dev_err(gbe_dev->dev, "Can't translate of node(%s) address for xgbe serdes regs\n", - node->name); + dev_err(gbe_dev->dev, + "Can't xlate xgbe serdes of node(%s) address at %d\n", + node->name, XGBE_SERDES_REG_INDEX); return ret; } @@ -1753,9 +2551,9 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, gbe_dev->xgbe_serdes_regs = regs; gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - XGBE10_NUM_STAT_ENTRIES * - (XGBE10_NUM_SLAVES + 1) * sizeof(u64), - GFP_KERNEL); + XGBE10_NUM_STAT_ENTRIES * + (gbe_dev->max_num_ports) * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; @@ -1764,19 +2562,19 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, gbe_dev->ss_version = XGBE_SS_VERSION_10; gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + XGBE10_SGMII_MODULE_OFFSET; - gbe_dev->switch_regs = gbe_dev->ss_regs + XGBE10_SWITCH_MODULE_OFFSET; gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET; - for (i = 0; i < XGBE10_NUM_HW_STATS_MOD; i++) - gbe_dev->hw_stats_regs[i] = gbe_dev->ss_regs + + for (i = 0; i < gbe_dev->max_num_ports; i++) + gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i); - gbe_dev->ale_reg = gbe_dev->ss_regs + XGBE10_ALE_OFFSET; - gbe_dev->ale_ports = XGBE10_NUM_ALE_PORTS; + gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET; + 
gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = XGBE10_HOST_PORT_NUM; gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES; gbe_dev->et_stats = xgbe10_et_stats; gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); + gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; /* Subsystem registers */ XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver); @@ -1803,10 +2601,11 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev, void __iomem *regs; int ret; - ret = of_address_to_resource(node, 0, &res); + ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res); if (ret) { - dev_err(gbe_dev->dev, "Can't translate of node(%s) address\n", - node->name); + dev_err(gbe_dev->dev, + "Can't translate of node(%s) of gbe ss address at %d\n", + node->name, GBE_SS_REG_INDEX); return ret; } @@ -1823,34 +2622,67 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev, static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, struct device_node *node) { + struct resource res; void __iomem *regs; - int i; + int i, ret; + + ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't translate of gbe node(%s) address at index %d\n", + node->name, GBE_SGMII34_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, + "Failed to map gbe sgmii port34 register base\n"); + return PTR_ERR(regs); + } + gbe_dev->sgmii_port34_regs = regs; + + ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't translate of gbe node(%s) address at index %d\n", + node->name, GBE_SM_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, + "Failed to map gbe switch module register base\n"); + return PTR_ERR(regs); + } + gbe_dev->switch_regs = regs; gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, GBE13_NUM_HW_STAT_ENTRIES * - GBE13_NUM_SLAVES * sizeof(u64), + gbe_dev->max_num_slaves * sizeof(u64), GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } - regs = gbe_dev->ss_regs; - gbe_dev->sgmii_port_regs = regs + GBE13_SGMII_MODULE_OFFSET; - gbe_dev->sgmii_port34_regs = regs + GBE13_SGMII34_MODULE_OFFSET; - gbe_dev->switch_regs = regs + GBE13_SWITCH_MODULE_OFFSET; - gbe_dev->host_port_regs = regs + GBE13_HOST_PORT_OFFSET; + gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET; + gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET; - for (i = 0; i < GBE13_NUM_HW_STATS_MOD; i++) - gbe_dev->hw_stats_regs[i] = regs + GBE13_HW_STATS_OFFSET + - (GBE_HW_STATS_REG_MAP_SZ * i); + for (i = 0; i < gbe_dev->max_num_slaves; i++) { + gbe_dev->hw_stats_regs[i] = + gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET + + (GBE_HW_STATS_REG_MAP_SZ * i); + } - gbe_dev->ale_reg = regs + GBE13_ALE_OFFSET; - gbe_dev->ale_ports = GBE13_NUM_ALE_PORTS; + gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET; + gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBE13_HOST_PORT_NUM; gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; gbe_dev->et_stats = gbe13_et_stats; gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); + gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL; /* Subsystem registers */ GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver); @@ -1869,6 +2701,80 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, return 0; } +static int set_gbenu_ethss_priv(struct gbe_priv 
*gbe_dev, + struct device_node *node) +{ + struct resource res; + void __iomem *regs; + int i, ret; + + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, + GBENU_NUM_HW_STAT_ENTRIES * + (gbe_dev->max_num_ports) * sizeof(u64), + GFP_KERNEL); + if (!gbe_dev->hw_stats) { + dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); + return -ENOMEM; + } + + ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't translate of gbenu node(%s) addr at index %d\n", + node->name, GBENU_SM_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, + "Failed to map gbenu switch module register base\n"); + return PTR_ERR(regs); + } + gbe_dev->switch_regs = regs; + + gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; + gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; + + for (i = 0; i < (gbe_dev->max_num_ports); i++) + gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + + GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i); + + gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET; + gbe_dev->ale_ports = gbe_dev->max_num_ports; + gbe_dev->host_port = GBENU_HOST_PORT_NUM; + gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; + gbe_dev->et_stats = gbenu_et_stats; + gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; + + if (IS_SS_ID_NU(gbe_dev)) + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); + else + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + GBENU_ET_STATS_PORT_SIZE; + + /* Subsystem registers */ + GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver); + + /* Switch module registers */ + GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver); + GBENU_SET_REG_OFS(gbe_dev, switch_regs, control); + GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en); + GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype); + + /* Host port registers */ + GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan); + GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen); + + /* For NU only. 2U does not need tx_pri_map. 
+ * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads + * while 2U has only 1 such thread + */ + GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map); + return 0; +} + static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, struct device_node *node, void **inst_priv) { @@ -1888,6 +2794,21 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, if (!gbe_dev) return -ENOMEM; + if (of_device_is_compatible(node, "ti,netcp-gbe-5") || + of_device_is_compatible(node, "ti,netcp-gbe")) { + gbe_dev->max_num_slaves = 4; + } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) { + gbe_dev->max_num_slaves = 8; + } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) { + gbe_dev->max_num_slaves = 1; + } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) { + gbe_dev->max_num_slaves = 2; + } else { + dev_err(dev, "device tree node for unknown device\n"); + return -EINVAL; + } + gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1; + gbe_dev->dev = dev; gbe_dev->netcp_device = netcp_device; gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE; @@ -1923,7 +2844,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, if (ret) goto quit; - ret = set_gbe_ethss14_priv(gbe_dev, node); + dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); + + if (gbe_dev->ss_version == GBE_SS_VERSION_14) + ret = set_gbe_ethss14_priv(gbe_dev, node); + else if (IS_SS_ID_MU(gbe_dev)) + ret = set_gbenu_ethss_priv(gbe_dev, node); + else + ret = -ENODEV; + if (ret) goto quit; } else if (!strcmp(node->name, "xgbe")) { @@ -1963,6 +2892,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, continue; } gbe_dev->num_slaves++; + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) + break; } if (!gbe_dev->num_slaves) @@ -1971,7 +2902,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, /* Initialize Secondary slave ports */ secondary_ports = of_get_child_by_name(node, "secondary-slave-ports"); INIT_LIST_HEAD(&gbe_dev->secondary_slaves); - if (secondary_ports) + if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves)) init_secondary_ports(gbe_dev, secondary_ports); of_node_put(secondary_ports); diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 8fb807ea1caa..de2850497c09 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -288,7 +288,7 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); * The .data field is currently only used to store quirks */ static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns; -static struct of_device_id rhine_of_tbl[] = { +static const struct of_device_id rhine_of_tbl[] = { { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks }, { } /* terminate list */ }; diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index c20206f83cc1..ae68afd50a15 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -392,7 +392,7 @@ MODULE_DEVICE_TABLE(pci, velocity_pci_id_table); * Describe the OF device identifiers that we support in this * device driver. Used for devicetree nodes. 
*/ -static struct of_device_id velocity_of_ids[] = { +static const struct of_device_id velocity_of_ids[] = { { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] }, { /* Sentinel */ }, }; diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index f5498c26b3c7..8b282d0b169c 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { + napi_complete(napi); w5100_write(priv, W5100_IMR, IR_S0); mmiowb(); - napi_complete(napi); } return rx_count; diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index ca0c631ed628..8da7b930ff59 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { + napi_complete(napi); w5300_write(priv, W5300_IMR, IR_S0); mmiowb(); - napi_complete(napi); } return rx_count; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index dbcbf0c5bcfa..690a4c36b316 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1157,7 +1157,7 @@ static int temac_of_remove(struct platform_device *op) return 0; } -static struct of_device_id temac_of_match[] = { +static const struct of_device_id temac_of_match[] = { { .compatible = "xlnx,xps-ll-temac-1.01.b", }, { .compatible = "xlnx,xps-ll-temac-2.00.a", }, { .compatible = "xlnx,xps-ll-temac-2.02.a", }, diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index a6d2860b712c..28b7e7d9c272 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -48,7 +48,7 @@ #define AXIENET_REGS_N 32 /* Match table for of_platform binding */ -static struct of_device_id axienet_of_match[] = { +static const struct of_device_id axienet_of_match[] = { { .compatible = "xlnx,axi-ethernet-1.00.a", }, { .compatible = "xlnx,axi-ethernet-1.01.a", }, { .compatible = "xlnx,axi-ethernet-2.01.a", }, diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 9d4ce388510a..2111b91f5e49 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1231,7 +1231,7 @@ static struct net_device_ops xemaclite_netdev_ops = { }; /* Match table for OF platform binding */ -static struct of_device_id xemaclite_of_match[] = { +static const struct of_device_id xemaclite_of_match[] = { { .compatible = "xlnx,opb-ethernetlite-1.01.a", }, { .compatible = "xlnx,opb-ethernetlite-1.01.b", }, { .compatible = "xlnx,xps-ethernetlite-1.00.a", }, diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 1d438bc54189..cc5efa149da1 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -19,6 +19,8 @@ */ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/hrtimer.h> +#include <linux/jiffies.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/gpio.h> @@ -52,11 +54,21 @@ struct at86rf2xx_chip_data { int (*get_desense_steps)(struct at86rf230_local *, s32); };
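
The two WIZnet hunks above reorder w5100_napi_poll() and w5300_napi_poll() so that napi_complete() runs before the chip's interrupt mask register is rewritten. If the mask were restored first, the ISR could fire and call napi_schedule() in the window before napi_complete(), and the completion would then discard that fresh wakeup and stall the RX path. Below is a minimal userspace sketch of the corrected ordering; every name in it (napi_scheduled, poll_model, and so on) is an illustrative stand-in, not kernel API.

#include <stdbool.h>
#include <stdio.h>

static bool napi_scheduled = true;	/* poll is running, so NAPI is scheduled */
static bool irq_unmasked;		/* models the W5100_IMR/W5300_IMR write */

static void napi_complete_model(void)
{
	napi_scheduled = false;		/* models clearing NAPI_STATE_SCHED */
}

static void unmask_device_irq(void)
{
	irq_unmasked = true;		/* device may interrupt again from here on */
}

static int poll_model(int rx_count, int budget)
{
	if (rx_count < budget) {
		/* Complete first, then unmask: once the IRQ is unmasked the
		 * ISR may schedule NAPI again, and completing after that
		 * point would throw the new wakeup away. */
		napi_complete_model();
		unmask_device_irq();
	}
	return rx_count;
}

int main(void)
{
	printf("polled %d, scheduled=%d, irq_unmasked=%d\n",
	       poll_model(3, 16), napi_scheduled, irq_unmasked);
	return 0;
}
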
-#define AT86RF2XX_MAX_BUF (127 + 3) +#define AT86RF2XX_MAX_BUF (127 + 3) +/* Number of tx retries needed to reach the TX_ON state; * if the count goes above this, a force change will be started. + * + * We assume the max_frame_retries (7) value of 802.15.4 here. + */ +#define AT86RF2XX_MAX_TX_RETRIES 7 +/* We use the recommended 5 minute timeout to recalibrate */ +#define AT86RF2XX_CAL_LOOP_TIMEOUT (5 * 60 * HZ) struct at86rf230_state_change { struct at86rf230_local *lp; + int irq; + struct hrtimer timer; struct spi_message msg; struct spi_transfer trx; u8 buf[AT86RF2XX_MAX_BUF]; @@ -81,10 +93,12 @@ struct at86rf230_local { struct at86rf230_state_change irq; bool tx_aret; + unsigned long cal_timeout; s8 max_frame_retries; bool is_tx; /* spinlock for is_tx protection */ spinlock_t lock; + u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; }; @@ -311,7 +325,7 @@ at86rf230_read_subreg(struct at86rf230_local *lp, int rc; rc = __at86rf230_read(lp, addr, data); - if (rc > 0) + if (!rc) *data = (*data & mask) >> shift; return rc; @@ -407,6 +421,8 @@ at86rf230_reg_volatile(struct device *dev, unsigned int reg) case RG_PHY_ED_LEVEL: case RG_IRQ_STATUS: case RG_VREG_CTRL: + case RG_PLL_CF: + case RG_PLL_DCU: return true; default: return false; @@ -470,18 +486,25 @@ at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg, u8 *tx_buf = ctx->buf; tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG; - ctx->trx.len = 2; ctx->msg.complete = complete; ctx->irq_enable = irq_enable; rc = spi_async(lp->spi, &ctx->msg); if (rc) { if (irq_enable) - enable_irq(lp->spi->irq); + enable_irq(ctx->irq); at86rf230_async_error(lp, ctx, rc); } } +static inline u8 at86rf230_state_to_force(u8 state) +{ + if (state == STATE_TX_ON) + return STATE_FORCE_TX_ON; + else + return STATE_FORCE_TRX_OFF; +} + static void at86rf230_async_state_assert(void *context) { @@ -512,10 +535,21 @@ at86rf230_async_state_assert(void *context) * in STATE_BUSY_RX_AACK, we run a force state change * to STATE_TX_ON. This is timeout handling, in case the * transceiver gets stuck in STATE_BUSY_RX_AACK. + * + * Additionally we do several retries to try to get into + * TX_ON state without forcing. If the retry count is + * greater than or equal to AT86RF2XX_MAX_TX_RETRIES we + * will do a force change. */ - if (ctx->to_state == STATE_TX_ON) { - at86rf230_async_state_change(lp, ctx, - STATE_FORCE_TX_ON, + if (ctx->to_state == STATE_TX_ON || + ctx->to_state == STATE_TRX_OFF) { + u8 state = ctx->to_state; + + if (lp->tx_retry >= AT86RF2XX_MAX_TX_RETRIES) + state = at86rf230_state_to_force(state); + lp->tx_retry++; + + at86rf230_async_state_change(lp, ctx, state, ctx->complete, ctx->irq_enable); return; @@ -531,6 +565,19 @@ done: ctx->complete(context); } +static enum hrtimer_restart at86rf230_async_state_timer(struct hrtimer *timer) +{ + struct at86rf230_state_change *ctx = + container_of(timer, struct at86rf230_state_change, timer); + struct at86rf230_local *lp = ctx->lp; + + at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, + at86rf230_async_state_assert, + ctx->irq_enable); + + return HRTIMER_NORESTART; +} + /* Do state change timing delay. */ static void at86rf230_async_state_delay(void *context) @@ -539,6 +586,7 @@ at86rf230_async_state_delay(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; struct at86rf2xx_chip_data *c = lp->data; bool force = false; + ktime_t tim;
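
The async_state_assert hunk above replaces the old one-shot force change with a retry-then-force policy: the plain TX_ON or TRX_OFF transition is requested up to AT86RF2XX_MAX_TX_RETRIES times, and only then does the driver escalate to the corresponding FORCE_* command. A standalone C sketch of that decision follows; the enum values are placeholders, not the real AT86RF2XX register encodings.

#include <stdint.h>
#include <stdio.h>

#define MAX_TX_RETRIES	7	/* mirrors AT86RF2XX_MAX_TX_RETRIES */

enum state { TX_ON, TRX_OFF, FORCE_TX_ON, FORCE_TRX_OFF };

/* Same mapping as at86rf230_state_to_force() in the hunk above. */
static enum state state_to_force(enum state s)
{
	return (s == TX_ON) ? FORCE_TX_ON : FORCE_TRX_OFF;
}

/* Pick the state to request next, escalating after too many attempts. */
static enum state next_request(enum state wanted, uint8_t *retries)
{
	enum state s = wanted;

	if (*retries >= MAX_TX_RETRIES)
		s = state_to_force(s);	/* soft attempts exhausted */
	(*retries)++;
	return s;
}

int main(void)
{
	uint8_t retries = 0;
	int i;

	for (i = 0; i < 9; i++)
		printf("attempt %d requests state %d\n", i,
		       next_request(TX_ON, &retries));
	return 0;
}
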
/* The force state changes will show up as normal states in the * state status subregister. We change the to_state to the @@ -562,11 +610,15 @@ at86rf230_async_state_delay(void *context) case STATE_TRX_OFF: switch (ctx->to_state) { case STATE_RX_AACK_ON: - usleep_range(c->t_off_to_aack, c->t_off_to_aack + 10); + tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC); goto change; case STATE_TX_ON: - usleep_range(c->t_off_to_tx_on, - c->t_off_to_tx_on + 10); + tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC); + /* A state change from TRX_OFF to TX_ON does a + * calibration, so we need to reset the timeout for the + * next one. + */ + lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; goto change; default: break; @@ -574,14 +626,15 @@ at86rf230_async_state_delay(void *context) break; case STATE_BUSY_RX_AACK: switch (ctx->to_state) { + case STATE_TRX_OFF: case STATE_TX_ON: /* Wait for worst case receiving time if we * didn't make a force change from BUSY_RX_AACK - * to TX_ON. + * to TX_ON or TRX_OFF. */ if (!force) { - usleep_range(c->t_frame + c->t_p_ack, - c->t_frame + c->t_p_ack + 1000); + tim = ktime_set(0, (c->t_frame + c->t_p_ack) * + NSEC_PER_USEC); goto change; } break; @@ -593,7 +646,7 @@ at86rf230_async_state_delay(void *context) case STATE_P_ON: switch (ctx->to_state) { case STATE_TRX_OFF: - usleep_range(c->t_reset_to_off, c->t_reset_to_off + 10); + tim = ktime_set(0, c->t_reset_to_off * NSEC_PER_USEC); goto change; default: break; @@ -604,12 +657,10 @@ at86rf230_async_state_delay(void *context) } /* Default delay is 1us in most cases */ - udelay(1); + tim = ktime_set(0, NSEC_PER_USEC); change: - at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, - at86rf230_async_state_assert, - ctx->irq_enable); + hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL); } static void @@ -645,12 +696,11 @@ at86rf230_async_state_change_start(void *context) */ buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE; buf[1] = ctx->to_state; - ctx->trx.len = 2; ctx->msg.complete = at86rf230_async_state_delay; rc = spi_async(lp->spi, &ctx->msg); if (rc) { if (ctx->irq_enable) - enable_irq(lp->spi->irq); + enable_irq(ctx->irq); at86rf230_async_error(lp, ctx, rc); } @@ -708,11 +758,10 @@ at86rf230_tx_complete(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - struct sk_buff *skb = lp->tx_skb; - enable_irq(lp->spi->irq); + enable_irq(ctx->irq); - ieee802154_xmit_complete(lp->hw, skb, !lp->tx_aret); + ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret); } static void @@ -721,7 +770,7 @@ at86rf230_tx_on(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON, + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, at86rf230_tx_complete, true); } @@ -765,14 +814,25 @@ at86rf230_tx_trac_status(void *context) } static void -at86rf230_rx(struct at86rf230_local *lp, - const u8 *data, const u8 len, const u8 lqi) +at86rf230_rx_read_frame_complete(void *context) { - struct sk_buff *skb; + struct at86rf230_state_change *ctx = context; + struct at86rf230_local *lp = ctx->lp; u8 rx_local_buf[AT86RF2XX_MAX_BUF]; + const u8 *buf = ctx->buf; + struct sk_buff *skb; + u8 len, lqi; - memcpy(rx_local_buf, data, len); - enable_irq(lp->spi->irq); + len = buf[1]; + if (!ieee802154_is_valid_psdu_len(len)) { + dev_vdbg(&lp->spi->dev, "corrupted frame received\n"); + len = IEEE802154_MTU; + } + lqi = buf[2 + len]; + + memcpy(rx_local_buf, buf + 2, len); + ctx->trx.len = 2; + enable_irq(ctx->irq); skb = dev_alloc_skb(IEEE802154_MTU); if (!skb) {
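
The state-delay hunks above swap usleep_range() for a relative hrtimer because the state machine now runs entirely from SPI completion callbacks, where sleeping is forbidden; when the timer fires, the handler re-reads TRX_STATUS to assert the transition. Here is a rough userspace analogue built on a POSIX one-shot timer (link with -lrt on older glibc); the 80us delay is an illustrative figure, not a value taken from the diff.

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define NSEC_PER_USEC	1000L

/* Plays the role of at86rf230_async_state_timer(): runs after the delay. */
static void state_assert(union sigval sv)
{
	(void)sv;
	printf("timer fired: would read TRX_STATUS now\n");
}

int main(void)
{
	timer_t t;
	struct sigevent ev = {
		.sigev_notify = SIGEV_THREAD,
		.sigev_notify_function = state_assert,
	};
	struct itimerspec its = {
		.it_value.tv_nsec = 80 * NSEC_PER_USEC,	/* one-shot, relative */
	};

	timer_create(CLOCK_MONOTONIC, &ev, &t);
	timer_settime(t, 0, &its, NULL);
	sleep(1);	/* give the callback time to run */
	return 0;
}
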
@@ -785,51 +845,34 @@ at86rf230_rx(struct at86rf230_local *lp, } static void -at86rf230_rx_read_frame_complete(void *context) +at86rf230_rx_read_frame(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - const u8 *buf = lp->irq.buf; - u8 len = buf[1]; - - if (!ieee802154_is_valid_psdu_len(len)) { - dev_vdbg(&lp->spi->dev, "corrupted frame received\n"); - len = IEEE802154_MTU; - } - - at86rf230_rx(lp, buf + 2, len, buf[2 + len]); -} - -static void -at86rf230_rx_read_frame(struct at86rf230_local *lp) -{ + u8 *buf = ctx->buf; int rc; - u8 *buf = lp->irq.buf; - buf[0] = CMD_FB; - lp->irq.trx.len = AT86RF2XX_MAX_BUF; - lp->irq.msg.complete = at86rf230_rx_read_frame_complete; - rc = spi_async(lp->spi, &lp->irq.msg); + ctx->trx.len = AT86RF2XX_MAX_BUF; + ctx->msg.complete = at86rf230_rx_read_frame_complete; + rc = spi_async(lp->spi, &ctx->msg); if (rc) { - enable_irq(lp->spi->irq); - at86rf230_async_error(lp, &lp->irq, rc); + ctx->trx.len = 2; + enable_irq(ctx->irq); + at86rf230_async_error(lp, ctx, rc); } } static void at86rf230_rx_trac_check(void *context) { - struct at86rf230_state_change *ctx = context; - struct at86rf230_local *lp = ctx->lp; - /* Possible check on trac status here. This could be useful to make * some stats on why receive failed. Not used at the moment, but it's * maybe timing relevant. Datasheet doesn't say anything about this. * The programming guide says to do it. */ - at86rf230_rx_read_frame(lp); + at86rf230_rx_read_frame(context); } static void @@ -862,13 +905,13 @@ at86rf230_irq_status(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - const u8 *buf = lp->irq.buf; + const u8 *buf = ctx->buf; const u8 irq = buf[1]; if (irq & IRQ_TRX_END) { at86rf230_irq_trx_end(lp); } else { - enable_irq(lp->spi->irq); + enable_irq(ctx->irq); dev_err(&lp->spi->dev, "not supported irq %02x received\n", irq); } @@ -884,7 +927,6 @@ static irqreturn_t at86rf230_isr(int irq, void *data) disable_irq_nosync(irq); buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG; - ctx->trx.len = 2; ctx->msg.complete = at86rf230_irq_status; rc = spi_async(lp->spi, &ctx->msg); if (rc) { @@ -919,7 +961,7 @@ at86rf230_write_frame(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; struct sk_buff *skb = lp->tx_skb; - u8 *buf = lp->tx.buf; + u8 *buf = ctx->buf; int rc; spin_lock(&lp->lock); @@ -929,11 +971,13 @@ at86rf230_write_frame(void *context) buf[0] = CMD_FB | CMD_WRITE; buf[1] = skb->len + 2; memcpy(buf + 2, skb->data, skb->len); - lp->tx.trx.len = skb->len + 2; - lp->tx.msg.complete = at86rf230_write_frame_complete; - rc = spi_async(lp->spi, &lp->tx.msg); - if (rc) + ctx->trx.len = skb->len + 2; + ctx->msg.complete = at86rf230_write_frame_complete; + rc = spi_async(lp->spi, &ctx->msg); + if (rc) { + ctx->trx.len = 2; at86rf230_async_error(lp, ctx, rc); + } } static void @@ -946,24 +990,45 @@ at86rf230_xmit_tx_on(void *context) at86rf230_write_frame, false); } +static void +at86rf230_xmit_start(void *context) +{ + struct at86rf230_state_change *ctx = context; + struct at86rf230_local *lp = ctx->lp; + + /* In ARET mode we need to go into STATE_TX_ARET_ON after we + * are in STATE_TX_ON. The path differs here, so we change + * the complete handler. 
+ */ + if (lp->tx_aret) + at86rf230_async_state_change(lp, ctx, STATE_TX_ON, + at86rf230_xmit_tx_on, false); + else + at86rf230_async_state_change(lp, ctx, STATE_TX_ON, + at86rf230_write_frame, false); +} + static int at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { struct at86rf230_local *lp = hw->priv; struct at86rf230_state_change *ctx = &lp->tx; - void (*tx_complete)(void *context) = at86rf230_write_frame; - lp->tx_skb = skb; + lp->tx_retry = 0; - /* In ARET mode we need to go into STATE_TX_ARET_ON after we - * are in STATE_TX_ON. The pfad differs here, so we change - * the complete handler. + /* After 5 minutes in PLL on the same frequency we run the + * calibration loops again, as recommended by the at86rf2xx datasheets. + * + * The calibration is initiated by a state change from TRX_OFF + * to TX_ON; lp->cal_timeout is then reinitialized by the state_delay + * function so the next run starts in 5 minutes. */ - if (lp->tx_aret) - tx_complete = at86rf230_xmit_tx_on; - - at86rf230_async_state_change(lp, ctx, STATE_TX_ON, tx_complete, false); + if (time_is_before_jiffies(lp->cal_timeout)) + at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF, + at86rf230_xmit_start, false); + else + at86rf230_xmit_start(ctx); return 0; } @@ -979,6 +1044,9 @@ at86rf230_ed(struct ieee802154_hw *hw, u8 *level) static int at86rf230_start(struct ieee802154_hw *hw) { + struct at86rf230_local *lp = hw->priv; + + lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON); } @@ -1059,6 +1127,8 @@ at86rf230_channel(struct ieee802154_hw *hw, u8 page, u8 channel) /* Wait for PLL */ usleep_range(lp->data->t_channel_switch, lp->data->t_channel_switch + 10); + + lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; return rc; } @@ -1528,25 +1598,37 @@ static void at86rf230_setup_spi_messages(struct at86rf230_local *lp) { lp->state.lp = lp; + lp->state.irq = lp->spi->irq; spi_message_init(&lp->state.msg); lp->state.msg.context = &lp->state; + lp->state.trx.len = 2; lp->state.trx.tx_buf = lp->state.buf; lp->state.trx.rx_buf = lp->state.buf; spi_message_add_tail(&lp->state.trx, &lp->state.msg); + hrtimer_init(&lp->state.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + lp->state.timer.function = at86rf230_async_state_timer; lp->irq.lp = lp; + lp->irq.irq = lp->spi->irq; spi_message_init(&lp->irq.msg); lp->irq.msg.context = &lp->irq; + lp->irq.trx.len = 2; lp->irq.trx.tx_buf = lp->irq.buf; lp->irq.trx.rx_buf = lp->irq.buf; spi_message_add_tail(&lp->irq.trx, &lp->irq.msg); + hrtimer_init(&lp->irq.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + lp->irq.timer.function = at86rf230_async_state_timer; lp->tx.lp = lp; + lp->tx.irq = lp->spi->irq; spi_message_init(&lp->tx.msg); lp->tx.msg.context = &lp->tx; + lp->tx.trx.len = 2; lp->tx.trx.tx_buf = lp->tx.buf; lp->tx.trx.rx_buf = lp->tx.buf; spi_message_add_tail(&lp->tx.trx, &lp->tx.msg); + hrtimer_init(&lp->tx.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + lp->tx.timer.function = at86rf230_async_state_timer; } static int at86rf230_probe(struct spi_device *spi) @@ -1555,7 +1637,7 @@ static int at86rf230_probe(struct spi_device *spi) struct at86rf230_local *lp; unsigned int status; int rc, irq_type, rstn, slp_tr; - u8 xtal_trim; + u8 xtal_trim = 0; if (!spi->irq) { dev_err(&spi->dev, "no IRQ specified\n"); diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index 181b349b060e..f833b8bb6663 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -714,11 +714,45 @@ 
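
The xmit path above implements the periodic recalibration: a jiffies deadline is armed when the interface starts, on every channel change, and on every TRX_OFF to TX_ON transition; a transmit that finds the deadline expired detours through TRX_OFF first so the PLL calibration loops run. A compact userspace model of the same deadline logic, with seconds standing in for jiffies and all names invented:

#include <stdio.h>
#include <time.h>

#define CAL_LOOP_TIMEOUT (5 * 60)	/* seconds, mirroring 5 * 60 * HZ */

static time_t cal_deadline;

static void arm_cal_deadline(void)
{
	cal_deadline = time(NULL) + CAL_LOOP_TIMEOUT;
}

static void xmit_model(void)
{
	if (time(NULL) >= cal_deadline) {
		/* Deadline passed: recalibrate via TRX_OFF -> TX_ON first.
		 * The state machine re-arms the deadline on that edge. */
		printf("recalibrating before transmit\n");
		arm_cal_deadline();
	}
	printf("transmit frame\n");
}

int main(void)
{
	arm_cal_deadline();	/* as the start() callback does on ifup */
	xmit_model();
	return 0;
}
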
static irqreturn_t cc2520_sfd_isr(int irq, void *data) return IRQ_HANDLED; } +static int cc2520_get_platform_data(struct spi_device *spi, + struct cc2520_platform_data *pdata) +{ + struct device_node *np = spi->dev.of_node; + struct cc2520_private *priv = spi_get_drvdata(spi); + + if (!np) { + struct cc2520_platform_data *spi_pdata = spi->dev.platform_data; + if (!spi_pdata) + return -ENOENT; + *pdata = *spi_pdata; + return 0; + } + + pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0); + priv->fifo_pin = pdata->fifo; + + pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0); + + pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0); + pdata->cca = of_get_named_gpio(np, "cca-gpio", 0); + pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0); + pdata->reset = of_get_named_gpio(np, "reset-gpio", 0); + + pdata->amplified = of_property_read_bool(np, "amplified"); + + return 0; +} + static int cc2520_hw_init(struct cc2520_private *priv) { u8 status = 0, state = 0xff; int ret; int timeout = 100; + struct cc2520_platform_data pdata; + + ret = cc2520_get_platform_data(priv->spi, &pdata); + if (ret) + goto err_ret; ret = cc2520_read_register(priv, CC2520_FSMSTAT1, &state); if (ret) @@ -741,11 +775,47 @@ static int cc2520_hw_init(struct cc2520_private *priv) dev_vdbg(&priv->spi->dev, "oscillator brought up\n"); - /* Registers default value: section 28.1 in Datasheet */ - ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7); - if (ret) - goto err_ret; + /* If the CC2520 is connected to a CC2591 amplifier, we must both + * configure GPIOs on the CC2520 to correctly configure the CC2591 + * and change a couple settings of the CC2520 to work with the + * amplifier. See section 8 page 17 of TI application note AN065. + * http://www.ti.com/lit/an/swra229a/swra229a.pdf + */ + if (pdata.amplified) { + ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF9); + if (ret) + goto err_ret; + + ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x16); + if (ret) + goto err_ret; + + ret = cc2520_write_register(priv, CC2520_GPIOCTRL0, 0x46); + if (ret) + goto err_ret; + + ret = cc2520_write_register(priv, CC2520_GPIOCTRL5, 0x47); + if (ret) + goto err_ret; + + ret = cc2520_write_register(priv, CC2520_GPIOPOLARITY, 0x1e); + if (ret) + goto err_ret; + + ret = cc2520_write_register(priv, CC2520_TXCTRL, 0xc1); + if (ret) + goto err_ret; + } else { + ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7); + if (ret) + goto err_ret; + ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11); + if (ret) + goto err_ret; + } + + /* Registers default value: section 28.1 in Datasheet */ ret = cc2520_write_register(priv, CC2520_CCACTRL0, 0x1A); if (ret) goto err_ret; @@ -770,10 +840,6 @@ static int cc2520_hw_init(struct cc2520_private *priv) if (ret) goto err_ret; - ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11); - if (ret) - goto err_ret; - ret = cc2520_write_register(priv, CC2520_ADCTEST0, 0x10); if (ret) goto err_ret; @@ -808,40 +874,10 @@ err_ret: return ret; } -static struct cc2520_platform_data * -cc2520_get_platform_data(struct spi_device *spi) -{ - struct cc2520_platform_data *pdata; - struct device_node *np = spi->dev.of_node; - struct cc2520_private *priv = spi_get_drvdata(spi); - - if (!np) - return spi->dev.platform_data; - - pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - goto done; - - pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0); - priv->fifo_pin = pdata->fifo; - - pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0); - - pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 
0); - pdata->cca = of_get_named_gpio(np, "cca-gpio", 0); - pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0); - pdata->reset = of_get_named_gpio(np, "reset-gpio", 0); - - spi->dev.platform_data = pdata; - -done: - return pdata; -} - static int cc2520_probe(struct spi_device *spi) { struct cc2520_private *priv; - struct cc2520_platform_data *pdata; + struct cc2520_platform_data pdata; int ret; priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL); @@ -850,8 +886,8 @@ static int cc2520_probe(struct spi_device *spi) spi_set_drvdata(spi, priv); - pdata = cc2520_get_platform_data(spi); - if (!pdata) { + ret = cc2520_get_platform_data(spi, &pdata); + if (ret < 0) { dev_err(&spi->dev, "no platform data\n"); return -EINVAL; } @@ -869,76 +905,76 @@ static int cc2520_probe(struct spi_device *spi) init_completion(&priv->tx_complete); /* Request all the gpio's */ - if (!gpio_is_valid(pdata->fifo)) { + if (!gpio_is_valid(pdata.fifo)) { dev_err(&spi->dev, "fifo gpio is not valid\n"); ret = -EINVAL; goto err_hw_init; } - ret = devm_gpio_request_one(&spi->dev, pdata->fifo, + ret = devm_gpio_request_one(&spi->dev, pdata.fifo, GPIOF_IN, "fifo"); if (ret) goto err_hw_init; - if (!gpio_is_valid(pdata->cca)) { + if (!gpio_is_valid(pdata.cca)) { dev_err(&spi->dev, "cca gpio is not valid\n"); ret = -EINVAL; goto err_hw_init; } - ret = devm_gpio_request_one(&spi->dev, pdata->cca, + ret = devm_gpio_request_one(&spi->dev, pdata.cca, GPIOF_IN, "cca"); if (ret) goto err_hw_init; - if (!gpio_is_valid(pdata->fifop)) { + if (!gpio_is_valid(pdata.fifop)) { dev_err(&spi->dev, "fifop gpio is not valid\n"); ret = -EINVAL; goto err_hw_init; } - ret = devm_gpio_request_one(&spi->dev, pdata->fifop, + ret = devm_gpio_request_one(&spi->dev, pdata.fifop, GPIOF_IN, "fifop"); if (ret) goto err_hw_init; - if (!gpio_is_valid(pdata->sfd)) { + if (!gpio_is_valid(pdata.sfd)) { dev_err(&spi->dev, "sfd gpio is not valid\n"); ret = -EINVAL; goto err_hw_init; } - ret = devm_gpio_request_one(&spi->dev, pdata->sfd, + ret = devm_gpio_request_one(&spi->dev, pdata.sfd, GPIOF_IN, "sfd"); if (ret) goto err_hw_init; - if (!gpio_is_valid(pdata->reset)) { + if (!gpio_is_valid(pdata.reset)) { dev_err(&spi->dev, "reset gpio is not valid\n"); ret = -EINVAL; goto err_hw_init; } - ret = devm_gpio_request_one(&spi->dev, pdata->reset, + ret = devm_gpio_request_one(&spi->dev, pdata.reset, GPIOF_OUT_INIT_LOW, "reset"); if (ret) goto err_hw_init; - if (!gpio_is_valid(pdata->vreg)) { + if (!gpio_is_valid(pdata.vreg)) { dev_err(&spi->dev, "vreg gpio is not valid\n"); ret = -EINVAL; goto err_hw_init; } - ret = devm_gpio_request_one(&spi->dev, pdata->vreg, + ret = devm_gpio_request_one(&spi->dev, pdata.vreg, GPIOF_OUT_INIT_LOW, "vreg"); if (ret) goto err_hw_init; - gpio_set_value(pdata->vreg, HIGH); + gpio_set_value(pdata.vreg, HIGH); usleep_range(100, 150); - gpio_set_value(pdata->reset, HIGH); + gpio_set_value(pdata.reset, HIGH); usleep_range(200, 250); ret = cc2520_hw_init(priv); @@ -947,7 +983,7 @@ static int cc2520_probe(struct spi_device *spi) /* Set up fifop interrupt */ ret = devm_request_irq(&spi->dev, - gpio_to_irq(pdata->fifop), + gpio_to_irq(pdata.fifop), cc2520_fifop_isr, IRQF_TRIGGER_RISING, dev_name(&spi->dev), @@ -959,7 +995,7 @@ static int cc2520_probe(struct spi_device *spi) /* Set up sfd interrupt */ ret = devm_request_irq(&spi->dev, - gpio_to_irq(pdata->sfd), + gpio_to_irq(pdata.sfd), cc2520_sfd_isr, IRQF_TRIGGER_FALLING, dev_name(&spi->dev), diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index 
32efbd48f326..fb276f64cd64 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c @@ -78,6 +78,7 @@ #include <linux/bitops.h> #include <linux/property.h> #include <linux/acpi.h> +#include <linux/jiffies.h> MODULE_AUTHOR("Tom Lendacky <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL"); @@ -100,6 +101,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define XGBE_PHY_SPEED_2500 1 #define XGBE_PHY_SPEED_10000 2 +#define XGBE_AN_MS_TIMEOUT 500 + #define XGBE_AN_INT_CMPLT 0x01 #define XGBE_AN_INC_LINK 0x02 #define XGBE_AN_PG_RCV 0x04 @@ -434,6 +437,7 @@ struct amd_xgbe_phy_priv { unsigned int an_supported; unsigned int parallel_detect; unsigned int fec_ability; + unsigned long an_start; unsigned int lpm_ctrl; /* CTRL1 for resume */ }; @@ -902,8 +906,23 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev) { struct amd_xgbe_phy_priv *priv = phydev->priv; enum amd_xgbe_phy_rx *state; + unsigned long an_timeout; int ret; + if (!priv->an_start) { + priv->an_start = jiffies; + } else { + an_timeout = priv->an_start + + msecs_to_jiffies(XGBE_AN_MS_TIMEOUT); + if (time_after(jiffies, an_timeout)) { + /* Auto-negotiation timed out, reset state */ + priv->kr_state = AMD_XGBE_RX_BPA; + priv->kx_state = AMD_XGBE_RX_BPA; + + priv->an_start = jiffies; + } + } + state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state : &priv->kx_state; @@ -932,8 +951,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev) if (amd_xgbe_phy_in_kr_mode(phydev)) { priv->kr_state = AMD_XGBE_RX_ERROR; - if (!(phydev->supported & SUPPORTED_1000baseKX_Full) && - !(phydev->supported & SUPPORTED_2500baseX_Full)) + if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) && + !(phydev->advertising & SUPPORTED_2500baseX_Full)) return AMD_XGBE_AN_NO_LINK; if (priv->kx_state != AMD_XGBE_RX_BPA) @@ -941,7 +960,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev) } else { priv->kx_state = AMD_XGBE_RX_ERROR; - if (!(phydev->supported & SUPPORTED_10000baseKR_Full)) + if (!(phydev->advertising & SUPPORTED_10000baseKR_Full)) return AMD_XGBE_AN_NO_LINK; if (priv->kr_state != AMD_XGBE_RX_BPA) @@ -1078,6 +1097,7 @@ again: priv->an_state = AMD_XGBE_AN_READY; priv->kr_state = AMD_XGBE_RX_BPA; priv->kx_state = AMD_XGBE_RX_BPA; + priv->an_start = 0; } if (cur_state != priv->an_state) @@ -1101,7 +1121,7 @@ static int amd_xgbe_an_init(struct phy_device *phydev) if (ret < 0) return ret; - if (phydev->supported & SUPPORTED_10000baseR_FEC) + if (phydev->advertising & SUPPORTED_10000baseR_FEC) ret |= 0xc000; else ret &= ~0xc000; @@ -1113,13 +1133,13 @@ static int amd_xgbe_an_init(struct phy_device *phydev) if (ret < 0) return ret; - if (phydev->supported & SUPPORTED_10000baseKR_Full) + if (phydev->advertising & SUPPORTED_10000baseKR_Full) ret |= 0x80; else ret &= ~0x80; - if ((phydev->supported & SUPPORTED_1000baseKX_Full) || - (phydev->supported & SUPPORTED_2500baseX_Full)) + if ((phydev->advertising & SUPPORTED_1000baseKX_Full) || + (phydev->advertising & SUPPORTED_2500baseX_Full)) ret |= 0x20; else ret &= ~0x20; @@ -1131,12 +1151,12 @@ static int amd_xgbe_an_init(struct phy_device *phydev) if (ret < 0) return ret; - if (phydev->supported & SUPPORTED_Pause) + if (phydev->advertising & SUPPORTED_Pause) ret |= 0x400; else ret &= ~0x400; - if (phydev->supported & SUPPORTED_Asym_Pause) + if (phydev->advertising & SUPPORTED_Asym_Pause) ret |= 0x800; else ret &= ~0x800; @@ -1212,38 +1232,14 @@ static int 
amd_xgbe_phy_config_init(struct phy_device *phydev) priv->an_irq_allocated = 1; } - ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY); - if (ret < 0) - return ret; - priv->fec_ability = ret & XGBE_PHY_FEC_MASK; - - /* Initialize supported features */ - phydev->supported = SUPPORTED_Autoneg; - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phydev->supported |= SUPPORTED_Backplane; - phydev->supported |= SUPPORTED_10000baseKR_Full; - switch (priv->speed_set) { - case AMD_XGBE_PHY_SPEEDSET_1000_10000: - phydev->supported |= SUPPORTED_1000baseKX_Full; - break; - case AMD_XGBE_PHY_SPEEDSET_2500_10000: - phydev->supported |= SUPPORTED_2500baseX_Full; - break; - } - - if (priv->fec_ability & XGBE_PHY_FEC_ENABLE) - phydev->supported |= SUPPORTED_10000baseR_FEC; - - phydev->advertising = phydev->supported; - /* Set initial mode - call the mode setting routines * directly to insure we are properly configured */ - if (phydev->supported & SUPPORTED_10000baseKR_Full) + if (phydev->advertising & SUPPORTED_10000baseKR_Full) ret = amd_xgbe_phy_xgmii_mode(phydev); - else if (phydev->supported & SUPPORTED_1000baseKX_Full) + else if (phydev->advertising & SUPPORTED_1000baseKX_Full) ret = amd_xgbe_phy_gmii_mode(phydev); - else if (phydev->supported & SUPPORTED_2500baseX_Full) + else if (phydev->advertising & SUPPORTED_2500baseX_Full) ret = amd_xgbe_phy_gmii_2500_mode(phydev); else ret = -EINVAL; @@ -1315,10 +1311,10 @@ static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev) disable_irq(priv->an_irq); /* Start auto-negotiation in a supported mode */ - if (phydev->supported & SUPPORTED_10000baseKR_Full) + if (phydev->advertising & SUPPORTED_10000baseKR_Full) ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR); - else if ((phydev->supported & SUPPORTED_1000baseKX_Full) || - (phydev->supported & SUPPORTED_2500baseX_Full)) + else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) || + (phydev->advertising & SUPPORTED_2500baseX_Full)) ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX); else ret = -EINVAL; @@ -1746,6 +1742,29 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev) sizeof(priv->serdes_dfe_tap_ena)); } + /* Initialize supported features */ + phydev->supported = SUPPORTED_Autoneg; + phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + phydev->supported |= SUPPORTED_Backplane; + phydev->supported |= SUPPORTED_10000baseKR_Full; + switch (priv->speed_set) { + case AMD_XGBE_PHY_SPEEDSET_1000_10000: + phydev->supported |= SUPPORTED_1000baseKX_Full; + break; + case AMD_XGBE_PHY_SPEEDSET_2500_10000: + phydev->supported |= SUPPORTED_2500baseX_Full; + break; + } + + ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY); + if (ret < 0) + return ret; + priv->fec_ability = ret & XGBE_PHY_FEC_MASK; + if (priv->fec_ability & XGBE_PHY_FEC_ENABLE) + phydev->supported |= SUPPORTED_10000baseR_FEC; + + phydev->advertising = phydev->supported; + phydev->priv = priv; if (!priv->adev || acpi_disabled) @@ -1817,6 +1836,7 @@ static struct phy_driver amd_xgbe_phy_driver[] = { .phy_id_mask = XGBE_PHY_MASK, .name = "AMD XGBE PHY", .features = 0, + .flags = PHY_IS_INTERNAL, .probe = amd_xgbe_phy_probe, .remove = amd_xgbe_phy_remove, .soft_reset = amd_xgbe_phy_soft_reset, diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 6deac6d32f57..414fdf1f343f 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -187,7 +187,7 @@ static int unimac_mdio_remove(struct platform_device 
*pdev) return 0; } -static struct of_device_id unimac_mdio_ids[] = { +static const struct of_device_id unimac_mdio_ids[] = { { .compatible = "brcm,genet-mdio-v4", }, { .compatible = "brcm,genet-mdio-v3", }, { .compatible = "brcm,genet-mdio-v2", }, diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 0a0578a592b8..49ce7ece5af3 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -249,7 +249,7 @@ static int mdio_gpio_remove(struct platform_device *pdev) return 0; } -static struct of_device_id mdio_gpio_of_match[] = { +static const struct of_device_id mdio_gpio_of_match[] = { { .compatible = "virtual,mdio-gpio", }, { /* sentinel */ } }; diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 320eb15315c8..1a87a585e74d 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -99,7 +99,7 @@ static int mdio_mux_gpio_remove(struct platform_device *pdev) return 0; } -static struct of_device_id mdio_mux_gpio_match[] = { +static const struct of_device_id mdio_mux_gpio_match[] = { { .compatible = "mdio-mux-gpio", }, diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c index 0aa985c74014..2377c1341172 100644 --- a/drivers/net/phy/mdio-mux-mmioreg.c +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -145,7 +145,7 @@ static int mdio_mux_mmioreg_remove(struct platform_device *pdev) return 0; } -static struct of_device_id mdio_mux_mmioreg_match[] = { +static const struct of_device_id mdio_mux_mmioreg_match[] = { { .compatible = "mdio-mux-mmioreg", }, diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index c81052486edc..c838ad6155f7 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -252,7 +252,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev) return 0; } -static struct of_device_id octeon_mdiobus_match[] = { +static const struct of_device_id octeon_mdiobus_match[] = { { .compatible = "cavium,octeon-3860-mdio", }, diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 3eed708a6182..fe48f4c51373 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c @@ -300,9 +300,18 @@ static const struct driver_info cx82310_info = { .tx_fixup = cx82310_tx_fixup, }; +#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ + USB_DEVICE_ID_MATCH_DEV_INFO, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bDeviceClass = (cl), \ + .bDeviceSubClass = (sc), \ + .bDeviceProtocol = (pr) + static const struct usb_device_id products[] = { { - USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), + USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), .driver_info = (unsigned long) &cx82310_info }, { }, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f1ff3666f090..59b0e9754ae3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi) { int i; - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->max_queue_pairs; i++) { + napi_hash_del(&vi->rq[i].napi); netif_napi_del(&vi->rq[i].napi); + } kfree(vi->rq); kfree(vi->sq); @@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev) cancel_delayed_work_sync(&vi->refill); if (netif_running(vi->dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) { + for (i = 0; i < vi->max_queue_pairs; i++) napi_disable(&vi->rq[i].napi); - napi_hash_del(&vi->rq[i].napi); - 
netif_napi_del(&vi->rq[i].napi); - } } remove_vq_common(vi); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1e0a775ea882..6080f8e7b0cd 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -127,10 +127,6 @@ struct vxlan_dev { __u8 ttl; u32 flags; /* VXLAN_F_* in vxlan.h */ - struct work_struct sock_work; - struct work_struct igmp_join; - struct work_struct igmp_leave; - unsigned long age_interval; struct timer_list age_timer; spinlock_t hash_lock; @@ -144,8 +140,6 @@ struct vxlan_dev { static u32 vxlan_salt __read_mostly; static struct workqueue_struct *vxlan_wq; -static void vxlan_sock_work(struct work_struct *work); - #if IS_ENABLED(CONFIG_IPV6) static inline bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) @@ -1072,11 +1066,6 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) return false; } -static void vxlan_sock_hold(struct vxlan_sock *vs) -{ - atomic_inc(&vs->refcnt); -} - void vxlan_sock_release(struct vxlan_sock *vs) { struct sock *sk = vs->sock->sk; @@ -1095,17 +1084,16 @@ void vxlan_sock_release(struct vxlan_sock *vs) } EXPORT_SYMBOL_GPL(vxlan_sock_release); -/* Callback to update multicast group membership when first VNI on +/* Update multicast group membership when first VNI on * multicast address is brought up - * Done as workqueue because ip_mc_join_group acquires RTNL. */ -static void vxlan_igmp_join(struct work_struct *work) +static int vxlan_igmp_join(struct vxlan_dev *vxlan) { - struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join); struct vxlan_sock *vs = vxlan->vn_sock; struct sock *sk = vs->sock->sk; union vxlan_addr *ip = &vxlan->default_dst.remote_ip; int ifindex = vxlan->default_dst.remote_ifindex; + int ret = -EINVAL; lock_sock(sk); if (ip->sa.sa_family == AF_INET) { @@ -1114,27 +1102,26 @@ static void vxlan_igmp_join(struct work_struct *work) .imr_ifindex = ifindex, }; - ip_mc_join_group(sk, &mreq); + ret = ip_mc_join_group(sk, &mreq); #if IS_ENABLED(CONFIG_IPV6) } else { - ipv6_stub->ipv6_sock_mc_join(sk, ifindex, - &ip->sin6.sin6_addr); + ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex, + &ip->sin6.sin6_addr); #endif } release_sock(sk); - vxlan_sock_release(vs); - dev_put(vxlan->dev); + return ret; } /* Inverse of vxlan_igmp_join when last VNI is brought down */ -static void vxlan_igmp_leave(struct work_struct *work) +static int vxlan_igmp_leave(struct vxlan_dev *vxlan) { - struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave); struct vxlan_sock *vs = vxlan->vn_sock; struct sock *sk = vs->sock->sk; union vxlan_addr *ip = &vxlan->default_dst.remote_ip; int ifindex = vxlan->default_dst.remote_ifindex; + int ret = -EINVAL; lock_sock(sk); if (ip->sa.sa_family == AF_INET) { @@ -1143,18 +1130,16 @@ static void vxlan_igmp_leave(struct work_struct *work) .imr_ifindex = ifindex, }; - ip_mc_leave_group(sk, &mreq); + ret = ip_mc_leave_group(sk, &mreq); #if IS_ENABLED(CONFIG_IPV6) } else { - ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, - &ip->sin6.sin6_addr); + ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex, + &ip->sin6.sin6_addr); #endif } - release_sock(sk); - vxlan_sock_release(vs); - dev_put(vxlan->dev); + return ret; } static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, @@ -1218,7 +1203,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto drop; flags &= ~VXLAN_HF_RCO; - vni &= VXLAN_VID_MASK; + vni &= VXLAN_VNI_MASK; } /* For backwards compatibility, only allow reserved fields to be @@ -1239,7 +1224,7 
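
With the workqueue indirection removed above, vxlan_igmp_join() and vxlan_igmp_leave() run synchronously under the socket lock and propagate their result, which is what lets vxlan_open() release the freshly acquired socket when the multicast join fails. A minimal sketch of that acquire-join-unwind shape follows; struct sock_ref, igmp_join_model() and the rest are invented for illustration.

#include <stdio.h>

struct sock_ref {
	int refcnt;
};

static int igmp_join_model(void)
{
	return -1;	/* pretend the group join failed */
}

static void sock_release_model(struct sock_ref *s)
{
	s->refcnt--;	/* undo the reference taken at open */
}

static int open_model(struct sock_ref *s, int multicast)
{
	int ret = 0;

	s->refcnt++;	/* models a successful vxlan_sock_add() */
	if (multicast) {
		ret = igmp_join_model();
		if (ret) {
			sock_release_model(s);	/* unwind before reporting */
			return ret;
		}
	}
	return ret;
}

int main(void)
{
	struct sock_ref s = { 0 };

	printf("open -> %d, refcnt=%d\n", open_model(&s, 1), s.refcnt);
	return 0;
}
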
@@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) flags &= ~VXLAN_GBP_USED_BITS; } - if (flags || (vni & ~VXLAN_VID_MASK)) { + if (flags || vni & ~VXLAN_VNI_MASK) { /* If there are any unprocessed flags remaining treat * this as a malformed packet. This behavior diverges from * VXLAN RFC (RFC7348) which stipulates that bits in reserved @@ -2175,37 +2160,22 @@ static void vxlan_cleanup(unsigned long arg) static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) { + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); __u32 vni = vxlan->default_dst.remote_vni; vxlan->vn_sock = vs; + spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); + spin_unlock(&vn->sock_lock); } /* Setup stats when device is created */ static int vxlan_init(struct net_device *dev) { - struct vxlan_dev *vxlan = netdev_priv(dev); - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); - struct vxlan_sock *vs; - bool ipv6 = vxlan->flags & VXLAN_F_IPV6; - dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; - spin_lock(&vn->sock_lock); - vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, - vxlan->dst_port, vxlan->flags); - if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) { - /* If we have a socket with same port already, reuse it */ - vxlan_vs_add_dev(vs, vxlan); - } else { - /* otherwise make new socket outside of RTNL */ - dev_hold(dev); - queue_work(vxlan_wq, &vxlan->sock_work); - } - spin_unlock(&vn->sock_lock); - return 0; } @@ -2223,12 +2193,9 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan) static void vxlan_uninit(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct vxlan_sock *vs = vxlan->vn_sock; vxlan_fdb_delete_default(vxlan); - if (vs) - vxlan_sock_release(vs); free_percpu(dev->tstats); } @@ -2236,22 +2203,28 @@ static void vxlan_uninit(struct net_device *dev) static int vxlan_open(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct vxlan_sock *vs = vxlan->vn_sock; + struct vxlan_sock *vs; + int ret = 0; - /* socket hasn't been created */ - if (!vs) - return -ENOTCONN; + vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL, + false, vxlan->flags); + if (IS_ERR(vs)) + return PTR_ERR(vs); + + vxlan_vs_add_dev(vs, vxlan); if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { - vxlan_sock_hold(vs); - dev_hold(dev); - queue_work(vxlan_wq, &vxlan->igmp_join); + ret = vxlan_igmp_join(vxlan); + if (ret) { + vxlan_sock_release(vs); + return ret; + } } if (vxlan->age_interval) mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); - return 0; + return ret; } /* Purge the forwarding table */ @@ -2279,19 +2252,21 @@ static int vxlan_stop(struct net_device *dev) struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); struct vxlan_sock *vs = vxlan->vn_sock; + int ret = 0; - if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && + if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) && !vxlan_group_used(vn, vxlan)) { - vxlan_sock_hold(vs); - dev_hold(dev); - queue_work(vxlan_wq, &vxlan->igmp_leave); + ret = vxlan_igmp_leave(vxlan); + if (ret) + return ret; } del_timer_sync(&vxlan->age_timer); vxlan_flush(vxlan); + vxlan_sock_release(vs); - return 0; + return ret; } /* Stub, nothing needs to be done. 
*/ @@ -2402,9 +2377,6 @@ static void vxlan_setup(struct net_device *dev) INIT_LIST_HEAD(&vxlan->next); spin_lock_init(&vxlan->hash_lock); - INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join); - INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave); - INIT_WORK(&vxlan->sock_work, vxlan_sock_work); init_timer_deferrable(&vxlan->age_timer); vxlan->age_timer.function = vxlan_cleanup; @@ -2516,7 +2488,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); } else { udp_conf.family = AF_INET; - udp_conf.local_ip.s_addr = INADDR_ANY; } udp_conf.local_udp_port = port; @@ -2552,6 +2523,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, sock = vxlan_create_sock(net, ipv6, port, flags); if (IS_ERR(sock)) { + pr_info("Cannot bind port %d, err=%ld\n", ntohs(port), + PTR_ERR(sock)); kfree(vs); return ERR_CAST(sock); } @@ -2591,45 +2564,23 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, struct vxlan_sock *vs; bool ipv6 = flags & VXLAN_F_IPV6; - vs = vxlan_socket_create(net, port, rcv, data, flags); - if (!IS_ERR(vs)) - return vs; - - if (no_share) /* Return error if sharing is not allowed. */ - return vs; - - spin_lock(&vn->sock_lock); - vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags); - if (vs && ((vs->rcv != rcv) || - !atomic_add_unless(&vs->refcnt, 1, 0))) - vs = ERR_PTR(-EBUSY); - spin_unlock(&vn->sock_lock); - - if (!vs) - vs = ERR_PTR(-EINVAL); + if (!no_share) { + spin_lock(&vn->sock_lock); + vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, + flags); + if (vs && vs->rcv == rcv) { + if (!atomic_add_unless(&vs->refcnt, 1, 0)) + vs = ERR_PTR(-EBUSY); + spin_unlock(&vn->sock_lock); + return vs; + } + spin_unlock(&vn->sock_lock); + } - return vs; + return vxlan_socket_create(net, port, rcv, data, flags); } EXPORT_SYMBOL_GPL(vxlan_sock_add); -/* Scheduled at device creation to bind to a socket */ -static void vxlan_sock_work(struct work_struct *work) -{ - struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work); - struct net *net = vxlan->net; - struct vxlan_net *vn = net_generic(net, vxlan_net_id); - __be16 port = vxlan->dst_port; - struct vxlan_sock *nvs; - - nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags); - spin_lock(&vn->sock_lock); - if (!IS_ERR(nvs)) - vxlan_vs_add_dev(nvs, vxlan); - spin_unlock(&vn->sock_lock); - - dev_put(vxlan->dev); -} - static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index ac99798570e8..ea4843be773c 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy, case 0x432a: /* BCM4321 */ case 0x432d: /* BCM4322 */ case 0x4352: /* BCM43222 */ + case 0x435a: /* BCM43228 */ case 0x4333: /* BCM4331 */ case 0x43a2: /* BCM4360 */ case 0x43b3: /* BCM4352 */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c index 50cdf7090198..8eff2753abad 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c @@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, void *dcmd_buf = NULL, *wr_pointer; u16 msglen, maxmsglen = PAGE_SIZE - 0x100; - brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set, - 
cmdhdr->len); + if (len < sizeof(*cmdhdr)) { + brcmf_err("vendor command too short: %d\n", len); + return -EINVAL; + } vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); ifp = vif->ifp; - len -= sizeof(struct brcmf_vndr_dcmd_hdr); + brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd); + + if (cmdhdr->offset > len) { + brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len); + return -EINVAL; + } + + len -= cmdhdr->offset; ret_len = cmdhdr->len; if (ret_len > 0 || len > 0) { if (len > BRCMF_DCMD_MAXLEN) { diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index c3817fae16c0..06f6cc08f451 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ .base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ - .led_mode = IWL_LED_BLINK + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl1000_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", @@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = { .base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl100_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index 21e5d0843a62..890b95f497d6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + const struct iwl_cfg iwl2000_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", @@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl2030_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", @@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl105_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", @@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { .base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl135_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 332bbede39e5..724194e23414 100644 --- 
a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ - .led_mode = IWL_LED_BLINK + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl5300_agn_cfg = { .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", @@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = { .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl5150_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 8f2c3c8c6b84..21b2630763dc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6005_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", @@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6030_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", @@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = { .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6035_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", @@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = { .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ .base_params = &iwl6000_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_BLINK + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6000i_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", @@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = { .base_params = &iwl6050_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6050_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", @@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = { .base_params = &iwl6050_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6150_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", diff --git 
a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index ce99572a982d..638ec0945009 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, if (!vif->bss_conf.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; - if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, + if (mvmvif->phy_ctxt && + IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, mvmvif->phy_ctxt->id)) smps_mode = IEEE80211_SMPS_AUTOMATIC; diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index 9717ee61928c..05d31713ed38 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, if (!vif->bss_conf.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; - if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) + if (mvmvif->phy_ctxt && + data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) smps_mode = IEEE80211_SMPS_AUTOMATIC; IWL_DEBUG_COEX(data->mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 204255423d99..7396b52262b5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -402,7 +402,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) + if ((mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_BEAMFORMER) && + (mvm->fw->ucode_capa.api[0] & + IWL_UCODE_TLV_API_LQ_SS_PARAMS)) hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } @@ -2271,7 +2274,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - iwl_mvm_cancel_scan(mvm); + /* Due to a race condition, it's possible that mac80211 asks + * us to stop a hw_scan when it's already stopped. This can + * happen, for instance, if we stopped the scan ourselves, + * called ieee80211_scan_completed() and the userspace called + * cancel scan before ieee80211_scan_work() could run. + * To handle that, simply return if the scan is not running. + */ + /* FIXME: for now, we ignore this race for UMAC scans, since + * they don't set the scan_status. + */ + if ((mvm->scan_status == IWL_MVM_SCAN_OS) || + (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + iwl_mvm_cancel_scan(mvm); mutex_unlock(&mvm->mutex); } @@ -2609,12 +2624,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, int ret; mutex_lock(&mvm->mutex); + + /* Due to a race condition, it's possible that mac80211 asks + * us to stop a sched_scan when it's already stopped. This + * can happen, for instance, if we stopped the scan ourselves, + * called ieee80211_sched_scan_stopped() and the userspace called + * stop sched scan before ieee80211_sched_scan_stopped_work() + * could run. To handle this, simply return if the scan is + * not running. + */ + /* FIXME: for now, we ignore this race for UMAC scans, since + * they don't set the scan_status.
+ */ + if (mvm->scan_status != IWL_MVM_SCAN_SCHED && + !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + mutex_unlock(&mvm->mutex); + return 0; + } + ret = iwl_mvm_scan_offload_stop(mvm, false); mutex_unlock(&mvm->mutex); iwl_mvm_wait_for_async_handlers(mvm); return ret; - } static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index f0946b5dd7c8..a75bb150ea27 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -587,8 +587,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) if (mvm->scan_status == IWL_MVM_SCAN_NONE) return 0; - if (iwl_mvm_is_radio_killed(mvm)) + if (iwl_mvm_is_radio_killed(mvm)) { + ret = 0; goto out; + } iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, scan_done_notif, @@ -600,16 +602,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", sched ? "offloaded " : "", ret); iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); - return ret; + goto out; } IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", sched ? "offloaded " : ""); ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); - if (ret) - return ret; - +out: /* * Clear the scan status so the next scan requests will succeed. This * also ensures the Rx handler doesn't do anything, as the scan was @@ -619,7 +619,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) if (mvm->scan_status == IWL_MVM_SCAN_OS) iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); -out: mvm->scan_status = IWL_MVM_SCAN_NONE; if (notify) { @@ -629,7 +628,7 @@ out: ieee80211_scan_completed(mvm->hw, true); } - return 0; + return ret; } static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 54fafbf9a711..f8d6f306dd76 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -750,8 +750,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) * request */ list_for_each_entry(te_data, &mvm->time_event_list, list) { - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE && - te_data->running) { + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); is_p2p = true; goto remove_te; @@ -766,10 +765,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) * request */ list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { - if (te_data->running) { - mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - goto remove_te; - } + mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); + goto remove_te; } remove_te: diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/orinoco/airport.c index 0ca8b1455cd9..77e6c53040a3 100644 --- a/drivers/net/wireless/orinoco/airport.c +++ b/drivers/net/wireless/orinoco/airport.c @@ -228,7 +228,7 @@ MODULE_AUTHOR("Benjamin Herrenschmidt <[email protected]>"); MODULE_DESCRIPTION("Driver for the Apple Airport wireless card."); MODULE_LICENSE("Dual MPL/GPL"); -static struct of_device_id airport_match[] = { +static const struct of_device_id airport_match[] = { { .name = "radio", }, diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 1d4677460711..074f716020aa 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -1386,8 +1386,11 @@ u8 
rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) } return true; - } else if (0x86DD == ether_type) { - return true; + } else if (ETH_P_IPV6 == ether_type) { + /* TODO: Handle any IPv6 cases that need special handling. + * For now, always return false + */ + goto end; } end: diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 589fa256256b..8a495b318b6f 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -238,6 +238,8 @@ struct xenvif { unsigned int num_queues; /* active queues, resource allocated */ unsigned int stalled_queues; + struct xenbus_watch credit_watch; + spinlock_t lock; #ifdef CONFIG_DEBUG_FS @@ -260,6 +262,8 @@ static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif) return to_xenbus_device(vif->dev->dev.parent); } +void xenvif_tx_credit_callback(unsigned long data); + struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, unsigned int handle); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index d2ada7cda97a..1a83e190fc15 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -463,6 +463,7 @@ int xenvif_init_queue(struct xenvif_queue *queue) queue->credit_bytes = queue->remaining_credit = ~0UL; queue->credit_usec = 0UL; init_timer(&queue->credit_timeout); + queue->credit_timeout.function = xenvif_tx_credit_callback; queue->credit_window_start = get_jiffies_64(); queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index cab9f5257f57..b8c471813f4c 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, static void make_tx_response(struct xenvif_queue *queue, struct xen_netif_tx_request *txp, s8 st); +static void push_tx_responses(struct xenvif_queue *queue); static inline int tx_work_todo(struct xenvif_queue *queue); @@ -641,7 +642,7 @@ static void tx_add_credit(struct xenvif_queue *queue) queue->remaining_credit = min(max_credit, max_burst); } -static void tx_credit_callback(unsigned long data) +void xenvif_tx_credit_callback(unsigned long data) { struct xenvif_queue *queue = (struct xenvif_queue *)data; tx_add_credit(queue); @@ -655,15 +656,10 @@ static void xenvif_tx_err(struct xenvif_queue *queue, unsigned long flags; do { - int notify; - spin_lock_irqsave(&queue->response_lock, flags); make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); + push_tx_responses(queue); spin_unlock_irqrestore(&queue->response_lock, flags); - if (notify) - notify_remote_via_irq(queue->tx_irq); - if (cons == end) break; txp = RING_GET_REQUEST(&queue->tx, cons++); @@ -1169,8 +1165,6 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) if (size > queue->remaining_credit) { queue->credit_timeout.data = (unsigned long)queue; - queue->credit_timeout.function = - tx_credit_callback; mod_timer(&queue->credit_timeout, next_credit); queue->credit_window_start = next_credit; @@ -1657,7 +1651,6 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, { struct pending_tx_info *pending_tx_info; pending_ring_idx_t index; - int notify; unsigned long flags; pending_tx_info = &queue->pending_tx_info[pending_idx]; @@ -1673,12 +1666,9 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 
pending_idx, index = pending_index(queue->pending_prod++); queue->pending_ring[index] = pending_idx; - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); + push_tx_responses(queue); spin_unlock_irqrestore(&queue->response_lock, flags); - - if (notify) - notify_remote_via_irq(queue->tx_irq); } @@ -1699,6 +1689,15 @@ static void make_tx_response(struct xenvif_queue *queue, queue->tx.rsp_prod_pvt = ++i; } +static void push_tx_responses(struct xenvif_queue *queue) +{ + int notify; + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); + if (notify) + notify_remote_via_irq(queue->tx_irq); +} + static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, u16 id, s8 st, diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 794204e34fba..3d8dbf5f2d39 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -41,6 +41,7 @@ static void connect(struct backend_info *be); static int read_xenbus_vif_flags(struct backend_info *be); static int backend_create_xenvif(struct backend_info *be); static void unregister_hotplug_status_watch(struct backend_info *be); +static void xen_unregister_watchers(struct xenvif *vif); static void set_backend_state(struct backend_info *be, enum xenbus_state state); @@ -232,6 +233,7 @@ static int netback_remove(struct xenbus_device *dev) unregister_hotplug_status_watch(be); if (be->vif) { kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); + xen_unregister_watchers(be->vif); xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); xenvif_free(be->vif); be->vif = NULL; @@ -430,6 +432,7 @@ static int backend_create_xenvif(struct backend_info *be) static void backend_disconnect(struct backend_info *be) { if (be->vif) { + xen_unregister_watchers(be->vif); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(be->vif); #endif /* CONFIG_DEBUG_FS */ @@ -645,6 +648,59 @@ static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) return 0; } +static void xen_net_rate_changed(struct xenbus_watch *watch, + const char **vec, unsigned int len) +{ + struct xenvif *vif = container_of(watch, struct xenvif, credit_watch); + struct xenbus_device *dev = xenvif_to_xenbus_device(vif); + unsigned long credit_bytes; + unsigned long credit_usec; + unsigned int queue_index; + + xen_net_read_rate(dev, &credit_bytes, &credit_usec); + for (queue_index = 0; queue_index < vif->num_queues; queue_index++) { + struct xenvif_queue *queue = &vif->queues[queue_index]; + + queue->credit_bytes = credit_bytes; + queue->credit_usec = credit_usec; + if (!mod_timer_pending(&queue->credit_timeout, jiffies) && + queue->remaining_credit > queue->credit_bytes) { + queue->remaining_credit = queue->credit_bytes; + } + } +} + +static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif) +{ + int err = 0; + char *node; + unsigned maxlen = strlen(dev->nodename) + sizeof("/rate"); + + node = kmalloc(maxlen, GFP_KERNEL); + if (!node) + return -ENOMEM; + snprintf(node, maxlen, "%s/rate", dev->nodename); + vif->credit_watch.node = node; + vif->credit_watch.callback = xen_net_rate_changed; + err = register_xenbus_watch(&vif->credit_watch); + if (err) { + pr_err("Failed to set watcher %s\n", vif->credit_watch.node); + kfree(node); + vif->credit_watch.node = NULL; + vif->credit_watch.callback = NULL; + } + return err; +} + +static void xen_unregister_watchers(struct xenvif *vif) +{ + if (vif->credit_watch.node) { + unregister_xenbus_watch(&vif->credit_watch); + kfree(vif->credit_watch.node); + vif->credit_watch.node = NULL; + } +} 
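The watch machinery above reconfigures xen-netback's per-queue transmit rate limiter on the fly: xen_net_rate_changed() re-reads the toolstack's "rate" node and rewrites each queue's credit_bytes/credit_usec, which the credit scheduler (tx_add_credit()/tx_credit_exceeded() in the netback.c hunks above) then consumes. For readers who want the idea in isolation, here is a minimal stand-alone C sketch of that token-bucket style scheduler; the field names mirror the driver's, but the helper names and the two-window burst cap are illustrative assumptions for this sketch, not kernel code or API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of a byte-based token bucket: a queue earns credit_bytes of
 * budget per credit_usec window; unused budget is capped so an idle
 * queue cannot accumulate an unbounded burst. */
struct credit_state {
	uint64_t credit_bytes;      /* budget granted per window */
	uint64_t credit_usec;       /* window length, microseconds */
	uint64_t remaining_credit;  /* budget currently available */
	uint64_t window_start_usec; /* start of the current window */
};

static void credit_replenish(struct credit_state *c, uint64_t now_usec)
{
	uint64_t windows = (now_usec - c->window_start_usec) / c->credit_usec;
	uint64_t cap = 2 * c->credit_bytes; /* cap chosen for the sketch */

	c->remaining_credit += windows * c->credit_bytes;
	if (c->remaining_credit > cap)
		c->remaining_credit = cap;
	c->window_start_usec += windows * c->credit_usec;
}

static bool credit_try_send(struct credit_state *c, uint64_t len,
			    uint64_t now_usec)
{
	credit_replenish(c, now_usec);
	if (len > c->remaining_credit)
		return false; /* the driver would defer and arm a timer */
	c->remaining_credit -= len;
	return true;
}

int main(void)
{
	/* 1 MB/s: 1000000 bytes per 1000000 usec, bucket initially full */
	struct credit_state c = { 1000000, 1000000, 1000000, 0 };

	printf("9 kB at t=0:     %s\n",
	       credit_try_send(&c, 9000, 0) ? "sent" : "deferred");
	printf("2 MB at t=0:     %s\n",
	       credit_try_send(&c, 2000000, 0) ? "sent" : "deferred");
	printf("2 MB at t=2.5 s: %s\n",
	       credit_try_send(&c, 2000000, 2500000) ? "sent" : "deferred");
	return 0;
}

Updating a live vif then reduces to what xen_net_rate_changed() does above: overwrite credit_bytes and credit_usec and clamp remaining_credit, so the next replenishment runs at the new rate.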
+ static void unregister_hotplug_status_watch(struct backend_info *be) { if (be->have_hotplug_status_watch) { @@ -709,6 +765,7 @@ static void connect(struct backend_info *be) } xen_net_read_rate(dev, &credit_bytes, &credit_usec); + xen_register_watchers(dev, be->vif); read_xenbus_vif_flags(be); /* Use the number of queues requested by the frontend */ diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 38d1c51f58b1..7bcaeec876c0 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -84,8 +84,7 @@ config OF_RESOLVE bool config OF_OVERLAY - bool - depends on OF + bool "Device Tree overlays" select OF_DYNAMIC select OF_RESOLVE diff --git a/drivers/of/base.c b/drivers/of/base.c index 0a8aeb8523fe..adb8764861c0 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -714,16 +714,17 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent, const char *path) { struct device_node *child; - int len = strchrnul(path, '/') - path; - int term; + int len; + const char *end; + end = strchr(path, ':'); + if (!end) + end = strchrnul(path, '/'); + + len = end - path; if (!len) return NULL; - term = strchrnul(path, ':') - path; - if (term < len) - len = term; - __for_each_child_of_node(parent, child) { const char *name = strrchr(child->full_name, '/'); if (WARN(!name, "malformed device_node %s\n", child->full_name)) @@ -768,8 +769,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt /* The path could begin with an alias */ if (*path != '/') { - char *p = strchrnul(path, '/'); - int len = separator ? separator - path : p - path; + int len; + const char *p = separator; + + if (!p) + p = strchrnul(path, '/'); + len = p - path; /* of_aliases must not be NULL */ if (!of_aliases) @@ -794,6 +799,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt path++; /* Increment past '/' delimiter */ np = __of_find_node_by_path(np, path); path = strchrnul(path, '/'); + if (separator && separator < path) + break; } raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; @@ -1886,8 +1893,10 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) name = of_get_property(of_chosen, "linux,stdout-path", NULL); if (IS_ENABLED(CONFIG_PPC) && !name) name = of_get_property(of_aliases, "stdout", NULL); - if (name) + if (name) { of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); + add_preferred_console("stdout-path", 0, NULL); + } } if (!of_aliases) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 1bd43053b8c7..0c064485d1c2 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -88,7 +88,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi return 0; } -static int of_mdio_parse_addr(struct device *dev, const struct device_node *np) +int of_mdio_parse_addr(struct device *dev, const struct device_node *np) { u32 addr; int ret; @@ -108,6 +108,7 @@ static int of_mdio_parse_addr(struct device *dev, const struct device_node *np) return addr; } +EXPORT_SYMBOL(of_mdio_parse_addr); /** * of_mdiobus_register - Register mii_bus and create PHYs from the device tree diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 352b4f28f82c..dee9270ba547 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -19,6 +19,7 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/err.h> +#include <linux/idr.h> #include "of_private.h" @@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, struct device_node *target, 
struct device_node *child) { const char *cname; - struct device_node *tchild, *grandchild; + struct device_node *tchild; int ret = 0; cname = kbasename(child->full_name); diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 0cf9a236d438..aba8946cac46 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -92,6 +92,11 @@ static void __init of_selftest_find_node_by_name(void) "option path test failed\n"); of_node_put(np); + np = of_find_node_opts_by_path("/testcase-data:test/option", &options); + selftest(np && !strcmp("test/option", options), + "option path test, subcase #1 failed\n"); + of_node_put(np); + np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); selftest(np, "NULL option path test failed\n"); of_node_put(np); @@ -102,6 +107,12 @@ static void __init of_selftest_find_node_by_name(void) "option alias path test failed\n"); of_node_put(np); + np = of_find_node_opts_by_path("testcase-alias:test/alias/option", + &options); + selftest(np && !strcmp("test/alias/option", options), + "option alias path test, subcase #1 failed\n"); + of_node_put(np); + np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); selftest(np, "NULL option alias path test failed\n"); of_node_put(np); @@ -378,9 +389,9 @@ static void __init of_selftest_property_string(void) rc = of_property_match_string(np, "phandle-list-names", "first"); selftest(rc == 0, "first expected:0 got:%i\n", rc); rc = of_property_match_string(np, "phandle-list-names", "second"); - selftest(rc == 1, "second expected:0 got:%i\n", rc); + selftest(rc == 1, "second expected:1 got:%i\n", rc); rc = of_property_match_string(np, "phandle-list-names", "third"); - selftest(rc == 2, "third expected:0 got:%i\n", rc); + selftest(rc == 2, "third expected:2 got:%i\n", rc); rc = of_property_match_string(np, "phandle-list-names", "fourth"); selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); rc = of_property_match_string(np, "missing-property", "blah"); @@ -478,7 +489,6 @@ static void __init of_selftest_changeset(void) struct device_node *n1, *n2, *n21, *nremove, *parent, *np; struct of_changeset chgset; - of_changeset_init(&chgset); n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); selftest(n1, "testcase setup failure\n"); n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); @@ -979,7 +989,7 @@ static int of_path_platform_device_exists(const char *path) return pdev != NULL; } -#if IS_ENABLED(CONFIG_I2C) +#if IS_BUILTIN(CONFIG_I2C) /* get the i2c client device instantiated at the path */ static struct i2c_client *of_path_to_i2c_client(const char *path) @@ -1445,7 +1455,7 @@ static void of_selftest_overlay_11(void) return; } -#if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) +#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) struct selftest_i2c_bus_data { struct platform_device *pdev; @@ -1584,7 +1594,7 @@ static struct i2c_driver selftest_i2c_dev_driver = { .id_table = selftest_i2c_dev_id, }; -#if IS_ENABLED(CONFIG_I2C_MUX) +#if IS_BUILTIN(CONFIG_I2C_MUX) struct selftest_i2c_mux_data { int nchans; @@ -1695,7 +1705,7 @@ static int of_selftest_overlay_i2c_init(void) "could not register selftest i2c bus driver\n")) return ret; -#if IS_ENABLED(CONFIG_I2C_MUX) +#if IS_BUILTIN(CONFIG_I2C_MUX) ret = i2c_add_driver(&selftest_i2c_mux_driver); if (selftest(ret == 0, "could not register selftest i2c mux driver\n")) @@ -1707,7 +1717,7 @@ static int of_selftest_overlay_i2c_init(void) static void of_selftest_overlay_i2c_cleanup(void) { -#if IS_ENABLED(CONFIG_I2C_MUX) +#if 
IS_BUILTIN(CONFIG_I2C_MUX) i2c_del_driver(&selftest_i2c_mux_driver); #endif platform_driver_unregister(&selftest_i2c_bus_driver); @@ -1814,7 +1824,7 @@ static void __init of_selftest_overlay(void) of_selftest_overlay_10(); of_selftest_overlay_11(); -#if IS_ENABLED(CONFIG_I2C) +#if IS_BUILTIN(CONFIG_I2C) if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) goto out; diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index aab55474dd0d..ee082c0366ec 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) return false; } -static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, +static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int offset) { struct xgene_pcie_port *port = bus->sysdata; @@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, return NULL; xgene_pcie_set_rtdid_reg(bus, devfn); - return xgene_pcie_get_cfg_base(bus); + return xgene_pcie_get_cfg_base(bus) + offset; } static struct pci_ops xgene_pcie_ops = { diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index aa012fb3834b..312f23a8429c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev, struct pci_dev *pdev = to_pci_dev(dev); char *driver_override, *old = pdev->driver_override, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); @@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev, { struct pci_dev *pdev = to_pci_dev(dev); - return sprintf(buf, "%s\n", pdev->driver_override); + return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 5afe03e28b91..2062c224e32f 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -66,6 +66,10 @@ #define BYT_DIR_MASK (BIT(1) | BIT(2)) #define BYT_TRIG_MASK (BIT(26) | BIT(25) | BIT(24)) +#define BYT_CONF0_RESTORE_MASK (BYT_DIRECT_IRQ_EN | BYT_TRIG_MASK | \ + BYT_PIN_MUX) +#define BYT_VAL_RESTORE_MASK (BYT_DIR_MASK | BYT_LEVEL) + #define BYT_NGPIO_SCORE 102 #define BYT_NGPIO_NCORE 28 #define BYT_NGPIO_SUS 44 @@ -134,12 +138,18 @@ static struct pinctrl_gpio_range byt_ranges[] = { }, }; +struct byt_gpio_pin_context { + u32 conf0; + u32 val; +}; + struct byt_gpio { struct gpio_chip chip; struct platform_device *pdev; spinlock_t lock; void __iomem *reg_base; struct pinctrl_gpio_range *range; + struct byt_gpio_pin_context *saved_context; }; #define to_byt_gpio(c) container_of(c, struct byt_gpio, chip) @@ -158,40 +168,62 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset, return vg->reg_base + reg_offset + reg; } -static bool is_special_pin(struct byt_gpio *vg, unsigned offset) +static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset) +{ + void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); + unsigned long flags; + u32 value; + + spin_lock_irqsave(&vg->lock, flags); + value = readl(reg); + value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); + writel(value, reg); + spin_unlock_irqrestore(&vg->lock, flags); +} + +static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned 
offset) { /* SCORE pin 92-93 */ if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) && offset >= 92 && offset <= 93) - return true; + return 1; /* SUS pin 11-21 */ if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) && offset >= 11 && offset <= 21) - return true; + return 1; - return false; + return 0; } static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) { struct byt_gpio *vg = to_byt_gpio(chip); void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG); - u32 value; - bool special; + u32 value, gpio_mux; /* * In most cases, func pin mux 000 means GPIO function. * But, some pins may have func pin mux 001 representing - * GPIO function. Only allow user to export pin with - * func pin mux preset as GPIO function by BIOS/FW. + * GPIO function. + * + * Because there are devices out there where some pins were not + * configured correctly, we allow changing the mux value from + * request (but print a warning about that). */ value = readl(reg) & BYT_PIN_MUX; - special = is_special_pin(vg, offset); - if ((special && value != 1) || (!special && value)) { - dev_err(&vg->pdev->dev, - "pin %u cannot be used as GPIO.\n", offset); - return -EINVAL; + gpio_mux = byt_get_gpio_mux(vg, offset); + if (WARN_ON(gpio_mux != value)) { + unsigned long flags; + + spin_lock_irqsave(&vg->lock, flags); + value = readl(reg) & ~BYT_PIN_MUX; + value |= gpio_mux; + writel(value, reg); + spin_unlock_irqrestore(&vg->lock, flags); + + dev_warn(&vg->pdev->dev, + "pin %u forcibly re-configured as GPIO\n", offset); } pm_runtime_get(&vg->pdev->dev); @@ -202,14 +234,8 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset) static void byt_gpio_free(struct gpio_chip *chip, unsigned offset) { struct byt_gpio *vg = to_byt_gpio(chip); - void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); - u32 value; - - /* clear interrupt triggering */ - value = readl(reg); - value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); - writel(value, reg); + byt_gpio_clear_triggering(vg, offset); pm_runtime_put(&vg->pdev->dev); } @@ -236,23 +262,13 @@ static int byt_irq_type(struct irq_data *d, unsigned type) value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); - switch (type) { - case IRQ_TYPE_LEVEL_HIGH: - value |= BYT_TRIG_LVL; - case IRQ_TYPE_EDGE_RISING: - value |= BYT_TRIG_POS; - break; - case IRQ_TYPE_LEVEL_LOW: - value |= BYT_TRIG_LVL; - case IRQ_TYPE_EDGE_FALLING: - value |= BYT_TRIG_NEG; - break; - case IRQ_TYPE_EDGE_BOTH: - value |= (BYT_TRIG_NEG | BYT_TRIG_POS); - break; - } writel(value, reg); + if (type & IRQ_TYPE_EDGE_BOTH) + __irq_set_handler_locked(d->irq, handle_edge_irq); + else if (type & IRQ_TYPE_LEVEL_MASK) + __irq_set_handler_locked(d->irq, handle_level_irq); + spin_unlock_irqrestore(&vg->lock, flags); return 0; @@ -410,58 +426,80 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc) struct irq_data *data = irq_desc_get_irq_data(desc); struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc)); struct irq_chip *chip = irq_data_get_irq_chip(data); - u32 base, pin, mask; + u32 base, pin; void __iomem *reg; - u32 pending; + unsigned long pending; unsigned virq; - int looplimit = 0; /* check from GPIO controller which pin triggered the interrupt */ for (base = 0; base < vg->chip.ngpio; base += 32) { - reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG); - - while ((pending = readl(reg))) { - pin = __ffs(pending); - mask = BIT(pin); - /* Clear before handling so we can't lose an edge */ - writel(mask, reg); - + pending = readl(reg); +
for_each_set_bit(pin, &pending, 32) { virq = irq_find_mapping(vg->chip.irqdomain, base + pin); generic_handle_irq(virq); - - /* In case bios or user sets triggering incorretly a pin - * might remain in "interrupt triggered" state. - */ - if (looplimit++ > 32) { - dev_err(&vg->pdev->dev, - "Gpio %d interrupt flood, disabling\n", - base + pin); - - reg = byt_gpio_reg(&vg->chip, base + pin, - BYT_CONF0_REG); - mask = readl(reg); - mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS | - BYT_TRIG_LVL); - writel(mask, reg); - mask = readl(reg); /* flush */ - break; - } } } chip->irq_eoi(data); } +static void byt_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct byt_gpio *vg = to_byt_gpio(gc); + unsigned offset = irqd_to_hwirq(d); + void __iomem *reg; + + reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG); + writel(BIT(offset % 32), reg); +} + static void byt_irq_unmask(struct irq_data *d) { + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct byt_gpio *vg = to_byt_gpio(gc); + unsigned offset = irqd_to_hwirq(d); + unsigned long flags; + void __iomem *reg; + u32 value; + + spin_lock_irqsave(&vg->lock, flags); + + reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG); + value = readl(reg); + + switch (irqd_get_trigger_type(d)) { + case IRQ_TYPE_LEVEL_HIGH: + value |= BYT_TRIG_LVL; + case IRQ_TYPE_EDGE_RISING: + value |= BYT_TRIG_POS; + break; + case IRQ_TYPE_LEVEL_LOW: + value |= BYT_TRIG_LVL; + case IRQ_TYPE_EDGE_FALLING: + value |= BYT_TRIG_NEG; + break; + case IRQ_TYPE_EDGE_BOTH: + value |= (BYT_TRIG_NEG | BYT_TRIG_POS); + break; + } + + writel(value, reg); + + spin_unlock_irqrestore(&vg->lock, flags); } static void byt_irq_mask(struct irq_data *d) { + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct byt_gpio *vg = to_byt_gpio(gc); + + byt_gpio_clear_triggering(vg, irqd_to_hwirq(d)); } static struct irq_chip byt_irqchip = { .name = "BYT-GPIO", + .irq_ack = byt_irq_ack, .irq_mask = byt_irq_mask, .irq_unmask = byt_irq_unmask, .irq_set_type = byt_irq_type, @@ -472,6 +510,21 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) { void __iomem *reg; u32 base, value; + int i; + + /* + * Clear interrupt triggers for all pins that are GPIOs and + * do not use direct IRQ mode. This will prevent spurious + * interrupts from misconfigured pins. 
+ */ + for (i = 0; i < vg->chip.ngpio; i++) { + value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG)); + if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) && + !(value & BYT_DIRECT_IRQ_EN)) { + byt_gpio_clear_triggering(vg, i); + dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i); + } + } /* clear interrupt status trigger registers */ for (base = 0; base < vg->chip.ngpio; base += 32) { @@ -541,6 +594,11 @@ static int byt_gpio_probe(struct platform_device *pdev) gc->can_sleep = false; gc->dev = dev; +#ifdef CONFIG_PM_SLEEP + vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio, + sizeof(*vg->saved_context), GFP_KERNEL); +#endif + ret = gpiochip_add(gc); if (ret) { dev_err(&pdev->dev, "failed adding byt-gpio chip\n"); @@ -569,6 +627,69 @@ static int byt_gpio_probe(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int byt_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct byt_gpio *vg = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < vg->chip.ngpio; i++) { + void __iomem *reg; + u32 value; + + reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG); + value = readl(reg) & BYT_CONF0_RESTORE_MASK; + vg->saved_context[i].conf0 = value; + + reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG); + value = readl(reg) & BYT_VAL_RESTORE_MASK; + vg->saved_context[i].val = value; + } + + return 0; +} + +static int byt_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct byt_gpio *vg = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < vg->chip.ngpio; i++) { + void __iomem *reg; + u32 value; + + reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG); + value = readl(reg); + if ((value & BYT_CONF0_RESTORE_MASK) != + vg->saved_context[i].conf0) { + value &= ~BYT_CONF0_RESTORE_MASK; + value |= vg->saved_context[i].conf0; + writel(value, reg); + dev_info(dev, "restored pin %d conf0 %#08x\n", i, value); + } + + reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG); + value = readl(reg); + if ((value & BYT_VAL_RESTORE_MASK) != + vg->saved_context[i].val) { + u32 v; + + v = value & ~BYT_VAL_RESTORE_MASK; + v |= vg->saved_context[i].val; + if (v != value) { + writel(v, reg); + dev_dbg(dev, "restored pin %d val %#08x\n", + i, v); + } + } + } + + return 0; +} +#endif + static int byt_gpio_runtime_suspend(struct device *dev) { return 0; @@ -580,8 +701,9 @@ static int byt_gpio_runtime_resume(struct device *dev) } static const struct dev_pm_ops byt_gpio_pm_ops = { - .runtime_suspend = byt_gpio_runtime_suspend, - .runtime_resume = byt_gpio_runtime_resume, + SET_LATE_SYSTEM_SLEEP_PM_OPS(byt_gpio_suspend, byt_gpio_resume) + SET_RUNTIME_PM_OPS(byt_gpio_runtime_suspend, byt_gpio_runtime_resume, + NULL) }; static const struct acpi_device_id byt_gpio_acpi_match[] = { diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 3034fd03bced..82f691eeeec4 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1226,6 +1226,7 @@ static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned offset) static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { + chv_gpio_set(chip, offset, value); return pinctrl_gpio_direction_output(chip->base + offset); } diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index f4cd0b9b2438..a4814066ea08 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1477,28 +1477,25 @@ static void
gpio_irq_ack(struct irq_data *d) /* the interrupt is already cleared before by reading ISR */ } -static unsigned int gpio_irq_startup(struct irq_data *d) +static int gpio_irq_request_res(struct irq_data *d) { struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); unsigned pin = d->hwirq; int ret; ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin); - if (ret) { + if (ret) dev_err(at91_gpio->chip.dev, "unable to lock pin %lu IRQ\n", d->hwirq); - return ret; - } - gpio_irq_unmask(d); - return 0; + + return ret; } -static void gpio_irq_shutdown(struct irq_data *d) +static void gpio_irq_release_res(struct irq_data *d) { struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); unsigned pin = d->hwirq; - gpio_irq_mask(d); gpiochip_unlock_as_irq(&at91_gpio->chip, pin); } @@ -1577,8 +1574,8 @@ void at91_pinctrl_gpio_resume(void) static struct irq_chip gpio_irqchip = { .name = "GPIO", .irq_ack = gpio_irq_ack, - .irq_startup = gpio_irq_startup, - .irq_shutdown = gpio_irq_shutdown, + .irq_request_resources = gpio_irq_request_res, + .irq_release_resources = gpio_irq_release_res, .irq_disable = gpio_irq_mask, .irq_mask = gpio_irq_mask, .irq_unmask = gpio_irq_unmask, diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c index 24c5d88f943f..3c68a8e5e0dd 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c @@ -1011,6 +1011,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = { .pins = sun4i_a10_pins, .npins = ARRAY_SIZE(sun4i_a10_pins), .irq_banks = 1, + .irq_read_needs_mux = true, }; static int sun4i_a10_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 3d0744337736..f8e171b76693 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include "../core.h" +#include "../../gpio/gpiolib.h" #include "pinctrl-sunxi.h" static struct irq_chip sunxi_pinctrl_edge_irq_chip; @@ -464,10 +465,19 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip, static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset) { struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); u32 reg = sunxi_data_reg(offset); u8 index = sunxi_data_offset(offset); - u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK; + u32 set_mux = pctl->desc->irq_read_needs_mux && + test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); + u32 val; + + if (set_mux) + sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT); + + val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK; + + if (set_mux) + sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ); return val; } diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index 5a51523a3459..e248e81a0f9e 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h @@ -77,6 +77,9 @@ #define IRQ_LEVEL_LOW 0x03 #define IRQ_EDGE_BOTH 0x04 +#define SUN4I_FUNC_INPUT 0 +#define SUN4I_FUNC_IRQ 6 + struct sunxi_desc_function { const char *name; u8 muxval; @@ -94,6 +97,7 @@ struct sunxi_pinctrl_desc { int npins; unsigned pin_base; unsigned irq_banks; + bool irq_read_needs_mux; }; struct sunxi_pinctrl_function { diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 1245dca79009..a4a8a6dc60c4 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1839,10 +1839,12 @@
static int _regulator_do_enable(struct regulator_dev *rdev) } if (rdev->ena_pin) { - ret = regulator_ena_gpio_ctrl(rdev, true); - if (ret < 0) - return ret; - rdev->ena_gpio_state = 1; + if (!rdev->ena_gpio_state) { + ret = regulator_ena_gpio_ctrl(rdev, true); + if (ret < 0) + return ret; + rdev->ena_gpio_state = 1; + } } else if (rdev->desc->ops->enable) { ret = rdev->desc->ops->enable(rdev); if (ret < 0) @@ -1939,10 +1941,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev) trace_regulator_disable(rdev_get_name(rdev)); if (rdev->ena_pin) { - ret = regulator_ena_gpio_ctrl(rdev, false); - if (ret < 0) - return ret; - rdev->ena_gpio_state = 0; + if (rdev->ena_gpio_state) { + ret = regulator_ena_gpio_ctrl(rdev, false); + if (ret < 0) + return ret; + rdev->ena_gpio_state = 0; + } } else if (rdev->desc->ops->disable) { ret = rdev->desc->ops->disable(rdev); @@ -3626,12 +3630,6 @@ regulator_register(const struct regulator_desc *regulator_desc, config->ena_gpio, ret); goto wash; } - - if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH) - rdev->ena_gpio_state = 1; - - if (config->ena_gpio_invert) - rdev->ena_gpio_state = !rdev->ena_gpio_state; } /* set regulator constraints */ @@ -3800,9 +3798,11 @@ int regulator_suspend_finish(void) list_for_each_entry(rdev, ®ulator_list, list) { mutex_lock(&rdev->mutex); if (rdev->use_count > 0 || rdev->constraints->always_on) { - error = _regulator_do_enable(rdev); - if (error) - ret = error; + if (!_regulator_is_enabled(rdev)) { + error = _regulator_do_enable(rdev); + if (error) + ret = error; + } } else { if (!have_full_constraints()) goto unlock; diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index e2cffe01b807..fb991ec76423 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 92f6af6da699..73354ee27877 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -951,6 +951,7 @@ static int rpmsg_probe(struct virtio_device *vdev) void *bufs_va; int err = 0, i; size_t total_buf_space; + bool notify; vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); if (!vrp) @@ -1030,8 +1031,22 @@ static int rpmsg_probe(struct virtio_device *vdev) } } + /* + * Prepare to kick but don't notify yet - we can't do this before + * device is ready. + */ + notify = virtqueue_kick_prepare(vrp->rvq); + + /* From this point on, we can notify and get callbacks. */ + virtio_device_ready(vdev); + /* tell the remote processor it can start sending messages */ - virtqueue_kick(vrp->rvq); + /* + * this might be concurrent with callbacks, but we are only + * doing notify, not a full kick here, so that's ok. 
+ */ + if (notify) + virtqueue_notify(vrp->rvq); dev_info(&vdev->dev, "rpmsg host is online\n"); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4241eeab3386..f4cf6851fae9 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = { static struct s3c_rtc_data const s3c6410_rtc_data = { .max_user_freq = 32768, + .needs_src_clk = true, .irq_handler = s3c6410_rtc_irq, .set_freq = s3c6410_rtc_setfreq, .enable_tick = s3c6410_rtc_enable_tick, diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index d9631e15f7b5..dbe416ff46c2 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -1172,7 +1172,7 @@ static struct pci_error_handlers csio_err_handler = { * Macros needed to support the PCI Device ID Table ... */ #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ - static struct pci_device_id csio_pci_tbl[] = { + static const struct pci_device_id csio_pci_tbl[] = { /* Define for FCoE uses PF6 */ #define CH_PCI_DEVICE_ID_FUNCTION 0x6 diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 62b58d38ce2e..60de66252fa2 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work) struct sas_discovery_event *ev = to_sas_discovery_event(work); struct asd_sas_port *port = ev->port; struct sas_ha_struct *ha = port->ha; + struct domain_device *ddev = port->port_dev; /* prevent revalidation from finding sata links in recovery */ mutex_lock(&ha->disco_mutex); @@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work) SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, task_pid_nr(current)); - if (port->port_dev) - res = sas_ex_revalidate_domain(port->port_dev); + if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || + ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) + res = sas_ex_revalidate_domain(ddev); SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", port->id, task_pid_nr(current), res); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index af98b096af2f..175c9956cbe3 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -144,10 +144,9 @@ struct ffs_io_data { bool read; struct kiocb *kiocb; - const struct iovec *iovec; - unsigned long nr_segs; - char __user *buf; - size_t len; + struct iov_iter data; + const void *to_free; + char *buf; struct mm_struct *mm; struct work_struct work; @@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work) io_data->req->actual; if (io_data->read && ret > 0) { - int i; - size_t pos = 0; - - /* - * Since req->length may be bigger than io_data->len (after - * being rounded up to maxpacketsize), we may end up with more - * data then user space has space for. 
- */ - ret = min_t(int, ret, io_data->len); - use_mm(io_data->mm); - for (i = 0; i < io_data->nr_segs; i++) { - size_t len = min_t(size_t, ret - pos, - io_data->iovec[i].iov_len); - if (!len) - break; - if (unlikely(copy_to_user(io_data->iovec[i].iov_base, - &io_data->buf[pos], len))) { - ret = -EFAULT; - break; - } - pos += len; - } + ret = copy_to_iter(io_data->buf, ret, &io_data->data); + if (iov_iter_count(&io_data->data)) + ret = -EFAULT; unuse_mm(io_data->mm); } @@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work) io_data->kiocb->private = NULL; if (io_data->read) - kfree(io_data->iovec); + kfree(io_data->to_free); kfree(io_data->buf); kfree(io_data); } @@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) * before the waiting completes, so do not assign to 'gadget' earlier */ struct usb_gadget *gadget = epfile->ffs->gadget; + size_t copied; spin_lock_irq(&epfile->ffs->eps_lock); /* In the meantime, endpoint got disabled or changed. */ @@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) spin_unlock_irq(&epfile->ffs->eps_lock); return -ESHUTDOWN; } + data_len = iov_iter_count(&io_data->data); /* * Controller may require buffer size to be aligned to * maxpacketsize of an out endpoint. */ - data_len = io_data->read ? - usb_ep_align_maybe(gadget, ep->ep, io_data->len) : - io_data->len; + if (io_data->read) + data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); spin_unlock_irq(&epfile->ffs->eps_lock); data = kmalloc(data_len, GFP_KERNEL); if (unlikely(!data)) return -ENOMEM; - if (io_data->aio && !io_data->read) { - int i; - size_t pos = 0; - for (i = 0; i < io_data->nr_segs; i++) { - if (unlikely(copy_from_user(&data[pos], - io_data->iovec[i].iov_base, - io_data->iovec[i].iov_len))) { - ret = -EFAULT; - goto error; - } - pos += io_data->iovec[i].iov_len; - } - } else { - if (!io_data->read && - unlikely(__copy_from_user(data, io_data->buf, - io_data->len))) { + if (!io_data->read) { + copied = copy_from_iter(data, data_len, &io_data->data); + if (copied != data_len) { ret = -EFAULT; goto error; } @@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) */ ret = ep->status; if (io_data->read && ret > 0) { - ret = min_t(size_t, ret, io_data->len); - - if (unlikely(copy_to_user(io_data->buf, - data, ret))) + ret = copy_to_iter(data, ret, &io_data->data); + if (unlikely(iov_iter_count(&io_data->data))) ret = -EFAULT; } } @@ -898,37 +864,6 @@ error: return ret; } -static ssize_t -ffs_epfile_write(struct file *file, const char __user *buf, size_t len, - loff_t *ptr) -{ - struct ffs_io_data io_data; - - ENTER(); - - io_data.aio = false; - io_data.read = false; - io_data.buf = (char * __user)buf; - io_data.len = len; - - return ffs_epfile_io(file, &io_data); -} - -static ssize_t -ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) -{ - struct ffs_io_data io_data; - - ENTER(); - - io_data.aio = false; - io_data.read = true; - io_data.buf = buf; - io_data.len = len; - - return ffs_epfile_io(file, &io_data); -} - static int ffs_epfile_open(struct inode *inode, struct file *file) { @@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb) return value; } -static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, - const struct iovec *iovec, - unsigned long nr_segs, loff_t loff) +static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) { - struct ffs_io_data *io_data; + struct 
ffs_io_data io_data, *p = &io_data; + ssize_t res; ENTER(); - io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); - if (unlikely(!io_data)) - return -ENOMEM; + if (!is_sync_kiocb(kiocb)) { + p = kmalloc(sizeof(io_data), GFP_KERNEL); + if (unlikely(!p)) + return -ENOMEM; + p->aio = true; + } else { + p->aio = false; + } - io_data->aio = true; - io_data->read = false; - io_data->kiocb = kiocb; - io_data->iovec = iovec; - io_data->nr_segs = nr_segs; - io_data->len = kiocb->ki_nbytes; - io_data->mm = current->mm; + p->read = false; + p->kiocb = kiocb; + p->data = *from; + p->mm = current->mm; - kiocb->private = io_data; + kiocb->private = p; kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); - return ffs_epfile_io(kiocb->ki_filp, io_data); + res = ffs_epfile_io(kiocb->ki_filp, p); + if (res == -EIOCBQUEUED) + return res; + if (p->aio) + kfree(p); + else + *from = p->data; + return res; } -static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, - const struct iovec *iovec, - unsigned long nr_segs, loff_t loff) +static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) { - struct ffs_io_data *io_data; - struct iovec *iovec_copy; + struct ffs_io_data io_data, *p = &io_data; + ssize_t res; ENTER(); - iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); - if (unlikely(!iovec_copy)) - return -ENOMEM; - - memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); - - io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); - if (unlikely(!io_data)) { - kfree(iovec_copy); - return -ENOMEM; + if (!is_sync_kiocb(kiocb)) { + p = kmalloc(sizeof(io_data), GFP_KERNEL); + if (unlikely(!p)) + return -ENOMEM; + p->aio = true; + } else { + p->aio = false; } - io_data->aio = true; - io_data->read = true; - io_data->kiocb = kiocb; - io_data->iovec = iovec_copy; - io_data->nr_segs = nr_segs; - io_data->len = kiocb->ki_nbytes; - io_data->mm = current->mm; + p->read = true; + p->kiocb = kiocb; + if (p->aio) { + p->to_free = dup_iter(&p->data, to, GFP_KERNEL); + if (!p->to_free) { + kfree(p); + return -ENOMEM; + } + } else { + p->data = *to; + p->to_free = NULL; + } + p->mm = current->mm; - kiocb->private = io_data; + kiocb->private = p; kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); - return ffs_epfile_io(kiocb->ki_filp, io_data); + res = ffs_epfile_io(kiocb->ki_filp, p); + if (res == -EIOCBQUEUED) + return res; + + if (p->aio) { + kfree(p->to_free); + kfree(p); + } else { + *to = p->data; + } + return res; } static int @@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = { .llseek = no_llseek, .open = ffs_epfile_open, - .write = ffs_epfile_write, - .read = ffs_epfile_read, - .aio_write = ffs_epfile_aio_write, - .aio_read = ffs_epfile_aio_read, + .write = new_sync_write, + .read = new_sync_read, + .write_iter = ffs_epfile_write_iter, + .read_iter = ffs_epfile_read_iter, .release = ffs_epfile_release, .unlocked_ioctl = ffs_epfile_ioctl, }; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index db49ec4c748e..200f9a584064 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC); MODULE_AUTHOR ("David Brownell"); MODULE_LICENSE ("GPL"); +static int ep_open(struct inode *, struct file *); + /*----------------------------------------------------------------------*/ @@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req) * still need dev->lock to use epdata->ep. 
*/ static int -get_ready_ep (unsigned f_flags, struct ep_data *epdata) +get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write) { int val; if (f_flags & O_NONBLOCK) { if (!mutex_trylock(&epdata->lock)) goto nonblock; - if (epdata->state != STATE_EP_ENABLED) { + if (epdata->state != STATE_EP_ENABLED && + (!is_write || epdata->state != STATE_EP_READY)) { mutex_unlock(&epdata->lock); nonblock: val = -EAGAIN; @@ -305,18 +308,20 @@ nonblock: switch (epdata->state) { case STATE_EP_ENABLED: + return 0; + case STATE_EP_READY: /* not configured yet */ + if (is_write) + return 0; + // FALLTHRU + case STATE_EP_UNBOUND: /* clean disconnect */ break; // case STATE_EP_DISABLED: /* "can't happen" */ - // case STATE_EP_READY: /* "can't happen" */ default: /* error! */ pr_debug ("%s: ep %p not available, state %d\n", shortname, epdata, epdata->state); - // FALLTHROUGH - case STATE_EP_UNBOUND: /* clean disconnect */ - val = -ENODEV; - mutex_unlock(&epdata->lock); } - return val; + mutex_unlock(&epdata->lock); + return -ENODEV; } static ssize_t @@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) return value; } - -/* handle a synchronous OUT bulk/intr/iso transfer */ -static ssize_t -ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) -{ - struct ep_data *data = fd->private_data; - void *kbuf; - ssize_t value; - - if ((value = get_ready_ep (fd->f_flags, data)) < 0) - return value; - - /* halt any endpoint by doing a "wrong direction" i/o call */ - if (usb_endpoint_dir_in(&data->desc)) { - if (usb_endpoint_xfer_isoc(&data->desc)) { - mutex_unlock(&data->lock); - return -EINVAL; - } - DBG (data->dev, "%s halt\n", data->name); - spin_lock_irq (&data->dev->lock); - if (likely (data->ep != NULL)) - usb_ep_set_halt (data->ep); - spin_unlock_irq (&data->dev->lock); - mutex_unlock(&data->lock); - return -EBADMSG; - } - - /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */ - - value = -ENOMEM; - kbuf = kmalloc (len, GFP_KERNEL); - if (unlikely (!kbuf)) - goto free1; - - value = ep_io (data, kbuf, len); - VDEBUG (data->dev, "%s read %zu OUT, status %d\n", - data->name, len, (int) value); - if (value >= 0 && copy_to_user (buf, kbuf, value)) - value = -EFAULT; - -free1: - mutex_unlock(&data->lock); - kfree (kbuf); - return value; -} - -/* handle a synchronous IN bulk/intr/iso transfer */ -static ssize_t -ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) -{ - struct ep_data *data = fd->private_data; - void *kbuf; - ssize_t value; - - if ((value = get_ready_ep (fd->f_flags, data)) < 0) - return value; - - /* halt any endpoint by doing a "wrong direction" i/o call */ - if (!usb_endpoint_dir_in(&data->desc)) { - if (usb_endpoint_xfer_isoc(&data->desc)) { - mutex_unlock(&data->lock); - return -EINVAL; - } - DBG (data->dev, "%s halt\n", data->name); - spin_lock_irq (&data->dev->lock); - if (likely (data->ep != NULL)) - usb_ep_set_halt (data->ep); - spin_unlock_irq (&data->dev->lock); - mutex_unlock(&data->lock); - return -EBADMSG; - } - - /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */ - - value = -ENOMEM; - kbuf = memdup_user(buf, len); - if (IS_ERR(kbuf)) { - value = PTR_ERR(kbuf); - kbuf = NULL; - goto free1; - } - - value = ep_io (data, kbuf, len); - VDEBUG (data->dev, "%s write %zu IN, status %d\n", - data->name, len, (int) value); -free1: - mutex_unlock(&data->lock); - kfree (kbuf); - return value; -} - static int ep_release (struct inode *inode, struct file *fd) { @@ -481,7 +395,7 @@ static long ep_ioctl(struct 
file *fd, unsigned code, unsigned long value) struct ep_data *data = fd->private_data; int status; - if ((status = get_ready_ep (fd->f_flags, data)) < 0) + if ((status = get_ready_ep (fd->f_flags, data, false)) < 0) return status; spin_lock_irq (&data->dev->lock); @@ -517,8 +431,8 @@ struct kiocb_priv { struct mm_struct *mm; struct work_struct work; void *buf; - const struct iovec *iv; - unsigned long nr_segs; + struct iov_iter to; + const void *to_free; unsigned actual; }; @@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb) return value; } -static ssize_t ep_copy_to_user(struct kiocb_priv *priv) -{ - ssize_t len, total; - void *to_copy; - int i; - - /* copy stuff into user buffers */ - total = priv->actual; - len = 0; - to_copy = priv->buf; - for (i=0; i < priv->nr_segs; i++) { - ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total); - - if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) { - if (len == 0) - len = -EFAULT; - break; - } - - total -= this; - len += this; - to_copy += this; - if (total == 0) - break; - } - - return len; -} - static void ep_user_copy_worker(struct work_struct *work) { struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); @@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work) size_t ret; use_mm(mm); - ret = ep_copy_to_user(priv); + ret = copy_to_iter(priv->buf, priv->actual, &priv->to); unuse_mm(mm); + if (!ret) + ret = -EFAULT; /* completing the iocb can drop the ctx and mm, don't touch mm after */ aio_complete(iocb, ret, ret); kfree(priv->buf); + kfree(priv->to_free); kfree(priv); } @@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) * don't need to copy anything to userspace, so we can * complete the aio request immediately. */ - if (priv->iv == NULL || unlikely(req->actual == 0)) { + if (priv->to_free == NULL || unlikely(req->actual == 0)) { kfree(req->buf); + kfree(priv->to_free); kfree(priv); iocb->private = NULL; /* aio_complete() reports bytes-transferred _and_ faults */ @@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) priv->buf = req->buf; priv->actual = req->actual; + INIT_WORK(&priv->work, ep_user_copy_worker); schedule_work(&priv->work); } spin_unlock(&epdata->dev->lock); @@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) put_ep(epdata); } -static ssize_t -ep_aio_rwtail( - struct kiocb *iocb, - char *buf, - size_t len, - struct ep_data *epdata, - const struct iovec *iv, - unsigned long nr_segs -) +static ssize_t ep_aio(struct kiocb *iocb, + struct kiocb_priv *priv, + struct ep_data *epdata, + char *buf, + size_t len) { - struct kiocb_priv *priv; - struct usb_request *req; - ssize_t value; + struct usb_request *req; + ssize_t value; - priv = kmalloc(sizeof *priv, GFP_KERNEL); - if (!priv) { - value = -ENOMEM; -fail: - kfree(buf); - return value; - } iocb->private = priv; priv->iocb = iocb; - priv->iv = iv; - priv->nr_segs = nr_segs; - INIT_WORK(&priv->work, ep_user_copy_worker); - - value = get_ready_ep(iocb->ki_filp->f_flags, epdata); - if (unlikely(value < 0)) { - kfree(priv); - goto fail; - } kiocb_set_cancel_fn(iocb, ep_aio_cancel); get_ep(epdata); @@ -669,75 +538,154 @@ fail: * allocate or submit those if the host disconnected. 
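The kiocb_priv rework above encodes a pattern worth spelling out: a USB completion handler runs in interrupt context and must not touch user memory, so the copy-out is deferred to process context via a workqueue that temporarily adopts the submitting task's mm, and dup_iter() gives the worker a stable private copy of the caller's iov_iter. A stripped-down sketch of that shape, with hypothetical demo_* names (the iocb completion and endpoint refcounting that the real code also performs here are elided):

	#include <linux/mmu_context.h>	/* use_mm()/unuse_mm() */
	#include <linux/slab.h>
	#include <linux/uio.h>
	#include <linux/workqueue.h>

	struct demo_copy_ctx {
		struct work_struct work;
		struct mm_struct *mm;	/* current->mm saved at submission time */
		struct iov_iter to;	/* private copy made with dup_iter() */
		const void *to_free;	/* iovec array owned by that copy */
		void *buf;		/* kernel bounce buffer */
		size_t actual;		/* bytes the hardware produced */
	};

	static void demo_copy_worker(struct work_struct *work)
	{
		struct demo_copy_ctx *ctx =
			container_of(work, struct demo_copy_ctx, work);
		size_t done;

		use_mm(ctx->mm);	/* map the submitter's address space */
		done = copy_to_iter(ctx->buf, ctx->actual, &ctx->to);
		unuse_mm(ctx->mm);

		/* here the real code reports done (or -EFAULT when done == 0)
		 * through aio_complete() before tearing everything down */
		kfree(ctx->buf);
		kfree(ctx->to_free);
		kfree(ctx);
	}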
*/ spin_lock_irq(&epdata->dev->lock); - if (likely(epdata->ep)) { - req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); - if (likely(req)) { - priv->req = req; - req->buf = buf; - req->length = len; - req->complete = ep_aio_complete; - req->context = iocb; - value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); - if (unlikely(0 != value)) - usb_ep_free_request(epdata->ep, req); - } else - value = -EAGAIN; - } else - value = -ENODEV; - spin_unlock_irq(&epdata->dev->lock); + value = -ENODEV; + if (unlikely(!epdata->ep)) + goto fail; - mutex_unlock(&epdata->lock); + req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); + value = -ENOMEM; + if (unlikely(!req)) + goto fail; - if (unlikely(value)) { - kfree(priv); - put_ep(epdata); - } else - value = -EIOCBQUEUED; + priv->req = req; + req->buf = buf; + req->length = len; + req->complete = ep_aio_complete; + req->context = iocb; + value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); + if (unlikely(0 != value)) { + usb_ep_free_request(epdata->ep, req); + goto fail; + } + spin_unlock_irq(&epdata->dev->lock); + return -EIOCBQUEUED; + +fail: + spin_unlock_irq(&epdata->dev->lock); + kfree(priv->to_free); + kfree(priv); + put_ep(epdata); return value; } static ssize_t -ep_aio_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t o) +ep_read_iter(struct kiocb *iocb, struct iov_iter *to) { - struct ep_data *epdata = iocb->ki_filp->private_data; - char *buf; + struct file *file = iocb->ki_filp; + struct ep_data *epdata = file->private_data; + size_t len = iov_iter_count(to); + ssize_t value; + char *buf; - if (unlikely(usb_endpoint_dir_in(&epdata->desc))) - return -EINVAL; + if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0) + return value; - buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); - if (unlikely(!buf)) - return -ENOMEM; + /* halt any endpoint by doing a "wrong direction" i/o call */ + if (usb_endpoint_dir_in(&epdata->desc)) { + if (usb_endpoint_xfer_isoc(&epdata->desc) || + !is_sync_kiocb(iocb)) { + mutex_unlock(&epdata->lock); + return -EINVAL; + } + DBG (epdata->dev, "%s halt\n", epdata->name); + spin_lock_irq(&epdata->dev->lock); + if (likely(epdata->ep != NULL)) + usb_ep_set_halt(epdata->ep); + spin_unlock_irq(&epdata->dev->lock); + mutex_unlock(&epdata->lock); + return -EBADMSG; + } - return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); + buf = kmalloc(len, GFP_KERNEL); + if (unlikely(!buf)) { + mutex_unlock(&epdata->lock); + return -ENOMEM; + } + if (is_sync_kiocb(iocb)) { + value = ep_io(epdata, buf, len); + if (value >= 0 && copy_to_iter(buf, value, to) != value) + value = -EFAULT; + } else { + struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); + value = -ENOMEM; + if (!priv) + goto fail; + priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL); + if (!priv->to_free) { + kfree(priv); + goto fail; + } + value = ep_aio(iocb, priv, epdata, buf, len); + if (value == -EIOCBQUEUED) + buf = NULL; + } +fail: + kfree(buf); + mutex_unlock(&epdata->lock); + return value; } +static ssize_t ep_config(struct ep_data *, const char *, size_t); + static ssize_t -ep_aio_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t o) +ep_write_iter(struct kiocb *iocb, struct iov_iter *from) { - struct ep_data *epdata = iocb->ki_filp->private_data; - char *buf; - size_t len = 0; - int i = 0; + struct file *file = iocb->ki_filp; + struct ep_data *epdata = file->private_data; + size_t len = iov_iter_count(from); + bool configured; + ssize_t value; + char *buf; + + if ((value = 
get_ready_ep(file->f_flags, epdata, true)) < 0) + return value; - if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) - return -EINVAL; + configured = epdata->state == STATE_EP_ENABLED; - buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); - if (unlikely(!buf)) + /* halt any endpoint by doing a "wrong direction" i/o call */ + if (configured && !usb_endpoint_dir_in(&epdata->desc)) { + if (usb_endpoint_xfer_isoc(&epdata->desc) || + !is_sync_kiocb(iocb)) { + mutex_unlock(&epdata->lock); + return -EINVAL; + } + DBG (epdata->dev, "%s halt\n", epdata->name); + spin_lock_irq(&epdata->dev->lock); + if (likely(epdata->ep != NULL)) + usb_ep_set_halt(epdata->ep); + spin_unlock_irq(&epdata->dev->lock); + mutex_unlock(&epdata->lock); + return -EBADMSG; + } + + buf = kmalloc(len, GFP_KERNEL); + if (unlikely(!buf)) { + mutex_unlock(&epdata->lock); return -ENOMEM; + } - for (i=0; i < nr_segs; i++) { - if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, - iov[i].iov_len) != 0)) { - kfree(buf); - return -EFAULT; + if (unlikely(copy_from_iter(buf, len, from) != len)) { + value = -EFAULT; + goto out; + } + + if (unlikely(!configured)) { + value = ep_config(epdata, buf, len); + } else if (is_sync_kiocb(iocb)) { + value = ep_io(epdata, buf, len); + } else { + struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); + value = -ENOMEM; + if (priv) { + value = ep_aio(iocb, priv, epdata, buf, len); + if (value == -EIOCBQUEUED) + buf = NULL; } - len += iov[i].iov_len; } - return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); +out: + kfree(buf); + mutex_unlock(&epdata->lock); + return value; } /*----------------------------------------------------------------------*/ @@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov, /* used after endpoint configuration */ static const struct file_operations ep_io_operations = { .owner = THIS_MODULE, - .llseek = no_llseek, - .read = ep_read, - .write = ep_write, - .unlocked_ioctl = ep_ioctl, + .open = ep_open, .release = ep_release, - - .aio_read = ep_aio_read, - .aio_write = ep_aio_write, + .llseek = no_llseek, + .read = new_sync_read, + .write = new_sync_write, + .unlocked_ioctl = ep_ioctl, + .read_iter = ep_read_iter, + .write_iter = ep_write_iter, }; /* ENDPOINT INITIALIZATION @@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = { * speed descriptor, then optional high speed descriptor. 
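With the separate pre-configuration file_operations gone, the endpoint file keeps one fops for its whole life: the first write() while the endpoint is still in STATE_EP_READY carries the configuration blob, and ep_write_iter() routes it to ep_config() instead of the data path. For reference, a hypothetical userspace sketch of that first write, matching the layout ep_config() parses (a native-endian tag of 1, the full/low-speed endpoint descriptor, then optionally the high-speed one):

	#include <string.h>
	#include <unistd.h>
	#include <linux/usb/ch9.h>

	/* fd is an open gadgetfs endpoint file; hs may be NULL when only a
	 * full-speed descriptor is supplied. */
	static int ep_setup(int fd, const struct usb_endpoint_descriptor *fs,
			    const struct usb_endpoint_descriptor *hs)
	{
		unsigned char buf[4 + 2 * USB_DT_ENDPOINT_SIZE];
		unsigned int tag = 1;	/* format tag that ep_config() checks */
		size_t len = 4;

		memcpy(buf, &tag, 4);
		memcpy(buf + len, fs, USB_DT_ENDPOINT_SIZE);
		len += USB_DT_ENDPOINT_SIZE;
		if (hs) {
			memcpy(buf + len, hs, USB_DT_ENDPOINT_SIZE);
			len += USB_DT_ENDPOINT_SIZE;
		}
		return write(fd, buf, len) == (ssize_t)len ? 0 : -1;
	}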
*/ static ssize_t -ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) +ep_config (struct ep_data *data, const char *buf, size_t len) { - struct ep_data *data = fd->private_data; struct usb_ep *ep; u32 tag; int value, length = len; - value = mutex_lock_interruptible(&data->lock); - if (value < 0) - return value; - if (data->state != STATE_EP_READY) { value = -EL2HLT; goto fail; @@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) goto fail0; /* we might need to change message format someday */ - if (copy_from_user (&tag, buf, 4)) { - goto fail1; - } + memcpy(&tag, buf, 4); if (tag != 1) { DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); goto fail0; @@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) */ /* full/low speed descriptor, then high speed */ - if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { - goto fail1; - } + memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE); if (data->desc.bLength != USB_DT_ENDPOINT_SIZE || data->desc.bDescriptorType != USB_DT_ENDPOINT) goto fail0; if (len != USB_DT_ENDPOINT_SIZE) { if (len != 2 * USB_DT_ENDPOINT_SIZE) goto fail0; - if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, - USB_DT_ENDPOINT_SIZE)) { - goto fail1; - } + memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, + USB_DT_ENDPOINT_SIZE); if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE || data->hs_desc.bDescriptorType != USB_DT_ENDPOINT) { @@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) case USB_SPEED_LOW: case USB_SPEED_FULL: ep->desc = &data->desc; - value = usb_ep_enable(ep); - if (value == 0) - data->state = STATE_EP_ENABLED; break; case USB_SPEED_HIGH: /* fails if caller didn't provide that descriptor... */ ep->desc = &data->hs_desc; - value = usb_ep_enable(ep); - if (value == 0) - data->state = STATE_EP_ENABLED; break; default: DBG(data->dev, "unconnected, %s init abandoned\n", data->name); value = -EINVAL; + goto gone; } + value = usb_ep_enable(ep); if (value == 0) { - fd->f_op = &ep_io_operations; + data->state = STATE_EP_ENABLED; value = length; } gone: @@ -867,14 +800,10 @@ fail: data->desc.bDescriptorType = 0; data->hs_desc.bDescriptorType = 0; } - mutex_unlock(&data->lock); return value; fail0: value = -EINVAL; goto fail; -fail1: - value = -EFAULT; - goto fail; } static int @@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd) return value; } -/* used before endpoint configuration */ -static const struct file_operations ep_config_operations = { - .llseek = no_llseek, - - .open = ep_open, - .write = ep_config, - .release = ep_release, -}; - /*----------------------------------------------------------------------*/ /* EP0 IMPLEMENTATION can be partly in userspace. 
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) enum ep0_state state; spin_lock_irq (&dev->lock); + if (dev->state <= STATE_DEV_OPENED) { + retval = -EINVAL; + goto done; + } /* report fd mode change before acting on it */ if (dev->setup_abort) { @@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) struct dev_data *dev = fd->private_data; ssize_t retval = -ESRCH; - spin_lock_irq (&dev->lock); - /* report fd mode change before acting on it */ if (dev->setup_abort) { dev->setup_abort = 0; @@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) } else DBG (dev, "fail %s, state %d\n", __func__, dev->state); - spin_unlock_irq (&dev->lock); return retval; } @@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait) struct dev_data *dev = fd->private_data; int mask = 0; + if (dev->state <= STATE_DEV_OPENED) + return DEFAULT_POLLMASK; + poll_wait(fd, &dev->wait, wait); spin_lock_irq (&dev->lock); @@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value) return ret; } -/* used after device configuration */ -static const struct file_operations ep0_io_operations = { - .owner = THIS_MODULE, - .llseek = no_llseek, - - .read = ep0_read, - .write = ep0_write, - .fasync = ep0_fasync, - .poll = ep0_poll, - .unlocked_ioctl = dev_ioctl, - .release = dev_release, -}; - /*----------------------------------------------------------------------*/ /* The in-kernel gadget driver handles most ep0 issues, in particular @@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev) goto enomem1; data->dentry = gadgetfs_create_file (dev->sb, data->name, - data, &ep_config_operations); + data, &ep_io_operations); if (!data->dentry) goto enomem2; list_add_tail (&data->epfiles, &dev->epfiles); @@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) u32 tag; char *kbuf; + spin_lock_irq(&dev->lock); + if (dev->state > STATE_DEV_OPENED) { + value = ep0_write(fd, buf, len, ptr); + spin_unlock_irq(&dev->lock); + return value; + } + spin_unlock_irq(&dev->lock); + if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) return -EINVAL; @@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) * on, they can work ... except in cleanup paths that * kick in after the ep0 descriptor is closed. 
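The thread running through all of these inode.c hunks is the removal of runtime f_op swapping: ep0 now keeps one file_operations from open to release, and each handler gates on dev->state itself, as the dev_config() hunk above shows by forwarding to ep0_write() once the device has left the configuration phase. Schematically (hypothetical demo_* names; this sketch samples the state under the lock but faults on user memory only after dropping it):

	#include <linux/fs.h>
	#include <linux/spinlock.h>

	struct demo_dev {
		spinlock_t lock;
		int state;	/* stand-in for the STATE_DEV_* enum above */
	};

	/* Hypothetical helpers standing in for the ep0_write()/config bodies. */
	extern ssize_t demo_ep0_write(struct demo_dev *dev,
				      const char __user *buf, size_t len);
	extern ssize_t demo_parse_config(struct demo_dev *dev,
					 const char __user *buf, size_t len);

	static ssize_t demo_dev_write(struct file *fd, const char __user *buf,
				      size_t len, loff_t *ppos)
	{
		struct demo_dev *dev = fd->private_data;
		bool configured;

		spin_lock_irq(&dev->lock);
		configured = dev->state > 1;	/* i.e. past "just opened" */
		spin_unlock_irq(&dev->lock);

		return configured ? demo_ep0_write(dev, buf, len)
				  : demo_parse_config(dev, buf, len);
	}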
*/ - fd->f_op = &ep0_io_operations; value = len; } return value; @@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd) return value; } -static const struct file_operations dev_init_operations = { +static const struct file_operations ep0_operations = { .llseek = no_llseek, .open = dev_open, + .read = ep0_read, .write = dev_config, .fasync = ep0_fasync, + .poll = ep0_poll, .unlocked_ioctl = dev_ioctl, .release = dev_release, }; @@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent) goto Enomem; dev->sb = sb; - dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); + dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations); if (!dev->dentry) { put_dev(dev); goto Enomem; diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index f88bfdf5b6a0..2027a27546ef 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, func = vfio_pci_set_err_trigger; break; } + break; case VFIO_PCI_REQ_IRQ_INDEX: switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { case VFIO_IRQ_SET_ACTION_TRIGGER: func = vfio_pci_set_req_trigger; break; } + break; } if (!func) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 0413157f3b49..6a356e344f82 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/balloon_compaction.h> #include <linux/oom.h> +#include <linux/wait.h> /* * Balloon device works in 4K page units. So each page is pointed to by @@ -334,17 +335,25 @@ static int virtballoon_oom_notify(struct notifier_block *self, static int balloon(void *_vballoon) { struct virtio_balloon *vb = _vballoon; + DEFINE_WAIT_FUNC(wait, woken_wake_function); set_freezable(); while (!kthread_should_stop()) { s64 diff; try_to_freeze(); - wait_event_interruptible(vb->config_change, - (diff = towards_target(vb)) != 0 - || vb->need_stats_update - || kthread_should_stop() - || freezing(current)); + + add_wait_queue(&vb->config_change, &wait); + for (;;) { + if ((diff = towards_target(vb)) != 0 || + vb->need_stats_update || + kthread_should_stop() || + freezing(current)) + break; + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } + remove_wait_queue(&vb->config_change, &wait); + if (vb->need_stats_update) stats_handle_request(vb); if (diff > 0) @@ -499,6 +508,8 @@ static int virtballoon_probe(struct virtio_device *vdev) if (err < 0) goto out_oom_notify; + virtio_device_ready(vdev); + vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { err = PTR_ERR(vb->thread); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index cad569890908..6010d7ec0a0f 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -156,22 +156,95 @@ static void vm_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - u8 *ptr = buf; - int i; + void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG; + u8 b; + __le16 w; + __le32 l; - for (i = 0; i < len; i++) - ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); + if (vm_dev->version == 1) { + u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + ptr[i] = readb(base + offset + i); + return; + } + + switch (len) { + case 1: + b = readb(base + offset); + memcpy(buf, &b, sizeof b); + break; + case 
2: + w = cpu_to_le16(readw(base + offset)); + memcpy(buf, &w, sizeof w); + break; + case 4: + l = cpu_to_le32(readl(base + offset)); + memcpy(buf, &l, sizeof l); + break; + case 8: + l = cpu_to_le32(readl(base + offset)); + memcpy(buf, &l, sizeof l); + l = cpu_to_le32(ioread32(base + offset + sizeof l)); + memcpy(buf + sizeof l, &l, sizeof l); + break; + default: + BUG(); + } } static void vm_set(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - const u8 *ptr = buf; - int i; + void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG; + u8 b; + __le16 w; + __le32 l; - for (i = 0; i < len; i++) - writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); + if (vm_dev->version == 1) { + const u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + writeb(ptr[i], base + offset + i); + + return; + } + + switch (len) { + case 1: + memcpy(&b, buf, sizeof b); + writeb(b, base + offset); + break; + case 2: + memcpy(&w, buf, sizeof w); + writew(le16_to_cpu(w), base + offset); + break; + case 4: + memcpy(&l, buf, sizeof l); + writel(le32_to_cpu(l), base + offset); + break; + case 8: + memcpy(&l, buf, sizeof l); + writel(le32_to_cpu(l), base + offset); + memcpy(&l, buf + sizeof l, sizeof l); + writel(le32_to_cpu(l), base + offset + sizeof l); + break; + default: + BUG(); + } +} + +static u32 vm_generation(struct virtio_device *vdev) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + if (vm_dev->version == 1) + return 0; + else + return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION); } static u8 vm_get_status(struct virtio_device *vdev) @@ -440,6 +513,7 @@ static const char *vm_bus_name(struct virtio_device *vdev) static const struct virtio_config_ops virtio_mmio_config_ops = { .get = vm_get, .set = vm_set, + .generation = vm_generation, .get_status = vm_get_status, .set_status = vm_set_status, .reset = vm_reset, diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index b4bca2d4a7e5..70fba973a107 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq) pirq_query_unmask(irq); rc = set_evtchn_to_irq(evtchn, irq); - if (rc != 0) { - pr_err("irq%d: Failed to set port to irq mapping (%d)\n", - irq, rc); - xen_evtchn_close(evtchn); - return 0; - } + if (rc) + goto err; + bind_evtchn_to_cpu(evtchn, 0); info->evtchn = evtchn; + rc = xen_evtchn_port_setup(info); + if (rc) + goto err; + out: unmask_evtchn(evtchn); eoi_pirq(irq_get_irq_data(irq)); return 0; + +err: + pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); + xen_evtchn_close(evtchn); + return 0; } static unsigned int startup_pirq(struct irq_data *data) diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 46ae0f9f02ad..75fe3d466515 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c @@ -16,7 +16,7 @@ #include "conf_space.h" #include "conf_space_quirks.h" -static bool permissive; +bool permissive; module_param(permissive, bool, 0644); /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h index e56c934ad137..2e1d73d1d5d0 100644 --- a/drivers/xen/xen-pciback/conf_space.h +++ b/drivers/xen/xen-pciback/conf_space.h @@ -64,6 +64,8 @@ struct config_field_entry { void *data; }; +extern bool permissive; 
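The new vm_generation() above is what makes multi-field config reads safe on virtio-mmio version 2 devices: the device bumps the counter whenever it changes configuration space, so a reader retries until it sees the same value before and after the access. Roughly what the virtio core's config helpers do with it (demo_* names are illustrative):

	#include <linux/virtio_config.h>

	static u32 demo_cfg_generation(struct virtio_device *vdev)
	{
		/* ->generation is optional; transports without it report 0 */
		return vdev->config->generation ? vdev->config->generation(vdev) : 0;
	}

	/* Read a config blob that must be internally consistent. */
	static void demo_read_config(struct virtio_device *vdev,
				     unsigned int offset, void *buf, unsigned int len)
	{
		u32 before, after;

		do {
			before = demo_cfg_generation(vdev);
			vdev->config->get(vdev, offset, buf, len);
			after = demo_cfg_generation(vdev);
		} while (before != after);	/* config changed mid-read, retry */
	}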
+ #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) /* Add fields to a device - the add_fields macro expects to get a pointer to diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index c5ee82587e8c..2d7369391472 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c @@ -11,6 +11,10 @@ #include "pciback.h" #include "conf_space.h" +struct pci_cmd_info { + u16 val; +}; + struct pci_bar_info { u32 val; u32 len_val; @@ -20,22 +24,36 @@ struct pci_bar_info { #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) -static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) +/* Bits guests are allowed to control in permissive mode. */ +#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \ + PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \ + PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK) + +static void *command_init(struct pci_dev *dev, int offset) { - int i; - int ret; - - ret = xen_pcibk_read_config_word(dev, offset, value, data); - if (!pci_is_enabled(dev)) - return ret; - - for (i = 0; i < PCI_ROM_RESOURCE; i++) { - if (dev->resource[i].flags & IORESOURCE_IO) - *value |= PCI_COMMAND_IO; - if (dev->resource[i].flags & IORESOURCE_MEM) - *value |= PCI_COMMAND_MEMORY; + struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + int err; + + if (!cmd) + return ERR_PTR(-ENOMEM); + + err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val); + if (err) { + kfree(cmd); + return ERR_PTR(err); } + return cmd; +} + +static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) +{ + int ret = pci_read_config_word(dev, offset, value); + const struct pci_cmd_info *cmd = data; + + *value &= PCI_COMMAND_GUEST; + *value |= cmd->val & ~PCI_COMMAND_GUEST; + return ret; } @@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) { struct xen_pcibk_dev_data *dev_data; int err; + u16 val; + struct pci_cmd_info *cmd = data; dev_data = pci_get_drvdata(dev); if (!pci_is_enabled(dev) && is_enable_cmd(value)) { @@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) } } + cmd->val = value; + + if (!permissive && (!dev_data || !dev_data->permissive)) + return 0; + + /* Only allow the guest to control certain bits. 
*/ + err = pci_read_config_word(dev, offset, &val); + if (err || val == value) + return err; + + value &= PCI_COMMAND_GUEST; + value |= val & ~PCI_COMMAND_GUEST; + return pci_write_config_word(dev, offset, value); } @@ -282,6 +315,8 @@ static const struct config_field header_common[] = { { .offset = PCI_COMMAND, .size = 2, + .init = command_init, + .release = bar_release, .u.w.read = command_read, .u.w.write = command_write, }, diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ed19a7d622fa..39706c57ad3c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -890,8 +890,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) newpage = buf->page; - if (WARN_ON(!PageUptodate(newpage))) - return -EIO; + if (!PageUptodate(newpage)) + SetPageUptodate(newpage); ClearPageMappedToDisk(newpage); @@ -1353,6 +1353,17 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, return err; } +static int fuse_dev_open(struct inode *inode, struct file *file) +{ + /* + * The fuse device's file's private_data is used to hold + * the fuse_conn(ection) when it is mounted, and is used to + * keep track of whether the file has been mounted already. + */ + file->private_data = NULL; + return 0; +} + static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { @@ -1797,6 +1808,9 @@ copy_finish: static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, unsigned int size, struct fuse_copy_state *cs) { + /* Don't try to move pages (yet) */ + cs->move_pages = 0; + switch (code) { case FUSE_NOTIFY_POLL: return fuse_notify_poll(fc, size, cs); @@ -2217,6 +2231,7 @@ static int fuse_dev_fasync(int fd, struct file *file, int on) const struct file_operations fuse_dev_operations = { .owner = THIS_MODULE, + .open = fuse_dev_open, .llseek = no_llseek, .read = do_sync_read, .aio_read = fuse_dev_read, diff --git a/fs/locks.c b/fs/locks.c index f1bad681fc1c..528fedfda15e 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1728,7 +1728,7 @@ static int generic_delete_lease(struct file *filp, void *owner) break; } } - trace_generic_delete_lease(inode, fl); + trace_generic_delete_lease(inode, victim); if (victim) error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 469086b9f99b..0c3f303baf32 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { struct nilfs_inode_info *ii, *n; + int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE); int defer_iput = false; spin_lock(&nilfs->ns_inode_lock); @@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, brelse(ii->i_bh); ii->i_bh = NULL; list_del_init(&ii->i_dirty); - if (!ii->vfs_inode.i_nlink) { + if (!ii->vfs_inode.i_nlink || during_mount) { /* - * Defer calling iput() to avoid a deadlock - * over I_SYNC flag for inodes with i_nlink == 0 + * Defer calling iput() to avoid deadlocks if + * i_nlink == 0 or mount is not yet finished. 
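Back in the xen-pciback hunk above, the permissive-mode write path boils down to a single read-modify-write in which the guest owns only the PCI_COMMAND_GUEST bits and the host keeps the rest. Condensed into a hypothetical helper (the mask is the one this patch defines):

	#include <linux/pci.h>

	/* Guest-controllable command bits, as defined in the patch above. */
	#define DEMO_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
				    PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
				    PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)

	static int demo_apply_guest_cmd(struct pci_dev *dev, u16 requested)
	{
		u16 cur;
		int err = pci_read_config_word(dev, PCI_COMMAND, &cur);

		if (err)
			return err;
		requested &= DEMO_COMMAND_GUEST;	/* bits the guest may set */
		requested |= cur & ~DEMO_COMMAND_GUEST;	/* host-owned bits survive */
		if (requested == cur)
			return 0;			/* nothing to write back */
		return pci_write_config_word(dev, PCI_COMMAND, requested);
	}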
*/ list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); defer_iput = true; diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 9a66ff79ff27..d2f97ecca6a5 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -143,7 +143,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) return false; - if (event_mask & marks_mask & ~marks_ignored_mask) + if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask & + ~marks_ignored_mask) return true; return false; diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 8490c64d34fe..460c6c37e683 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -502,7 +502,7 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb) static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) { - if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) + if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO) return 1; return 0; } diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 20e37a3ed26f..db64ce2d4667 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -102,11 +102,11 @@ | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ - | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO) + | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \ + | OCFS2_FEATURE_INCOMPAT_APPEND_DIO) #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ - | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \ - | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) + | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) /* * Heartbeat-only devices are missing journals and other files. The @@ -179,6 +179,11 @@ #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 /* + * Append Direct IO support + */ +#define OCFS2_FEATURE_INCOMPAT_APPEND_DIO 0x8000 + +/* * backup superblock flag is used to indicate that this volume * has backup superblocks. */ @@ -200,10 +205,6 @@ #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 -/* - * Append Direct IO support - */ -#define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO 0x0008 /* The byte offset of the first backup block will be 1G. * The following will be 4G, 16G, 64G, 256G and 1T. 
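The ocfs2 move is more than renaming a constant: a ro-compat bit lets a kernel that does not know the feature still mount the volume read-only, while an incompat bit refuses the mount altogether, which is the right contract for append direct I/O. The generic gate behind those SUPP masks looks roughly like this (demo_check_features() is a hypothetical distillation, assuming the feature definitions edited above):

	#include <linux/types.h>

	static int demo_check_features(u32 incompat, u32 ro_compat, bool rdonly)
	{
		if (incompat & ~OCFS2_FEATURE_INCOMPAT_SUPP)
			return -EINVAL;		/* unknown incompat: refuse the mount */
		if (!rdonly && (ro_compat & ~OCFS2_FEATURE_RO_COMPAT_SUPP))
			return -EACCES;		/* unknown ro-compat: must stay read-only */
		return 0;
	}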
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index b90952f528b1..5f0d1993e6e3 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -529,8 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data) { struct ovl_fs *ufs = sb->s_fs_info; - if (!(*flags & MS_RDONLY) && - (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY))) + if (!(*flags & MS_RDONLY) && !ufs->upper_mnt) return -EROFS; return 0; @@ -615,9 +614,19 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) break; default: + pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p); return -EINVAL; } } + + /* Workdir is useless in non-upper mount */ + if (!config->upperdir && config->workdir) { + pr_info("overlayfs: option \"workdir=%s\" is useless in a non-upper mount, ignore\n", + config->workdir); + kfree(config->workdir); + config->workdir = NULL; + } + return 0; } @@ -837,7 +846,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_stack_depth = 0; if (ufs->config.upperdir) { - /* FIXME: workdir is not needed for a R/O mount */ if (!ufs->config.workdir) { pr_err("overlayfs: missing 'workdir'\n"); goto out_free_config; @@ -847,6 +855,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (err) goto out_free_config; + /* Upper fs should not be r/o */ + if (upperpath.mnt->mnt_sb->s_flags & MS_RDONLY) { + pr_err("overlayfs: upper fs is r/o, try multi-lower layers mount\n"); + err = -EINVAL; + goto out_put_upperpath; + } + err = ovl_mount_dir(ufs->config.workdir, &workpath); if (err) goto out_put_upperpath; @@ -869,8 +884,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) err = -EINVAL; stacklen = ovl_split_lowerdirs(lowertmp); - if (stacklen > OVL_MAX_STACK) + if (stacklen > OVL_MAX_STACK) { + pr_err("overlayfs: too many lower directries, limit is %d\n", + OVL_MAX_STACK); goto out_free_lowertmp; + } else if (!ufs->config.upperdir && stacklen == 1) { + pr_err("overlayfs: at least 2 lowerdir are needed while upperdir nonexistent\n"); + goto out_free_lowertmp; + } stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); if (!stack) @@ -932,8 +953,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) ufs->numlower++; } - /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ - if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) + /* If the upper fs is nonexistent, we mark overlayfs r/o too */ + if (!ufs->upper_mnt) sb->s_flags |= MS_RDONLY; sb->s_d_op = &ovl_dentry_operations; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 956b75d61809..6dee68d013ff 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1325,6 +1325,9 @@ out: static int pagemap_open(struct inode *inode, struct file *file) { + /* do not disclose physical addresses: attack vector */ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about " "to stop being page-shift some time soon. 
See the " "linux/Documentation/vm/pagemap.txt for details.\n"); diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 178525e5f430..018afb264ac2 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -58,8 +58,9 @@ struct af_alg_type { }; struct af_alg_sgl { - struct scatterlist sg[ALG_MAX_PAGES]; + struct scatterlist sg[ALG_MAX_PAGES + 1]; struct page *pages[ALG_MAX_PAGES]; + unsigned int npages; }; int af_alg_register_type(const struct af_alg_type *type); @@ -70,6 +71,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock); int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len); void af_alg_free_sg(struct af_alg_sgl *sgl); +void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new); int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h index 2fbc804e1a45..226f77246a70 100644 --- a/include/dt-bindings/pinctrl/am33xx.h +++ b/include/dt-bindings/pinctrl/am33xx.h @@ -13,7 +13,8 @@ #define PULL_DISABLE (1 << 3) #define INPUT_EN (1 << 5) -#define SLEWCTRL_FAST (1 << 6) +#define SLEWCTRL_SLOW (1 << 6) +#define SLEWCTRL_FAST 0 /* update macro depending on INPUT_EN and PULL_ENA */ #undef PIN_OUTPUT diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h index 9c2e4f82381e..5f4d01898c9c 100644 --- a/include/dt-bindings/pinctrl/am43xx.h +++ b/include/dt-bindings/pinctrl/am43xx.h @@ -18,7 +18,8 @@ #define PULL_DISABLE (1 << 16) #define PULL_UP (1 << 17) #define INPUT_EN (1 << 18) -#define SLEWCTRL_FAST (1 << 19) +#define SLEWCTRL_SLOW (1 << 19) +#define SLEWCTRL_FAST 0 #define DS0_PULL_UP_DOWN_EN (1 << 27) #define PIN_OUTPUT (PULL_DISABLE) diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 7c55dd5dd2c9..66203b268984 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -114,6 +114,7 @@ struct vgic_ops { void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); u64 (*get_eisr)(const struct kvm_vcpu *vcpu); + void (*clear_eisr)(struct kvm_vcpu *vcpu); u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); void (*enable_underflow)(struct kvm_vcpu *vcpu); void (*disable_underflow)(struct kvm_vcpu *vcpu); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index a884f5a2c503..280a315de8d6 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -44,7 +44,7 @@ struct bpf_map_type_list { /* function argument constraints */ enum bpf_arg_type { - ARG_ANYTHING = 0, /* any argument is ok */ + ARG_DONTCARE = 0, /* unused argument in helper function */ /* the following constraints used to prototype * bpf_map_lookup/update/delete_elem() functions @@ -58,6 +58,8 @@ enum bpf_arg_type { */ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ + + ARG_ANYTHING, /* any (initialized) argument is ok */ }; /* type of values returned from helper functions */ @@ -101,6 +103,9 @@ struct bpf_verifier_ops { * with 'type' (read or write) is allowed */ bool (*is_valid_access)(int off, int size, enum bpf_access_type type); + + u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off, + struct bpf_insn *insn); }; struct bpf_prog_type_list { @@ -131,7 +136,7 @@ struct bpf_map *bpf_map_get(struct fd f); void bpf_map_put(struct bpf_map *map); /* verify correctness of eBPF program */ -int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); +int 
bpf_check(struct bpf_prog **fp, union bpf_attr *attr); #else static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl) { @@ -152,4 +157,7 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto; extern const struct bpf_func_proto bpf_map_update_elem_proto; extern const struct bpf_func_proto bpf_map_delete_elem_proto; +extern const struct bpf_func_proto bpf_get_prandom_u32_proto; +extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/clk.h b/include/linux/clk.h index 8381bbfbc308..68c16a6bedb3 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -125,6 +125,19 @@ int clk_set_phase(struct clk *clk, int degrees); */ int clk_get_phase(struct clk *clk); +/** + * clk_is_match - check if two clk's point to the same hardware clock + * @p: clk compared against q + * @q: clk compared against p + * + * Returns true if the two struct clk pointers both point to the same hardware + * clock node. Put differently, returns true if struct clk *p and struct clk *q + * share the same struct clk_core object. + * + * Returns false otherwise. Note that two NULL clks are treated as matching. + */ +bool clk_is_match(const struct clk *p, const struct clk *q); + #else static inline long clk_get_accuracy(struct clk *clk) @@ -142,6 +155,11 @@ static inline long clk_get_phase(struct clk *clk) return -ENOTSUPP; } +static inline bool clk_is_match(const struct clk *p, const struct clk *q) +{ + return p == q; +} + #endif /** diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 439ff698000a..221025423e6c 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -43,6 +43,7 @@ enum dccp_state { DCCP_CLOSING = TCP_CLOSING, DCCP_TIME_WAIT = TCP_TIME_WAIT, DCCP_CLOSED = TCP_CLOSE, + DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV, DCCP_PARTOPEN = TCP_MAX_STATES, DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */ DCCP_MAX_STATES @@ -57,6 +58,7 @@ enum { DCCPF_CLOSING = TCPF_CLOSING, DCCPF_TIME_WAIT = TCPF_TIME_WAIT, DCCPF_CLOSED = TCPF_CLOSE, + DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV, DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN), }; @@ -317,6 +319,6 @@ static inline const char *dccp_role(const struct sock *sk) return NULL; } -extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req); +extern void dccp_syn_ack_timeout(const struct request_sock *req); #endif /* _LINUX_DCCP_H */ diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h index 40b0ab953937..8872ca103d06 100644 --- a/include/linux/ieee802154.h +++ b/include/linux/ieee802154.h @@ -30,6 +30,7 @@ #define IEEE802154_MTU 127 #define IEEE802154_ACK_PSDU_LEN 5 #define IEEE802154_MIN_PSDU_LEN 9 +#define IEEE802154_FCS_LEN 2 #define IEEE802154_PAN_ID_BROADCAST 0xffff #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff @@ -39,6 +40,7 @@ #define IEEE802154_LIFS_PERIOD 40 #define IEEE802154_SIFS_PERIOD 12 +#define IEEE802154_MAX_SIFS_FRAME_SIZE 18 #define IEEE802154_MAX_CHANNEL 26 #define IEEE802154_MAX_PAGE 31 diff --git a/include/linux/igmp.h b/include/linux/igmp.h index b5a6470e686c..2c677afeea47 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -111,9 +111,7 @@ struct ip_mc_list { extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto); extern int igmp_rcv(struct sk_buff *); -extern int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); -extern int __ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); extern int 
ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr); extern void ip_mc_drop_socket(struct sock *sk); extern int ip_mc_source(int add, int omode, struct sock *sk, diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 800544bc7bfd..781974afff9f 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -166,6 +166,11 @@ #define GITS_TRANSLATER 0x10040 +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_QUIESCENT (1U << 31) + +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) #define GITS_TYPER_PTA (1UL << 19) #define GITS_CBASER_VALID (1UL << 63) diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 72ba725ddf9c..5bb074431eb0 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -5,6 +5,7 @@ struct kmem_cache; struct page; +struct vm_struct; #ifdef CONFIG_KASAN @@ -49,15 +50,11 @@ void kasan_krealloc(const void *object, size_t new_size); void kasan_slab_alloc(struct kmem_cache *s, void *object); void kasan_slab_free(struct kmem_cache *s, void *object); -#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) - int kasan_module_alloc(void *addr, size_t size); -void kasan_module_free(void *addr); +void kasan_free_shadow(const struct vm_struct *vm); #else /* CONFIG_KASAN */ -#define MODULE_ALIGN 1 - static inline void kasan_unpoison_shadow(const void *address, size_t size) {} static inline void kasan_enable_current(void) {} @@ -82,7 +79,7 @@ static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {} static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } -static inline void kasan_module_free(void *addr) {} +static inline void kasan_free_shadow(const struct vm_struct *vm) {} #endif /* CONFIG_KASAN */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 1cc54822b931..4550c67b92e4 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -205,6 +205,7 @@ enum { MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20, MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21, MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22, + MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23 }; enum { @@ -450,6 +451,21 @@ enum mlx4_module_id { MLX4_MODULE_ID_QSFP28 = 0x11, }; +enum { /* rl */ + MLX4_QP_RATE_LIMIT_NONE = 0, + MLX4_QP_RATE_LIMIT_KBS = 1, + MLX4_QP_RATE_LIMIT_MBS = 2, + MLX4_QP_RATE_LIMIT_GBS = 3 +}; + +struct mlx4_rate_limit_caps { + u16 num_rates; /* Number of different rates */ + u8 min_unit; + u16 min_val; + u8 max_unit; + u16 max_val; +}; + static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; @@ -565,6 +581,7 @@ struct mlx4_caps { u32 dmfs_high_rate_qpn_base; u32 dmfs_high_rate_qpn_range; u32 vf_caps; + struct mlx4_rate_limit_caps rl_caps; }; struct mlx4_buf_list { diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 551f85456c11..1023ebe035b7 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -207,14 +207,16 @@ struct mlx4_qp_context { __be32 msn; __be16 rq_wqe_counter; __be16 sq_wqe_counter; - u32 reserved3[2]; + u32 reserved3; + __be16 rate_limit_params; + __be16 reserved4; __be32 param3; __be32 nummmcpeers_basemkey; u8 log_page_size; - u8 reserved4[2]; + u8 reserved5[2]; u8 mtt_base_addr_h; __be32 mtt_base_addr_l; - u32 reserved5[10]; + u32 reserved6[10]; }; struct mlx4_update_qp_context { @@ -229,6 +231,7 @@ 
struct mlx4_update_qp_context { enum { MLX4_UPD_QP_MASK_PM_STATE = 32, MLX4_UPD_QP_MASK_VSD = 33, + MLX4_UPD_QP_MASK_RATE_LIMIT = 35, }; enum { @@ -428,7 +431,8 @@ struct mlx4_wqe_inline_seg { enum mlx4_update_qp_attr { MLX4_UPDATE_QP_SMAC = 1 << 0, MLX4_UPDATE_QP_VSD = 1 << 1, - MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 + MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2, + MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 3) - 1 }; enum mlx4_update_qp_params_flags { @@ -438,6 +442,8 @@ enum mlx4_update_qp_params_flags { struct mlx4_update_qp_params { u8 smac_index; u32 flags; + u16 rate_unit; + u16 rate_val; }; int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, diff --git a/include/linux/module.h b/include/linux/module.h index 42999fe2dbd0..b03485bcb82a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -344,6 +344,10 @@ struct module { unsigned long *ftrace_callsites; #endif +#ifdef CONFIG_LIVEPATCH + bool klp_alive; +#endif + #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? */ struct list_head source_list; diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index f7556261fe3c..4d0cb9bba93e 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod); /* Any cleanup before freeing mod->module_init */ void module_arch_freeing_init(struct module *mod); + +#ifdef CONFIG_KASAN +#include <linux/kasan.h> +#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) +#else +#define MODULE_ALIGN PAGE_SIZE +#endif + #endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1354ae83efc8..5ae69e7df867 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -587,6 +587,7 @@ struct netdev_queue { #ifdef CONFIG_BQL struct dql dql; #endif + unsigned long tx_maxrate; } ____cacheline_aligned_in_smp; static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) @@ -768,8 +769,6 @@ struct netdev_phys_item_id { typedef u16 (*select_queue_fallback_t)(struct net_device *dev, struct sk_buff *skb); -struct fib_info; - /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -966,9 +965,12 @@ struct fib_info; * Used to add FDB entries to dump requests. Implementers should add * entries to skb and update idx with the number of entries. * - * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) + * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, + * u16 flags) * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, * struct net_device *dev, u32 filter_mask) + * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, + * u16 flags); * * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); * Called to change device carrier. Soft-devices (like dummy, team, etc) @@ -1024,23 +1026,10 @@ struct fib_info; * be otherwise expressed by feature flags. The check is called with * the set of features that the stack has calculated and it returns * those the driver believes to be appropriate. - * - * int (*ndo_switch_parent_id_get)(struct net_device *dev, - * struct netdev_phys_item_id *psid); - * Called to get an ID of the switch chip this port is part of. - * If driver implements this, it indicates that it represents a port - * of a switch chip. - * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); - * Called to notify switch device port of bridge port STP - * state change. 
- * int (*ndo_sw_parent_fib_ipv4_add)(struct net_device *dev, __be32 dst, - * int dst_len, struct fib_info *fi, - * u8 tos, u8 type, u32 nlflags, u32 tb_id); - * Called to add/modify IPv4 route to switch device. - * int (*ndo_sw_parent_fib_ipv4_del)(struct net_device *dev, __be32 dst, - * int dst_len, struct fib_info *fi, - * u8 tos, u8 type, u32 tb_id); - * Called to delete IPv4 route from switch device. + * int (*ndo_set_tx_maxrate)(struct net_device *dev, + * int queue_index, u32 maxrate); + * Called when a user wants to set a max-rate limitation of specific + * TX queue. */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1178,6 +1167,8 @@ struct net_device_ops { bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, struct netdev_phys_item_id *ppid); + int (*ndo_get_phys_port_name)(struct net_device *dev, + char *name, size_t len); void (*ndo_add_vxlan_port)(struct net_device *dev, sa_family_t sa_family, __be16 port); @@ -1197,25 +1188,9 @@ struct net_device_ops { netdev_features_t (*ndo_features_check) (struct sk_buff *skb, struct net_device *dev, netdev_features_t features); -#ifdef CONFIG_NET_SWITCHDEV - int (*ndo_switch_parent_id_get)(struct net_device *dev, - struct netdev_phys_item_id *psid); - int (*ndo_switch_port_stp_update)(struct net_device *dev, - u8 state); - int (*ndo_switch_fib_ipv4_add)(struct net_device *dev, - __be32 dst, - int dst_len, - struct fib_info *fi, - u8 tos, u8 type, - u32 nlflags, - u32 tb_id); - int (*ndo_switch_fib_ipv4_del)(struct net_device *dev, - __be32 dst, - int dst_len, - struct fib_info *fi, - u8 tos, u8 type, - u32 tb_id); -#endif + int (*ndo_set_tx_maxrate)(struct net_device *dev, + int queue_index, + u32 maxrate); }; /** @@ -1577,6 +1552,9 @@ struct net_device { const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct forwarding_accel_ops *fwd_ops; +#ifdef CONFIG_NET_SWITCHDEV + const struct swdev_ops *swdev_ops; +#endif const struct header_ops *header_ops; @@ -1721,9 +1699,7 @@ struct net_device { struct netpoll_info __rcu *npinfo; #endif -#ifdef CONFIG_NET_NS - struct net *nd_net; -#endif + possible_net_t nd_net; /* mid-layer private */ union { @@ -1863,10 +1839,7 @@ struct net *dev_net(const struct net_device *dev) static inline void dev_net_set(struct net_device *dev, struct net *net) { -#ifdef CONFIG_NET_NS - release_net(dev->nd_net); - dev->nd_net = hold_net(net); -#endif + write_pnet(&dev->nd_net, net); } static inline bool netdev_uses_dsa(struct net_device *dev) @@ -2186,6 +2159,7 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name); int dev_alloc_name(struct net_device *dev, const char *name); int dev_open(struct net_device *dev); int dev_close(struct net_device *dev); +int dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct sk_buff *newskb); int dev_queue_xmit(struct sk_buff *skb); @@ -2979,6 +2953,8 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); +int dev_get_phys_port_name(struct net_device *dev, + char *name, size_t len); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 
d449018d0726..8f2237eb3485 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -24,6 +24,7 @@ struct phy_device *of_phy_attach(struct net_device *dev, phy_interface_t iface); extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); +extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); #else /* CONFIG_OF */ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) @@ -60,6 +61,12 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np) { return NULL; } + +static inline int of_mdio_parse_addr(struct device *dev, + const struct device_node *np) +{ + return -ENOSYS; +} #endif /* CONFIG_OF */ #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 8a860f096c35..611a691145c4 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -84,7 +84,7 @@ static inline int of_platform_populate(struct device_node *root, static inline void of_platform_depopulate(struct device *parent) { } #endif -#ifdef CONFIG_OF_DYNAMIC +#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS) extern void of_platform_register_reconfig_notifier(void); #else static inline void of_platform_register_reconfig_notifier(void) { } diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h index 72c0415d6c21..18eccefea06e 100644 --- a/include/linux/pinctrl/consumer.h +++ b/include/linux/pinctrl/consumer.h @@ -82,7 +82,7 @@ static inline int pinctrl_gpio_direction_output(unsigned gpio) static inline struct pinctrl * __must_check pinctrl_get(struct device *dev) { - return ERR_PTR(-ENOSYS); + return NULL; } static inline void pinctrl_put(struct pinctrl *p) @@ -93,7 +93,7 @@ static inline struct pinctrl_state * __must_check pinctrl_lookup_state( struct pinctrl *p, const char *name) { - return ERR_PTR(-ENOSYS); + return NULL; } static inline int pinctrl_select_state(struct pinctrl *p, @@ -104,7 +104,7 @@ static inline int pinctrl_select_state(struct pinctrl *p, static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev) { - return ERR_PTR(-ENOSYS); + return NULL; } static inline void devm_pinctrl_put(struct pinctrl *p) diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index d438eeb08bff..89d102270570 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -1,14 +1,13 @@ /* * Resizable, Scalable, Concurrent Hash Table * + * Copyright (c) 2015 Herbert Xu <[email protected]> * Copyright (c) 2014 Thomas Graf <[email protected]> * Copyright (c) 2008-2014 Patrick McHardy <[email protected]> * - * Based on the following paper by Josh Triplett, Paul E. 
McKenney - * and Jonathan Walpole: - * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf - * * Code partially derived from nft_hash + * Rewritten with rehash code from br_multicast plus single list + * pointer as suggested by Josh Triplett * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -19,9 +18,11 @@ #define _LINUX_RHASHTABLE_H #include <linux/compiler.h> +#include <linux/errno.h> #include <linux/list_nulls.h> #include <linux/workqueue.h> #include <linux/mutex.h> +#include <linux/rcupdate.h> /* * The end of the chain is marked with a special nulls marks which has @@ -42,6 +43,9 @@ #define RHT_HASH_BITS 27 #define RHT_BASE_SHIFT RHT_HASH_BITS +/* Base bits plus 1 bit for nulls marker */ +#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) + struct rhash_head { struct rhash_head __rcu *next; }; @@ -49,20 +53,43 @@ struct rhash_head { /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets + * @rehash: Current bucket being rehashed + * @hash_rnd: Random seed to fold into hash * @locks_mask: Mask to apply before accessing locks[] * @locks: Array of spinlocks protecting individual buckets + * @walkers: List of active walkers + * @rcu: RCU structure for freeing the table + * @future_tbl: Table under construction during rehashing * @buckets: size * hash buckets */ struct bucket_table { - size_t size; + unsigned int size; + unsigned int rehash; + u32 hash_rnd; unsigned int locks_mask; spinlock_t *locks; + struct list_head walkers; + struct rcu_head rcu; + + struct bucket_table __rcu *future_tbl; struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; +/** + * struct rhashtable_compare_arg - Key for the function rhashtable_compare + * @ht: Hash table + * @key: Key to compare against + */ +struct rhashtable_compare_arg { + struct rhashtable *ht; + const void *key; +}; + typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed); +typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg, + const void *obj); struct rhashtable; @@ -72,60 +99,54 @@ struct rhashtable; * @key_len: Length of key * @key_offset: Offset of key in struct to be hashed * @head_offset: Offset of rhash_head in struct to be hashed - * @hash_rnd: Seed to use while hashing - * @max_shift: Maximum number of shifts while expanding - * @min_shift: Minimum number of shifts while shrinking + * @max_size: Maximum size while expanding + * @min_size: Minimum size while shrinking * @nulls_base: Base value to generate nulls marker * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) * @hashfn: Function to hash key * @obj_hashfn: Function to hash object + * @obj_cmpfn: Function to compare key with object */ struct rhashtable_params { size_t nelem_hint; size_t key_len; size_t key_offset; size_t head_offset; - u32 hash_rnd; - size_t max_shift; - size_t min_shift; + unsigned int max_size; + unsigned int min_size; u32 nulls_base; size_t locks_mul; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; + rht_obj_cmpfn_t obj_cmpfn; }; /** * struct rhashtable - Hash table handle * @tbl: Bucket table - * @future_tbl: Table under construction during expansion/shrinking * @nelems: Number of elements in table - * @shift: Current size (1 << shift) * @p: Configuration parameters * @run_work: Deferred worker to expand/shrink asynchronously * @mutex: Mutex to protect current/future table swapping - * @walkers: List 
of active walkers * @being_destroyed: True if table is set up for destruction */ struct rhashtable { struct bucket_table __rcu *tbl; - struct bucket_table __rcu *future_tbl; atomic_t nelems; - atomic_t shift; + bool being_destroyed; struct rhashtable_params p; struct work_struct run_work; struct mutex mutex; - struct list_head walkers; - bool being_destroyed; }; /** * struct rhashtable_walker - Hash table walker * @list: List entry on list of walkers - * @resize: Resize event occured + * @tbl: The table that we were walking over */ struct rhashtable_walker { struct list_head list; - bool resize; + struct bucket_table *tbl; }; /** @@ -162,6 +183,83 @@ static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr) return ((unsigned long) ptr) >> 1; } +static inline void *rht_obj(const struct rhashtable *ht, + const struct rhash_head *he) +{ + return (char *)he - ht->p.head_offset; +} + +static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, + unsigned int hash) +{ + return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); +} + +static inline unsigned int rht_key_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const void *key, const struct rhashtable_params params) +{ + return rht_bucket_index(tbl, params.hashfn(key, params.key_len ?: + ht->p.key_len, + tbl->hash_rnd)); +} + +static inline unsigned int rht_head_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const struct rhash_head *he, const struct rhashtable_params params) +{ + const char *ptr = rht_obj(ht, he); + + return likely(params.obj_hashfn) ? + rht_bucket_index(tbl, params.obj_hashfn(ptr, tbl->hash_rnd)) : + rht_key_hashfn(ht, tbl, ptr + params.key_offset, params); +} + +/** + * rht_grow_above_75 - returns true if nelems > 0.75 * table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_grow_above_75(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + /* Expand table when exceeding 75% load */ + return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) && + (!ht->p.max_size || tbl->size < ht->p.max_size); +} + +/** + * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size + * @ht: hash table + * @tbl: current table + */ +static inline bool rht_shrink_below_30(const struct rhashtable *ht, + const struct bucket_table *tbl) +{ + /* Shrink table beneath 30% load */ + return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) && + tbl->size > ht->p.min_size; +} + +/* The bucket lock is selected based on the hash and protects mutations + * on a group of hash buckets. + * + * A maximum of tbl->size/2 bucket locks is allocated. This ensures that + * a single lock always covers both buckets which may both contain + * entries which link to the same bucket of the old table during resizing. + * This simplifies the locking, as locking the bucket in both + * tables during a resize always guarantees protection. + * + * IMPORTANT: When holding the bucket lock of both the old and new table + * during expansion and shrinking, the old bucket lock must always be + * acquired first.
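+ * + * An illustrative sketch of that ordering (a hypothetical helper, not + * part of this API; spin_lock_bh_nested() and SINGLE_DEPTH_NESTING are + * the stock lockdep-aware primitives): + * + *	old_lock = rht_bucket_lock(old_tbl, old_hash); + *	new_lock = rht_bucket_lock(new_tbl, new_hash); + * + *	spin_lock_bh(old_lock); + *	if (new_lock != old_lock) + *		spin_lock_bh_nested(new_lock, SINGLE_DEPTH_NESTING);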
+ */ +static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl, + unsigned int hash) +{ + return &tbl->locks[hash & tbl->locks_mask]; +} + #ifdef CONFIG_PROVE_LOCKING int lockdep_rht_mutex_is_held(struct rhashtable *ht); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); @@ -178,24 +276,16 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, } #endif /* CONFIG_PROVE_LOCKING */ -int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); +int rhashtable_init(struct rhashtable *ht, + const struct rhashtable_params *params); -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); +int rhashtable_insert_slow(struct rhashtable *ht, const void *key, + struct rhash_head *obj, + struct bucket_table *old_tbl); int rhashtable_expand(struct rhashtable *ht); int rhashtable_shrink(struct rhashtable *ht); -void *rhashtable_lookup(struct rhashtable *ht, const void *key); -void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, - bool (*compare)(void *, void *), void *arg); - -bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj); -bool rhashtable_lookup_compare_insert(struct rhashtable *ht, - struct rhash_head *obj, - bool (*compare)(void *, void *), - void *arg); - int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); @@ -352,4 +442,292 @@ void rhashtable_destroy(struct rhashtable *ht); rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ tbl, hash, member) +static inline int rhashtable_compare(struct rhashtable_compare_arg *arg, + const void *obj) +{ + struct rhashtable *ht = arg->ht; + const char *ptr = obj; + + return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len); +} + +/** + * rhashtable_lookup_fast - search hash table, inlined version + * @ht: hash table + * @key: the pointer to the key + * @params: hash table parameters + * + * Computes the hash value for the key and traverses the bucket chain looking + * for an entry with an identical key. The first matching entry is returned. + * + * Returns the first entry on which the compare function returned true. + */ +static inline void *rhashtable_lookup_fast( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + const struct bucket_table *tbl; + struct rhash_head *he; + unsigned hash; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); +restart: + hash = rht_key_hashfn(ht, tbl, key, params); + rht_for_each_rcu(he, tbl, hash) { + if (params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, he)) : + rhashtable_compare(&arg, rht_obj(ht, he))) + continue; + rcu_read_unlock(); + return rht_obj(ht, he); + } + + /* Ensure we see any new tables.
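+	 * During a rehash the old table's future_tbl points at the table + * being filled, so a reader that misses an entry here simply follows + * that pointer and rescans via the restart loop below.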
*/ + smp_rmb(); + + tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (unlikely(tbl)) + goto restart; + rcu_read_unlock(); + + return NULL; +} + +static inline int __rhashtable_insert_fast( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ + struct rhashtable_compare_arg arg = { + .ht = ht, + .key = key, + }; + int err = -EEXIST; + struct bucket_table *tbl, *new_tbl; + struct rhash_head *head; + spinlock_t *lock; + unsigned hash; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + hash = rht_head_hashfn(ht, tbl, obj, params); + lock = rht_bucket_lock(tbl, hash); + + spin_lock_bh(lock); + + /* Because we have already taken the bucket lock in tbl, + * if we find that future_tbl is not yet visible then + * that guarantees all other insertions of the same entry + * will also grab the bucket lock in tbl because until + * the rehash completes ht->tbl won't be changed. + */ + new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (unlikely(new_tbl)) { + err = rhashtable_insert_slow(ht, key, obj, new_tbl); + goto out; + } + + if (!key) + goto skip_lookup; + + rht_for_each(head, tbl, hash) { + if (unlikely(!(params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, head)) : + rhashtable_compare(&arg, rht_obj(ht, head))))) + goto out; + } + +skip_lookup: + err = 0; + + head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + + RCU_INIT_POINTER(obj->next, head); + + rcu_assign_pointer(tbl->buckets[hash], obj); + + atomic_inc(&ht->nelems); + if (rht_grow_above_75(ht, tbl)) + schedule_work(&ht->run_work); + +out: + spin_unlock_bh(lock); + rcu_read_unlock(); + + return err; +} + +/** + * rhashtable_insert_fast - insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Will take a per-bucket spinlock to protect against mutual mutations + * on the same bucket. Multiple insertions may occur in parallel unless + * they map to the same bucket lock. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the 75% load watermark checked by rht_grow_above_75(). + */ +static inline int rhashtable_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + return __rhashtable_insert_fast(ht, NULL, obj, params); +} + +/** + * rhashtable_lookup_insert_fast - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * This lookup function may only be used for a fixed-key hash table (key_len + * parameter set). It will BUG() if used inappropriately. + * + * It is safe to call this function from atomic context. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the 75% load watermark checked by rht_grow_above_75().
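+ * + * Minimal usage sketch (the object type and parameters below are + * illustrative assumptions, not part of this API): + * + *	struct test_obj { + *		int value; + *		struct rhash_head node; + *	}; + * + *	static const struct rhashtable_params test_params = { + *		.head_offset = offsetof(struct test_obj, node), + *		.key_offset = offsetof(struct test_obj, value), + *		.key_len = sizeof(int), + *		.hashfn = jhash, + *	}; + * + *	err = rhashtable_init(&ht, &test_params); + *	err = rhashtable_lookup_insert_fast(&ht, &obj->node, test_params); + * + * A later rhashtable_lookup_fast(&ht, &key, test_params) then returns + * the inserted object, or NULL if it has been removed.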
+ */ +static inline int rhashtable_lookup_insert_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + const char *key = rht_obj(ht, obj); + + BUG_ON(ht->p.obj_hashfn); + + return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, + params); +} + +/** + * rhashtable_lookup_insert_key - search and insert object into hash table + * with explicit key + * @ht: hash table + * @key: key + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Locks down the bucket chain in both the old and new table if a resize + * is in progress to ensure that writers can't remove from the old table + * and can't insert to the new table during the atomic operation of search + * and insertion. Searches for duplicates in both the old and new table if + * a resize is in progress. + * + * Lookups may occur in parallel with hashtable mutations and resizing. + * + * Will trigger an automatic deferred table resizing if the size grows + * beyond the 75% load watermark checked by rht_grow_above_75(). + * + * Returns zero on success. + */ +static inline int rhashtable_lookup_insert_key( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ + BUG_ON(!ht->p.obj_hashfn || !key); + + return __rhashtable_insert_fast(ht, key, obj, params); +} + +static inline int __rhashtable_remove_fast( + struct rhashtable *ht, struct bucket_table *tbl, + struct rhash_head *obj, const struct rhashtable_params params) +{ + struct rhash_head __rcu **pprev; + struct rhash_head *he; + spinlock_t *lock; + unsigned hash; + int err = -ENOENT; + + hash = rht_head_hashfn(ht, tbl, obj, params); + lock = rht_bucket_lock(tbl, hash); + + spin_lock_bh(lock); + + pprev = &tbl->buckets[hash]; + rht_for_each(he, tbl, hash) { + if (he != obj) { + pprev = &he->next; + continue; + } + + rcu_assign_pointer(*pprev, obj->next); + err = 0; + break; + } + + spin_unlock_bh(lock); + + return err; +} + +/** + * rhashtable_remove_fast - remove object from hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * + * Since the hash chain is singly linked, the removal operation needs to + * walk the bucket chain upon removal. The removal operation is thus + * considerably slow if the hash table is not correctly sized. + * + * Will trigger an automatic deferred table shrinking if the number of + * elements falls below the 30% watermark checked by rht_shrink_below_30(). + * + * Returns zero on success, -ENOENT if the entry could not be found. + */ +static inline int rhashtable_remove_fast( + struct rhashtable *ht, struct rhash_head *obj, + const struct rhashtable_params params) +{ + struct bucket_table *tbl; + int err; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + + /* Because we have already taken (and released) the bucket + * lock in old_tbl, if we find that future_tbl is not yet + * visible then that guarantees that the entry is still in + * the old tbl if it exists.
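+	 * The loop below therefore simply retries the removal on each + * successive future_tbl until the entry is found or the chain of + * tables ends.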
+ */ + while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) && + (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) + ; + + if (err) + goto out; + + atomic_dec(&ht->nelems); + if (rht_shrink_below_30(ht, tbl)) + schedule_work(&ht->run_work); + +out: + rcu_read_unlock(); + + return err; +} + #endif /* _LINUX_RHASHTABLE_H */ diff --git a/include/linux/security.h b/include/linux/security.h index a1b7dbd127ff..25a079a7c3b3 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1716,7 +1716,6 @@ struct security_operations { int (*tun_dev_attach_queue) (void *security); int (*tun_dev_attach) (struct sock *sk, void *security); int (*tun_dev_open) (void *security); - void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -2735,8 +2734,6 @@ int security_tun_dev_attach_queue(void *security); int security_tun_dev_attach(struct sock *sk, void *security); int security_tun_dev_open(void *security); -void security_skb_owned_by(struct sk_buff *skb, struct sock *sk); - #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, struct sock *other, @@ -2928,11 +2925,6 @@ static inline int security_tun_dev_open(void *security) { return 0; } - -static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) -{ -} - #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bba1330757c0..36f3f43c0117 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -945,6 +945,13 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) to->l4_hash = from->l4_hash; }; +static inline void skb_sender_cpu_clear(struct sk_buff *skb) +{ +#ifdef CONFIG_XPS + skb->sender_cpu = 0; +#endif +} + #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index b5ad7d35a636..083ac388098e 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -19,8 +19,8 @@ void sock_diag_unregister(const struct sock_diag_handler *h); void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); -int sock_diag_check_cookie(void *sk, const __u32 *cookie); -void sock_diag_save_cookie(void *sk, __u32 *cookie); +int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie); +void sock_diag_save_cookie(struct sock *sk, __u32 *cookie); int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, diff --git a/include/linux/socket.h b/include/linux/socket.h index fab4d0ddf4ed..c9852ef7e317 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -51,6 +51,7 @@ struct msghdr { void *msg_control; /* ancillary data */ __kernel_size_t msg_controllen; /* ancillary data buffer length */ unsigned int msg_flags; /* flags on received message */ + struct kiocb *msg_iocb; /* ptr to iocb for async requests */ }; struct user_msghdr { diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h index 85b8ee67e937..e741e8baad92 100644 --- a/include/linux/spi/cc2520.h +++ b/include/linux/spi/cc2520.h @@ -21,6 +21,7 @@ struct cc2520_platform_data { int sfd; int reset; int vreg; + bool amplified; }; #endif diff --git 
a/include/linux/tcp.h b/include/linux/tcp.h index 97dbf16f7d9d..f869ae8afbaf 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -111,7 +111,7 @@ struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; const struct tcp_request_sock_ops *af_specific; - struct sock *listener; /* needed for TFO */ + bool tfo_listener; u32 rcv_isn; u32 snt_isn; u32 snt_synack; /* synack sent time */ diff --git a/include/linux/udp.h b/include/linux/udp.h index 247cfdcc4b08..87c094961bd5 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -34,7 +34,7 @@ static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb) #define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256) -static inline int udp_hashfn(struct net *net, unsigned num, unsigned mask) +static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask) { return (num + net_hash_mix(net)) & mask; } diff --git a/include/linux/uio.h b/include/linux/uio.h index 07a022641996..71880299ed48 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start); int iov_iter_npages(const struct iov_iter *i, int maxpages); +const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); + static inline size_t iov_iter_count(struct iov_iter *i) { return i->count; diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 7d7acb35603d..0ec598381f97 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -17,6 +17,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ +#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ /* bits [20..32] reserved for arch specific ioremap internals */ /* diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 6bb97df16d2d..33a5e00025aa 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -335,6 +335,11 @@ out: int bt_to_errno(__u16 code); +void hci_sock_set_flag(struct sock *sk, int nr); +void hci_sock_clear_flag(struct sock *sk, int nr); +int hci_sock_test_flag(struct sock *sk, int nr); +unsigned short hci_sock_get_channel(struct sock *sk); + int hci_sock_init(void); void hci_sock_cleanup(void); @@ -354,6 +359,9 @@ void l2cap_exit(void); int sco_init(void); void sco_exit(void); +int mgmt_init(void); +void mgmt_exit(void); + void bt_sock_reclassify_lock(struct sock *sk, int proto); #endif /* __BLUETOOTH_H__ */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 8e54f825153c..06e7eee31ce4 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -160,6 +160,14 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_STRICT_DUPLICATE_FILTER, + + /* When this quirk is set, LE scan and BR/EDR inquiry are done + * simultaneously, otherwise they are interleaved. + * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. + */ + HCI_QUIRK_SIMULTANEOUS_DISCOVERY, }; /* HCI device flags */ @@ -179,13 +187,14 @@ enum { HCI_RESET, }; -/* BR/EDR and/or LE controller flags: the flags defined here should represent - * states configured via debugfs for debugging and testing purposes only.
- */ +/* HCI socket flags */ enum { - HCI_DUT_MODE, - HCI_FORCE_BREDR_SMP, - HCI_FORCE_STATIC_ADDR, + HCI_SOCK_TRUSTED, + HCI_MGMT_INDEX_EVENTS, + HCI_MGMT_UNCONF_INDEX_EVENTS, + HCI_MGMT_EXT_INDEX_EVENTS, + HCI_MGMT_GENERIC_EVENTS, + HCI_MGMT_OOB_DATA_EVENTS, }; /* @@ -217,6 +226,7 @@ enum { HCI_HS_ENABLED, HCI_LE_ENABLED, HCI_ADVERTISING, + HCI_ADVERTISING_CONNECTABLE, HCI_CONNECTABLE, HCI_DISCOVERABLE, HCI_LIMITED_DISCOVERABLE, @@ -225,13 +235,13 @@ enum { HCI_FAST_CONNECTABLE, HCI_BREDR_ENABLED, HCI_LE_SCAN_INTERRUPTED, -}; -/* A mask for the flags that are supposed to remain when a reset happens - * or the HCI device is closed. - */ -#define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \ - BIT(HCI_FAST_CONNECTABLE) | BIT(HCI_LE_ADV)) + HCI_DUT_MODE, + HCI_FORCE_BREDR_SMP, + HCI_FORCE_STATIC_ADDR, + + __HCI_NUM_FLAGS, +}; /* HCI timeouts */ #define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ @@ -455,6 +465,10 @@ enum { #define EIR_SSP_HASH_C 0x0E /* Simple Pairing Hash C */ #define EIR_SSP_RAND_R 0x0F /* Simple Pairing Randomizer R */ #define EIR_DEVICE_ID 0x10 /* device ID */ +#define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */ +#define EIR_LE_ROLE 0x1C /* LE role */ +#define EIR_LE_SC_CONFIRM 0x22 /* LE SC Confirmation Value */ +#define EIR_LE_SC_RANDOM 0x23 /* LE SC Random Value */ /* Low Energy Advertising Flags */ #define LE_AD_LIMITED 0x01 /* Limited Discoverable */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index acec9140c3f9..b65c53de6a69 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -76,6 +76,7 @@ struct discovery_state { u8 last_adv_data[HCI_MAX_AD_LENGTH]; u8 last_adv_data_len; bool report_invalid_rssi; + bool result_filtering; s8 rssi; u16 uuid_count; u8 (*uuids)[16]; @@ -352,8 +353,7 @@ struct hci_dev { struct rfkill *rfkill; - unsigned long dbg_flags; - unsigned long dev_flags; + DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); struct delayed_work le_scan_disable; struct delayed_work le_scan_restart; @@ -501,6 +501,21 @@ extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; extern struct mutex hci_cb_list_lock; +#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags) +#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags) +#define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags) +#define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags) +#define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags) +#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags) +#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags) + +#define hci_dev_clear_volatile_flags(hdev) \ + do { \ + hci_dev_clear_flag(hdev, HCI_LE_SCAN); \ + hci_dev_clear_flag(hdev, HCI_LE_ADV); \ + hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \ + } while (0) + /* ----- HCI interface to upper protocols ----- */ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); int l2cap_disconn_ind(struct hci_conn *hcon); @@ -525,6 +540,7 @@ static inline void discovery_init(struct hci_dev *hdev) static inline void hci_discovery_filter_clear(struct hci_dev *hdev) { + hdev->discovery.result_filtering = false; hdev->discovery.report_invalid_rssi = true; hdev->discovery.rssi = HCI_RSSI_INVALID; hdev->discovery.uuid_count = 0; @@ -580,7 +596,6 @@ enum { HCI_CONN_SC_ENABLED, HCI_CONN_AES_CCM, HCI_CONN_POWER_SAVE, - HCI_CONN_REMOTE_OOB, 
HCI_CONN_FLUSH_KEY, HCI_CONN_ENCRYPT, HCI_CONN_AUTH, @@ -596,14 +611,14 @@ enum { static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && + return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); } static inline bool hci_conn_sc_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && + return hci_dev_test_flag(hdev, HCI_SC_ENABLED) && test_bit(HCI_CONN_SC_ENABLED, &conn->flags); } @@ -965,6 +980,8 @@ struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); void hci_smp_irks_clear(struct hci_dev *hdev); +bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); + void hci_remote_oob_data_clear(struct hci_dev *hdev); struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); @@ -1021,10 +1038,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) -#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ - !test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) -#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ - test_bit(HCI_SC_ENABLED, &(dev)->dev_flags)) +#define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \ + !hci_dev_test_flag(dev, HCI_AUTO_OFF)) +#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ + hci_dev_test_flag(dev, HCI_SC_ENABLED)) /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 @@ -1266,11 +1283,34 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); /* ----- HCI Sockets ----- */ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, - struct sock *skip_sk); + int flag, struct sock *skip_sk); void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb); void hci_sock_dev_event(struct hci_dev *hdev, int event); +#define HCI_MGMT_VAR_LEN BIT(0) +#define HCI_MGMT_NO_HDEV BIT(1) +#define HCI_MGMT_UNTRUSTED BIT(2) +#define HCI_MGMT_UNCONFIGURED BIT(3) + +struct hci_mgmt_handler { + int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len); + size_t data_len; + unsigned long flags; +}; + +struct hci_mgmt_chan { + struct list_head list; + unsigned short channel; + size_t handler_count; + const struct hci_mgmt_handler *handlers; + void (*hdev_init) (struct sock *sk, struct hci_dev *hdev); +}; + +int hci_mgmt_chan_register(struct hci_mgmt_chan *c); +void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); + /* Management interface */ #define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR)) #define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \ @@ -1290,7 +1330,6 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event); #define DISCOV_BREDR_INQUIRY_LEN 0x08 #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ -int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len); int mgmt_new_settings(struct hci_dev *hdev); void mgmt_index_added(struct hci_dev *hdev); void mgmt_index_removed(struct hci_dev *hdev); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index fe8eef00e9ca..a1a68671bf88 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -43,6 +43,8 @@ #define MGMT_STATUS_CANCELLED 
0x10 #define MGMT_STATUS_INVALID_INDEX 0x11 #define MGMT_STATUS_RFKILLED 0x12 +#define MGMT_STATUS_ALREADY_PAIRED 0x13 +#define MGMT_STATUS_PERMISSION_DENIED 0x14 struct mgmt_hdr { __le16 opcode; @@ -98,6 +100,7 @@ struct mgmt_rp_read_index_list { #define MGMT_SETTING_DEBUG_KEYS 0x00001000 #define MGMT_SETTING_PRIVACY 0x00002000 #define MGMT_SETTING_CONFIGURATION 0x00004000 +#define MGMT_SETTING_STATIC_ADDRESS 0x00008000 #define MGMT_OP_READ_INFO 0x0004 #define MGMT_READ_INFO_SIZE 0 @@ -503,6 +506,39 @@ struct mgmt_cp_start_service_discovery { } __packed; #define MGMT_START_SERVICE_DISCOVERY_SIZE 4 +#define MGMT_OP_READ_LOCAL_OOB_EXT_DATA 0x003B +struct mgmt_cp_read_local_oob_ext_data { + __u8 type; +} __packed; +#define MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE 1 +struct mgmt_rp_read_local_oob_ext_data { + __u8 type; + __le16 eir_len; + __u8 eir[0]; +} __packed; + +#define MGMT_OP_READ_EXT_INDEX_LIST 0x003C +#define MGMT_READ_EXT_INDEX_LIST_SIZE 0 +struct mgmt_rp_read_ext_index_list { + __le16 num_controllers; + struct { + __le16 index; + __u8 type; + __u8 bus; + } entry[0]; +} __packed; + +#define MGMT_OP_READ_ADV_FEATURES 0x0003D +#define MGMT_READ_ADV_FEATURES_SIZE 0 +struct mgmt_rp_read_adv_features { + __le32 supported_flags; + __u8 max_adv_data_len; + __u8 max_scan_rsp_len; + __u8 max_instances; + __u8 num_instances; + __u8 instance[0]; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; @@ -690,3 +726,19 @@ struct mgmt_ev_new_conn_param { #define MGMT_EV_UNCONF_INDEX_REMOVED 0x001e #define MGMT_EV_NEW_CONFIG_OPTIONS 0x001f + +struct mgmt_ev_ext_index { + __u8 type; + __u8 bus; +} __packed; + +#define MGMT_EV_EXT_INDEX_ADDED 0x0020 + +#define MGMT_EV_EXT_INDEX_REMOVED 0x0021 + +#define MGMT_EV_LOCAL_OOB_DATA_UPDATED 0x0022 +struct mgmt_ev_local_oob_data_updated { + __u8 type; + __le16 eir_len; + __u8 eir[0]; +} __packed; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 64e09e1e8099..f977abec07f6 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3183,10 +3183,8 @@ struct wiphy { const struct ieee80211_ht_cap *ht_capa_mod_mask; const struct ieee80211_vht_cap *vht_capa_mod_mask; -#ifdef CONFIG_NET_NS /* the network namespace this phy lives in currently */ - struct net *_net; -#endif + possible_net_t _net; #ifdef CONFIG_CFG80211_WEXT const struct iw_handler_def *wext; diff --git a/include/net/dst.h b/include/net/dst.h index a8ae4e760778..0fb99a26e973 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -481,6 +481,7 @@ void dst_init(void); enum { XFRM_LOOKUP_ICMP = 1 << 0, XFRM_LOOKUP_QUEUE = 1 << 1, + XFRM_LOOKUP_KEEP_DST_REF = 1 << 2, }; struct flowi; diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index e584de16e4c3..6d67383a5114 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -58,7 +58,7 @@ struct fib_rules_ops { struct sk_buff *, struct fib_rule_hdr *, struct nlattr **); - void (*delete)(struct fib_rule *); + int (*delete)(struct fib_rule *); int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **); @@ -95,17 +95,10 @@ static inline void fib_rule_get(struct fib_rule *rule) atomic_inc(&rule->refcnt); } -static inline void fib_rule_put_rcu(struct rcu_head *head) -{ - struct fib_rule *rule = container_of(head, struct fib_rule, rcu); - release_net(rule->fr_net); - kfree(rule); -} - static inline void fib_rule_put(struct fib_rule *rule) { if (atomic_dec_and_test(&rule->refcnt)) - call_rcu(&rule->rcu, fib_rule_put_rcu); + kfree_rcu(rule, rcu); } static 
inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla) diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 0574abd3db86..a9af1cc8c1bc 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -92,9 +92,7 @@ struct genl_info { struct genlmsghdr * genlhdr; void * userhdr; struct nlattr ** attrs; -#ifdef CONFIG_NET_NS - struct net * _net; -#endif + possible_net_t _net; void * user_ptr[2]; struct sock * dst_sk; }; diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h index 74af137304be..6d539e4e5ba7 100644 --- a/include/net/inet6_connection_sock.h +++ b/include/net/inet6_connection_sock.h @@ -28,8 +28,7 @@ int inet6_csk_bind_conflict(const struct sock *sk, struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6, const struct request_sock *req); -struct request_sock *inet6_csk_search_req(const struct sock *sk, - struct request_sock ***prevp, +struct request_sock *inet6_csk_search_req(struct sock *sk, const __be16 rport, const struct in6_addr *raddr, const struct in6_addr *laddr, diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 9201afe083fa..7ff588ca6817 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -38,8 +38,6 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash, return jhash_3words(lhash, fhash, ports, initval); } -int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp); - /* * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index b9a6b0a94cc6..7b5887cd1172 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -256,8 +256,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk, struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); -struct request_sock *inet_csk_search_req(const struct sock *sk, - struct request_sock ***prevp, +struct request_sock *inet_csk_search_req(struct sock *sk, const __be16 rport, const __be32 raddr, const __be32 laddr); @@ -283,15 +282,13 @@ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, static inline void inet_csk_reqsk_queue_removed(struct sock *sk, struct request_sock *req) { - if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0) - inet_csk_delete_keepalive_timer(sk); + reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); } static inline void inet_csk_reqsk_queue_added(struct sock *sk, const unsigned long timeout) { - if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0) - inet_csk_reset_keepalive_timer(sk, timeout); + reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); } static inline int inet_csk_reqsk_queue_len(const struct sock *sk) @@ -310,26 +307,19 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) } static inline void inet_csk_reqsk_queue_unlink(struct sock *sk, - struct request_sock *req, - struct request_sock **prev) + struct request_sock *req) { - reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev); + reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req); } static inline void inet_csk_reqsk_queue_drop(struct sock *sk, - struct request_sock *req, - struct request_sock **prev) + struct request_sock *req) { - inet_csk_reqsk_queue_unlink(sk, req, prev); + inet_csk_reqsk_queue_unlink(sk, req); inet_csk_reqsk_queue_removed(sk, req); - 
reqsk_free(req); + reqsk_put(req); } -void inet_csk_reqsk_queue_prune(struct sock *parent, - const unsigned long interval, - const unsigned long timeout, - const unsigned long max_rto); - void inet_csk_destroy_sock(struct sock *sk); void inet_csk_prepare_forced_close(struct sock *sk); diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index dd1950a7e273..73fe0f9525d9 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -76,9 +76,7 @@ struct inet_ehash_bucket { * ports are created in O(1) time? I thought so. ;-) -DaveM */ struct inet_bind_bucket { -#ifdef CONFIG_NET_NS - struct net *ib_net; -#endif + possible_net_t ib_net; unsigned short port; signed char fastreuse; signed char fastreuseport; @@ -223,8 +221,8 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb); -static inline int inet_bhashfn(struct net *net, const __u16 lport, - const int bhash_size) +static inline u32 inet_bhashfn(const struct net *net, const __u16 lport, + const u32 bhash_size) { return (lport + net_hash_mix(net)) & (bhash_size - 1); } @@ -233,7 +231,7 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, const unsigned short snum); /* These can have wildcards, don't try too hard. */ -static inline int inet_lhashfn(struct net *net, const unsigned short num) +static inline u32 inet_lhashfn(const struct net *net, const unsigned short num) { return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1); } @@ -251,6 +249,7 @@ void inet_put_port(struct sock *sk); void inet_hashinfo_init(struct inet_hashinfo *h); int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw); +int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw); void inet_hash(struct sock *sk); void inet_unhash(struct sock *sk); @@ -385,13 +384,32 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, iph->daddr, dport, inet_iif(skb)); } +u32 sk_ehashfn(const struct sock *sk); +u32 inet6_ehashfn(const struct net *net, + const struct in6_addr *laddr, const u16 lport, + const struct in6_addr *faddr, const __be16 fport); + +static inline void sk_daddr_set(struct sock *sk, __be32 addr) +{ + sk->sk_daddr = addr; /* alias of inet_daddr */ +#if IS_ENABLED(CONFIG_IPV6) + ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr); +#endif +} + +static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr) +{ + sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */ +#if IS_ENABLED(CONFIG_IPV6) + ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr); +#endif +} + int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk, u32 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, - struct inet_timewait_sock **), - int (*hash)(struct sock *sk, - struct inet_timewait_sock *twp)); + struct inet_timewait_sock **)); int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index eb16c7beed1e..b6c3737da4e9 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -27,6 +27,7 @@ #include <net/sock.h> #include <net/request_sock.h> #include <net/netns/hash.h> +#include <net/tcp_states.h> /** struct ip_options - IP Options * @@ -77,6 +78,10 @@ struct inet_request_sock { #define ir_v6_rmt_addr req.__req_common.skc_v6_daddr #define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr #define ir_iif 
req.__req_common.skc_bound_dev_if +#define ir_cookie req.__req_common.skc_cookie +#define ireq_net req.__req_common.skc_net +#define ireq_state req.__req_common.skc_state +#define ireq_family req.__req_common.skc_family kmemcheck_bitfield_begin(flags); u16 snd_wscale : 4, @@ -88,11 +93,11 @@ struct inet_request_sock { acked : 1, no_srccheck: 1; kmemcheck_bitfield_end(flags); + u32 ir_mark; union { struct ip_options_rcu *opt; struct sk_buff *pktopts; }; - u32 ir_mark; }; static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) @@ -100,13 +105,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) return (struct inet_request_sock *)sk; } -static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb) +static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) { - if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) { + if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) return skb->mark; - } else { - return sk->sk_mark; - } + + return sk->sk_mark; } struct inet_cork { @@ -239,18 +243,8 @@ static inline unsigned int __inet_ehashfn(const __be32 laddr, initval); } -static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops) -{ - struct request_sock *req = reqsk_alloc(ops); - struct inet_request_sock *ireq = inet_rsk(req); - - if (req != NULL) { - kmemcheck_annotate_bitfield(ireq, flags); - ireq->opt = NULL; - } - - return req; -} +struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk_listener); static inline __u8 inet_sk_flowi_flags(const struct sock *sk) { diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 6c566034e26d..b7ce1003c429 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -122,6 +122,7 @@ struct inet_timewait_sock { #define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr #define tw_dport __tw_common.skc_dport #define tw_num __tw_common.skc_num +#define tw_cookie __tw_common.skc_cookie int tw_timeout; volatile unsigned char tw_substate; diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 1657604c5dd3..54271ed0ed45 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -186,7 +186,8 @@ struct fib_table { int tb_default; int tb_num_default; struct rcu_head rcu; - unsigned long tb_data[0]; + unsigned long *tb_data; + unsigned long __data[0]; }; int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, @@ -196,11 +197,10 @@ int fib_table_delete(struct fib_table *, struct fib_config *); int fib_table_dump(struct fib_table *table, struct sk_buff *skb, struct netlink_callback *cb); int fib_table_flush(struct fib_table *table); +struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); void fib_table_flush_external(struct fib_table *table); void fib_free_table(struct fib_table *tb); - - #ifndef CONFIG_IP_MULTIPLE_TABLES #define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1)) @@ -229,18 +229,13 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp, struct fib_result *res) { struct fib_table *tb; - int err; + int err = -ENETUNREACH; rcu_read_lock(); - for (err = 0; !err; err = -ENETUNREACH) { - tb = fib_get_table(net, RT_TABLE_LOCAL); - if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) - break; - tb = fib_get_table(net, RT_TABLE_MAIN); - if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) - break; - } + tb = fib_get_table(net, RT_TABLE_MAIN); + if (tb && 
!fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) + err = 0; rcu_read_unlock(); @@ -270,10 +265,6 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp, res->tclassid = 0; for (err = 0; !err; err = -ENETUNREACH) { - tb = rcu_dereference_rtnl(net->ipv4.fib_local); - if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) - break; - tb = rcu_dereference_rtnl(net->ipv4.fib_main); if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) break; @@ -309,6 +300,7 @@ static inline int fib_num_tclassid_users(struct net *net) return 0; } #endif +int fib_unmerge(struct net *net); void fib_flush_external(struct net *net); /* Exported by fib_semantics.c */ @@ -320,7 +312,7 @@ void fib_select_multipath(struct fib_result *res); /* Exported by fib_trie.c */ void fib_trie_init(void); -struct fib_table *fib_trie_table(u32 id); +struct fib_table *fib_trie_table(u32 id, struct fib_table *alias); static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) { diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 20fd23398537..4e3731ee4eac 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -47,13 +47,13 @@ static inline struct net *skb_net(const struct sk_buff *skb) * Start with the most likely hit * End with BUG */ - if (likely(skb->dev && skb->dev->nd_net)) + if (likely(skb->dev && dev_net(skb->dev))) return dev_net(skb->dev); if (skb_dst(skb) && skb_dst(skb)->dev) return dev_net(skb_dst(skb)->dev); WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n", __func__, __LINE__); - if (likely(skb->sk && skb->sk->sk_net)) + if (likely(skb->sk && sock_net(skb->sk))) return sock_net(skb->sk); pr_err("There is no net ptr to find in the skb in %s() line:%d\n", __func__, __LINE__); @@ -71,11 +71,11 @@ static inline struct net *skb_sknet(const struct sk_buff *skb) #ifdef CONFIG_NET_NS #ifdef CONFIG_IP_VS_DEBUG /* Start with the most likely hit */ - if (likely(skb->sk && skb->sk->sk_net)) + if (likely(skb->sk && sock_net(skb->sk))) return sock_net(skb->sk); WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n", __func__, __LINE__); - if (likely(skb->dev && skb->dev->nd_net)) + if (likely(skb->dev && dev_net(skb->dev))) return dev_net(skb->dev); pr_err("There is no net ptr to find in the skb in %s() line:%d\n", __func__, __LINE__); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index b7673065c074..e7ba9758a345 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -942,10 +942,6 @@ void ipv6_sysctl_unregister(void); int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr); -int __ipv6_sock_mc_join(struct sock *sk, int ifindex, - const struct in6_addr *addr); int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); -int __ipv6_sock_mc_drop(struct sock *sk, int ifindex, - const struct in6_addr *addr); #endif /* _NET_IPV6_H */ diff --git a/include/net/neighbour.h b/include/net/neighbour.h index d48b8ec8b5f4..bd33e66f49aa 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -42,6 +42,7 @@ enum { NEIGH_VAR_MCAST_PROBES, NEIGH_VAR_UCAST_PROBES, NEIGH_VAR_APP_PROBES, + NEIGH_VAR_MCAST_REPROBES, NEIGH_VAR_RETRANS_TIME, NEIGH_VAR_BASE_REACHABLE_TIME, NEIGH_VAR_DELAY_PROBE_TIME, @@ -65,9 +66,7 @@ enum { }; struct neigh_parms { -#ifdef CONFIG_NET_NS - struct net *net; -#endif + possible_net_t net; struct net_device *dev; struct list_head list; int (*neigh_setup)(struct neighbour *); @@ -167,9 +166,7 @@ struct neigh_ops { struct pneigh_entry { struct pneigh_entry *next; 
-#ifdef CONFIG_NET_NS - struct net *net; -#endif + possible_net_t net; struct net_device *dev; u8 flags; u8 key[0]; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 2cb9acb618e9..f733656404de 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -49,13 +49,10 @@ struct net { atomic_t count; /* To decided when the network * namespace should be shut down. */ -#ifdef NETNS_REFCNT_DEBUG - atomic_t use_count; /* To track references we - * destroy on demand - */ -#endif spinlock_t rules_mod_lock; + atomic64_t cookie_gen; + struct list_head list; /* list of network namespaces */ struct list_head cleanup_list; /* namespaces on death row */ struct list_head exit_list; /* Use only net_mutex */ @@ -234,48 +231,27 @@ int net_eq(const struct net *net1, const struct net *net2) #endif -#ifdef NETNS_REFCNT_DEBUG -static inline struct net *hold_net(struct net *net) -{ - if (net) - atomic_inc(&net->use_count); - return net; -} - -static inline void release_net(struct net *net) -{ - if (net) - atomic_dec(&net->use_count); -} -#else -static inline struct net *hold_net(struct net *net) -{ - return net; -} - -static inline void release_net(struct net *net) -{ -} -#endif - +typedef struct { #ifdef CONFIG_NET_NS + struct net *net; +#endif +} possible_net_t; -static inline void write_pnet(struct net **pnet, struct net *net) +static inline void write_pnet(possible_net_t *pnet, struct net *net) { - *pnet = net; +#ifdef CONFIG_NET_NS + pnet->net = net; +#endif } -static inline struct net *read_pnet(struct net * const *pnet) +static inline struct net *read_pnet(const possible_net_t *pnet) { - return *pnet; -} - +#ifdef CONFIG_NET_NS + return pnet->net; #else - -#define write_pnet(pnet, net) do { (void)(net);} while (0) -#define read_pnet(pnet) (&init_net) - + return &init_net; #endif +} #define for_each_net(VAR) \ list_for_each_entry(VAR, &net_namespace_list, list) diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 74f271a172dd..095433b8a8b0 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -95,9 +95,8 @@ struct nf_conn { /* Timer function; drops refcnt when it goes off. 
*/ struct timer_list timeout; -#ifdef CONFIG_NET_NS - struct net *ct_net; -#endif + possible_net_t ct_net; + /* all members below initialized via memset */ u8 __nfct_init_offset[0]; diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h index c06ac58ca107..69a6715d9f3f 100644 --- a/include/net/netns/hash.h +++ b/include/net/netns/hash.h @@ -5,7 +5,7 @@ struct net; -static inline unsigned int net_hash_mix(struct net *net) +static inline u32 net_hash_mix(const struct net *net) { #ifdef CONFIG_NET_NS /* @@ -13,7 +13,7 @@ static inline unsigned int net_hash_mix(struct net *net) * always zeroed */ - return (unsigned)(((unsigned long)net) >> L1_CACHE_SHIFT); + return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT); #else return 0; #endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 8f3a1a1a5a94..614a49be68a9 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -54,8 +54,6 @@ struct netns_ipv4 { struct sock *mc_autojoin_sk; struct inet_peer_base *peers; - struct tcpm_hash_bucket *tcp_metrics_hash; - unsigned int tcp_metrics_hash_log; struct sock * __percpu *tcp_sk; struct netns_frags frags; #ifdef CONFIG_NETFILTER diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 7f830ff67f08..fe41f3ceb008 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -39,8 +39,7 @@ struct request_sock_ops { void (*send_reset)(struct sock *sk, struct sk_buff *skb); void (*destructor)(struct request_sock *req); - void (*syn_ack_timeout)(struct sock *sk, - struct request_sock *req); + void (*syn_ack_timeout)(const struct request_sock *req); }; int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req); @@ -49,7 +48,11 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req); */ struct request_sock { struct sock_common __req_common; +#define rsk_refcnt __req_common.skc_refcnt +#define rsk_hash __req_common.skc_hash + struct request_sock *dl_next; + struct sock *rsk_listener; u16 mss; u8 num_retrans; /* number of retransmits */ u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */ @@ -58,32 +61,56 @@ struct request_sock { u32 window_clamp; /* window clamp at creation time */ u32 rcv_wnd; /* rcv_wnd offered first time */ u32 ts_recent; - unsigned long expires; + struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; struct sock *sk; u32 secid; u32 peer_secid; }; -static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops) +static inline struct request_sock * +reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener) { struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC); - if (req != NULL) + if (req) { req->rsk_ops = ops; - + sock_hold(sk_listener); + req->rsk_listener = sk_listener; + + /* Following is temporary. 
It is coupled with debugging + * helpers in reqsk_put() & reqsk_free() + */ + atomic_set(&req->rsk_refcnt, 0); + } return req; } -static inline void __reqsk_free(struct request_sock *req) +static inline struct request_sock *inet_reqsk(struct sock *sk) { - kmem_cache_free(req->rsk_ops->slab, req); + return (struct request_sock *)sk; +} + +static inline struct sock *req_to_sk(struct request_sock *req) +{ + return (struct sock *)req; } static inline void reqsk_free(struct request_sock *req) { + /* temporary debugging */ + WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0); + req->rsk_ops->destructor(req); - __reqsk_free(req); + if (req->rsk_listener) + sock_put(req->rsk_listener); + kmem_cache_free(req->rsk_ops->slab, req); +} + +static inline void reqsk_put(struct request_sock *req) +{ + if (atomic_dec_and_test(&req->rsk_refcnt)) + reqsk_free(req); } extern int sysctl_max_syn_backlog; @@ -93,12 +120,16 @@ extern int sysctl_max_syn_backlog; * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs */ struct listen_sock { - u8 max_qlen_log; + int qlen_inc; /* protected by listener lock */ + int young_inc;/* protected by listener lock */ + + /* following fields can be updated by timer */ + atomic_t qlen_dec; /* qlen = qlen_inc - qlen_dec */ + atomic_t young_dec; + + u8 max_qlen_log ____cacheline_aligned_in_smp; u8 synflood_warned; /* 2 bytes hole, try to use */ - int qlen; - int qlen_young; - int clock_hand; u32 hash_rnd; u32 nr_table_entries; struct request_sock *syn_table[0]; @@ -142,18 +173,11 @@ struct fastopen_queue { * %syn_wait_lock is necessary only to avoid proc interface having to grab the main * lock sock while browsing the listening hash (otherwise it's deadlock prone). * - * This lock is acquired in read mode only from listening_get_next() seq_file - * op and it's acquired in write mode _only_ from code that is actively - * changing rskq_accept_head. All readers that are holding the master sock lock - * don't need to grab this lock in read mode too as rskq_accept_head. writes - * are always protected from the main sock lock. */ struct request_sock_queue { struct request_sock *rskq_accept_head; struct request_sock *rskq_accept_tail; - rwlock_t syn_wait_lock; u8 rskq_defer_accept; - /* 3 bytes hole, try to pack */ struct listen_sock *listen_opt; struct fastopen_queue *fastopenq; /* This is non-NULL iff TFO has been * enabled on this listener. Check @@ -161,6 +185,9 @@ struct request_sock_queue { * to determine if TFO is enabled * right at this moment. 
*/ + + /* temporary alignment, our goal is to get rid of this lock */ + spinlock_t syn_wait_lock ____cacheline_aligned_in_smp; }; int reqsk_queue_alloc(struct request_sock_queue *queue, @@ -186,12 +213,21 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue) } static inline void reqsk_queue_unlink(struct request_sock_queue *queue, - struct request_sock *req, - struct request_sock **prev_req) + struct request_sock *req) { - write_lock(&queue->syn_wait_lock); - *prev_req = req->dl_next; - write_unlock(&queue->syn_wait_lock); + struct listen_sock *lopt = queue->listen_opt; + struct request_sock **prev; + + spin_lock(&queue->syn_wait_lock); + + prev = &lopt->syn_table[req->rsk_hash]; + while (*prev != req) + prev = &(*prev)->dl_next; + *prev = req->dl_next; + + spin_unlock(&queue->syn_wait_lock); + if (del_timer(&req->rsk_timer)) + reqsk_put(req); } static inline void reqsk_queue_add(struct request_sock_queue *queue, @@ -224,57 +260,53 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue return req; } -static inline int reqsk_queue_removed(struct request_sock_queue *queue, - struct request_sock *req) +static inline void reqsk_queue_removed(struct request_sock_queue *queue, + const struct request_sock *req) { struct listen_sock *lopt = queue->listen_opt; if (req->num_timeout == 0) - --lopt->qlen_young; - - return --lopt->qlen; + atomic_inc(&lopt->young_dec); + atomic_inc(&lopt->qlen_dec); } -static inline int reqsk_queue_added(struct request_sock_queue *queue) +static inline void reqsk_queue_added(struct request_sock_queue *queue) { struct listen_sock *lopt = queue->listen_opt; - const int prev_qlen = lopt->qlen; - lopt->qlen_young++; - lopt->qlen++; - return prev_qlen; + lopt->young_inc++; + lopt->qlen_inc++; } -static inline int reqsk_queue_len(const struct request_sock_queue *queue) +static inline int listen_sock_qlen(const struct listen_sock *lopt) { - return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0; + return lopt->qlen_inc - atomic_read(&lopt->qlen_dec); } -static inline int reqsk_queue_len_young(const struct request_sock_queue *queue) +static inline int listen_sock_young(const struct listen_sock *lopt) { - return queue->listen_opt->qlen_young; + return lopt->young_inc - atomic_read(&lopt->young_dec); } -static inline int reqsk_queue_is_full(const struct request_sock_queue *queue) +static inline int reqsk_queue_len(const struct request_sock_queue *queue) { - return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log; + const struct listen_sock *lopt = queue->listen_opt; + + return lopt ? 
listen_sock_qlen(lopt) : 0; } -static inline void reqsk_queue_hash_req(struct request_sock_queue *queue, - u32 hash, struct request_sock *req, - unsigned long timeout) +static inline int reqsk_queue_len_young(const struct request_sock_queue *queue) { - struct listen_sock *lopt = queue->listen_opt; - - req->expires = jiffies + timeout; - req->num_retrans = 0; - req->num_timeout = 0; - req->sk = NULL; - req->dl_next = lopt->syn_table[hash]; + return listen_sock_young(queue->listen_opt); +} - write_lock(&queue->syn_wait_lock); - lopt->syn_table[hash] = req; - write_unlock(&queue->syn_wait_lock); +static inline int reqsk_queue_is_full(const struct request_sock_queue *queue) +{ + return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log; } +void reqsk_queue_hash_req(struct request_sock_queue *queue, + u32 hash, struct request_sock *req, + unsigned long timeout); + #endif /* _REQUEST_SOCK_H */ diff --git a/include/net/sock.h b/include/net/sock.h index 250822cc1e02..3f9b8ce56948 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -67,6 +67,7 @@ #include <linux/atomic.h> #include <net/dst.h> #include <net/checksum.h> +#include <net/tcp_states.h> #include <linux/net_tstamp.h> struct cgroup; @@ -190,15 +191,15 @@ struct sock_common { struct hlist_nulls_node skc_portaddr_node; }; struct proto *skc_prot; -#ifdef CONFIG_NET_NS - struct net *skc_net; -#endif + possible_net_t skc_net; #if IS_ENABLED(CONFIG_IPV6) struct in6_addr skc_v6_daddr; struct in6_addr skc_v6_rcv_saddr; #endif + atomic64_t skc_cookie; + /* * fields between dontcopy_begin/dontcopy_end * are not copied in sock_copy() @@ -329,6 +330,7 @@ struct sock { #define sk_net __sk_common.skc_net #define sk_v6_daddr __sk_common.skc_v6_daddr #define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr +#define sk_cookie __sk_common.skc_cookie socket_lock_t sk_lock; struct sk_buff_head sk_receive_queue; @@ -403,8 +405,8 @@ struct sock { rwlock_t sk_callback_lock; int sk_err, sk_err_soft; - unsigned short sk_ack_backlog; - unsigned short sk_max_ack_backlog; + u32 sk_ack_backlog; + u32 sk_max_ack_backlog; __u32 sk_priority; #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) __u32 sk_cgrp_prioidx; @@ -1624,7 +1626,7 @@ static inline void sock_put(struct sock *sk) sk_free(sk); } /* Generic version of sock_put(), dealing with all sockets - * (TCP_TIMEWAIT, ESTABLISHED...) + * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...) */ void sock_gen_put(struct sock *sk); @@ -2201,7 +2203,7 @@ static inline void sk_change_net(struct sock *sk, struct net *net) if (!net_eq(current_net, net)) { put_net(current_net); - sock_net_set(sk, hold_net(net)); + sock_net_set(sk, net); } } @@ -2217,6 +2219,14 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb) return NULL; } +/* This helper checks if a socket is a full socket, + * ie _not_ a timewait or request socket. 
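+ * + * A typical guard, sketched here rather than taken from this patch: + * code that may be handed any socket type checks + * + *	if (sk_fullsock(sk)) + *		sk->sk_error_report(sk); + * + * before touching fields that timewait and request minisocks (which + * only embed struct sock_common) do not have.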
+ */ +static inline bool sk_fullsock(const struct sock *sk) +{ + return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); +} + void sock_enable_timestamp(struct sock *sk, int flag); int sock_get_timestamp(struct sock *, struct timeval __user *); int sock_get_timestampns(struct sock *, struct timespec __user *); diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 1a9382febcc3..d2e69ee3019a 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -14,6 +14,35 @@ #include <linux/netdevice.h> #include <linux/notifier.h> +struct fib_info; + +/** + * struct switchdev_ops - switchdev operations + * + * @swdev_parent_id_get: Called to get an ID of the switch chip this port + * is part of. If driver implements this, it indicates that it + * represents a port of a switch chip. + * + * @swdev_port_stp_update: Called to notify switch device port of bridge + * port STP state change. + * + * @swdev_fib_ipv4_add: Called to add/modify IPv4 route to switch device. + * + * @swdev_fib_ipv4_del: Called to delete IPv4 route from switch device. + */ +struct swdev_ops { + int (*swdev_parent_id_get)(struct net_device *dev, + struct netdev_phys_item_id *psid); + int (*swdev_port_stp_update)(struct net_device *dev, u8 state); + int (*swdev_fib_ipv4_add)(struct net_device *dev, __be32 dst, + int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 nlflags, + u32 tb_id); + int (*swdev_fib_ipv4_del)(struct net_device *dev, __be32 dst, + int dst_len, struct fib_info *fi, + u8 tos, u8 type, u32 tb_id); +}; + enum netdev_switch_notifier_type { NETDEV_SWITCH_FDB_ADD = 1, NETDEV_SWITCH_FDB_DEL, diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h index 86a070ffc930..a152e9858b2c 100644 --- a/include/net/tc_act/tc_bpf.h +++ b/include/net/tc_act/tc_bpf.h @@ -16,8 +16,12 @@ struct tcf_bpf { struct tcf_common common; struct bpf_prog *filter; + union { + u32 bpf_fd; + u16 bpf_num_ops; + }; struct sock_filter *bpf_ops; - u16 bpf_num_ops; + const char *bpf_name; }; #define to_bpf(a) \ container_of(a->priv, struct tcf_bpf, common) diff --git a/include/net/tcp.h b/include/net/tcp.h index 2e11e38205c2..fe60e00e1919 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -406,8 +406,7 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, const struct tcphdr *th); struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, - struct request_sock *req, struct request_sock **prev, - bool fastopen); + struct request_sock *req, bool fastopen); int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); void tcp_enter_loss(struct sock *sk); @@ -434,7 +433,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname, int compat_tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); void tcp_set_keepalive(struct sock *sk, int val); -void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req); +void tcp_syn_ack_timeout(const struct request_sock *req); int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); void tcp_parse_options(const struct sk_buff *skb, @@ -448,6 +447,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); void tcp_v4_mtu_reduced(struct sock *sk); +void tcp_req_err(struct sock *sk, u32 seq); int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); struct sock *tcp_create_openreq_child(struct sock *sk, 
struct request_sock *req, @@ -1137,31 +1137,6 @@ static inline int tcp_full_space(const struct sock *sk) return tcp_win_from_space(sk->sk_rcvbuf); } -static inline void tcp_openreq_init(struct request_sock *req, - struct tcp_options_received *rx_opt, - struct sk_buff *skb, struct sock *sk) -{ - struct inet_request_sock *ireq = inet_rsk(req); - - req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ - req->cookie_ts = 0; - tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; - tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; - tcp_rsk(req)->snt_synack = tcp_time_stamp; - tcp_rsk(req)->last_oow_ack_time = 0; - req->mss = rx_opt->mss_clamp; - req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0; - ireq->tstamp_ok = rx_opt->tstamp_ok; - ireq->sack_ok = rx_opt->sack_ok; - ireq->snd_wscale = rx_opt->snd_wscale; - ireq->wscale_ok = rx_opt->wscale_ok; - ireq->acked = 0; - ireq->ecn_ok = 0; - ireq->ir_rmt_port = tcp_hdr(skb)->source; - ireq->ir_num = ntohs(tcp_hdr(skb)->dest); - ireq->ir_mark = inet_request_mark(sk, skb); -} - extern void tcp_openreq_init_rwin(struct request_sock *req, struct sock *sk, struct dst_entry *dst); @@ -1241,36 +1216,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, return true; } -/* Return true if we're currently rate-limiting out-of-window ACKs and - * thus shouldn't send a dupack right now. We rate-limit dupacks in - * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS - * attacks that send repeated SYNs or ACKs for the same connection. To - * do this, we do not send a duplicate SYNACK or ACK if the remote - * endpoint is sending out-of-window SYNs or pure ACKs at a high rate. - */ -static inline bool tcp_oow_rate_limited(struct net *net, - const struct sk_buff *skb, - int mib_idx, u32 *last_oow_ack_time) -{ - /* Data packets without SYNs are not likely part of an ACK loop. */ - if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && - !tcp_hdr(skb)->syn) - goto not_rate_limited; - - if (*last_oow_ack_time) { - s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); - - if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { - NET_INC_STATS_BH(net, mib_idx); - return true; /* rate-limited: don't send yet! */ - } - } - - *last_oow_ack_time = tcp_time_stamp; - -not_rate_limited: - return false; /* not rate-limited: go ahead, send dupack now! */ -} +bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, + int mib_idx, u32 *last_oow_ack_time); static inline void tcp_mib_init(struct net *net) { diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h index b0b645988bd8..50e78a74d0df 100644 --- a/include/net/tcp_states.h +++ b/include/net/tcp_states.h @@ -25,6 +25,7 @@ enum { TCP_LAST_ACK, TCP_LISTEN, TCP_CLOSING, /* Now a valid state */ + TCP_NEW_SYN_RECV, TCP_MAX_STATES /* Leave at the end! 
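Together with the new TCP_NEW_SYN_RECV state, the sk_fullsock() helper added in include/net/sock.h above gives shared code a cheap guard before touching fields that only full sockets carry; request and timewait minisockets provide just the sock_common prefix. A hypothetical caller (sk_mark is merely an example field):

    static inline u32 sock_mark_or_zero(const struct sock *sk)
    {
            /* timewait/request minisocks lack sk_mark; guard first */
            return sk_fullsock(sk) ? sk->sk_mark : 0;
    }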
*/ }; @@ -44,7 +45,8 @@ enum { TCPF_CLOSE_WAIT = (1 << 8), TCPF_LAST_ACK = (1 << 9), TCPF_LISTEN = (1 << 10), - TCPF_CLOSING = (1 << 11) + TCPF_CLOSING = (1 << 11), + TCPF_NEW_SYN_RECV = (1 << 12), }; #endif /* _LINUX_TCP_STATES_H */ diff --git a/include/net/vxlan.h b/include/net/vxlan.h index eabd3a038674..756e4636bad8 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -91,6 +91,7 @@ struct vxlanhdr { #define VXLAN_N_VID (1u << 24) #define VXLAN_VID_MASK (VXLAN_N_VID - 1) +#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8) #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) struct vxlan_metadata { @@ -130,7 +131,7 @@ struct vxlan_sock { #define VXLAN_F_GBP 0x800 #define VXLAN_F_REMCSUM_NOPARTIAL 0x1000 -/* Flags that are used in the receive patch. These flags must match in +/* Flags that are used in the receive path. These flags must match in * order for a socket to be shareable */ #define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index dc4865e90fe4..d0ac7d7be8a7 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -126,9 +126,7 @@ struct xfrm_state_walk { /* Full description of state of transformer. */ struct xfrm_state { -#ifdef CONFIG_NET_NS - struct net *xs_net; -#endif + possible_net_t xs_net; union { struct hlist_node gclist; struct hlist_node bydst; @@ -522,9 +520,7 @@ struct xfrm_policy_queue { }; struct xfrm_policy { -#ifdef CONFIG_NET_NS - struct net *xp_net; -#endif + possible_net_t xp_net; struct hlist_node bydst; struct hlist_node byidx; diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h index 0210797abf2e..dc10c52e0e91 100644 --- a/include/soc/at91/at91sam9_ddrsdr.h +++ b/include/soc/at91/at91sam9_ddrsdr.h @@ -92,7 +92,7 @@ #define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ #define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ -#define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */ +#define AT91_DDRSDRC_MD (7 << 0) /* Memory Device Type */ #define AT91_DDRSDRC_MD_SDR 0 #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 3fa1af8a58d7..3dd314a45d0d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -119,6 +119,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, BPF_PROG_TYPE_SOCKET_FILTER, BPF_PROG_TYPE_SCHED_CLS, + BPF_PROG_TYPE_SCHED_ACT, }; #define BPF_PSEUDO_MAP_FD 1 @@ -165,7 +166,22 @@ enum bpf_func_id { BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */ BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */ BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */ + BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */ + BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */ __BPF_FUNC_MAX_ID, }; +/* user accessible mirror of in-kernel sk_buff. 
+ * new fields can only be added to the end of this structure + */ +struct __sk_buff { + __u32 len; + __u32 pkt_type; + __u32 mark; + __u32 queue_mapping; + __u32 protocol; + __u32 vlan_present; + __u32 vlan_tci; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 756436e1ce89..f5f5edd5ae5f 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -147,6 +147,7 @@ enum { IFLA_CARRIER_CHANGES, IFLA_PHYS_SWITCH_ID, IFLA_LINK_NETNSID, + IFLA_PHYS_PORT_NAME, __IFLA_MAX }; @@ -224,6 +225,9 @@ enum { IFLA_BR_FORWARD_DELAY, IFLA_BR_HELLO_TIME, IFLA_BR_MAX_AGE, + IFLA_BR_AGEING_TIME, + IFLA_BR_STP_STATE, + IFLA_BR_PRIORITY, __IFLA_BR_MAX, }; diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h index da2d668b8cf1..053bd102fbe0 100644 --- a/include/uapi/linux/if_packet.h +++ b/include/uapi/linux/if_packet.h @@ -99,6 +99,7 @@ struct tpacket_auxdata { #define TP_STATUS_VLAN_VALID (1 << 4) /* auxdata has valid tp_vlan_tci */ #define TP_STATUS_BLK_TMO (1 << 5) #define TP_STATUS_VLAN_TPID_VALID (1 << 6) /* auxdata has valid tp_vlan_tpid */ +#define TP_STATUS_CSUM_VALID (1 << 7) /* Tx ring - header status */ #define TP_STATUS_AVAILABLE 0 diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 3873a35509aa..2e35c61bbdd1 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -126,6 +126,7 @@ enum { NDTPA_PROXY_QLEN, /* u32 */ NDTPA_LOCKTIME, /* u64, msecs */ NDTPA_QUEUE_LENBYTES, /* u32 */ + NDTPA_MCAST_REPROBES, /* u32 */ __NDTPA_MAX }; #define NDTPA_MAX (__NDTPA_MAX - 1) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index c3722b024e73..bea910f924dd 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -305,6 +305,7 @@ enum rtattr_type_t { RTA_MFC_STATS, RTA_VIA, RTA_NEWDST, + RTA_PREF, __RTA_MAX }; diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h index 5288bd77e63b..07f17cc70bb3 100644 --- a/include/uapi/linux/tc_act/tc_bpf.h +++ b/include/uapi/linux/tc_act/tc_bpf.h @@ -24,6 +24,8 @@ enum { TCA_ACT_BPF_PARMS, TCA_ACT_BPF_OPS_LEN, TCA_ACT_BPF_OPS, + TCA_ACT_BPF_FD, + TCA_ACT_BPF_NAME, __TCA_ACT_BPF_MAX, }; #define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1) diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index 3c53eec4ae22..19c66fcbab8a 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h @@ -60,7 +60,7 @@ struct virtio_blk_config { __u32 size_max; /* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */ __u32 seg_max; - /* geometry the device (if VIRTIO_BLK_F_GEOMETRY) */ + /* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */ struct virtio_blk_geometry { __u16 cylinders; __u8 heads; @@ -119,7 +119,11 @@ struct virtio_blk_config { #define VIRTIO_BLK_T_BARRIER 0x80000000 #endif /* !VIRTIO_BLK_NO_LEGACY */ -/* This is the first element of the read scatter-gather list. */ +/* + * This comes first in the read scatter-gather list. + * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, + * this is the first element of the read scatter-gather list. 
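For illustration, a classifier written against the new __sk_buff mirror could look like the sketch below; the ELF section name, the numeric TC_ACT_* return values and the clang -target bpf build step are assumptions of this sketch, not something this patch mandates:

    #include <linux/bpf.h>

    /* Sketch only: drop frames longer than 1514 bytes. The skb->len
     * load is rewritten by the verifier's convert_ctx_access hook
     * (kernel/bpf/verifier.c below).
     */
    __attribute__((section("classifier"), used))
    int cls_drop_big(struct __sk_buff *skb)
    {
            if (skb->len > 1514)
                    return 2;       /* TC_ACT_SHOT, assumed value */
            return -1;              /* TC_ACT_UNSPEC, assumed value */
    }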
+ */ struct virtio_blk_outhdr { /* VIRTIO_BLK_T* */ __virtio32 type; diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h index 42b9370771b0..cc18ef8825c0 100644 --- a/include/uapi/linux/virtio_scsi.h +++ b/include/uapi/linux/virtio_scsi.h @@ -29,8 +29,16 @@ #include <linux/virtio_types.h> -#define VIRTIO_SCSI_CDB_SIZE 32 -#define VIRTIO_SCSI_SENSE_SIZE 96 +/* Default values of the CDB and sense data size configuration fields */ +#define VIRTIO_SCSI_CDB_DEFAULT_SIZE 32 +#define VIRTIO_SCSI_SENSE_DEFAULT_SIZE 96 + +#ifndef VIRTIO_SCSI_CDB_SIZE +#define VIRTIO_SCSI_CDB_SIZE VIRTIO_SCSI_CDB_DEFAULT_SIZE +#endif +#ifndef VIRTIO_SCSI_SENSE_SIZE +#define VIRTIO_SCSI_SENSE_SIZE VIRTIO_SCSI_SENSE_DEFAULT_SIZE +#endif /* SCSI command request, followed by data-out */ struct virtio_scsi_cmd_req { diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index b78f21caf55a..b0f1c9e5d687 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -114,9 +114,9 @@ int __must_check __xenbus_register_backend(struct xenbus_driver *drv, const char *mod_name); #define xenbus_register_frontend(drv) \ - __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); + __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME) #define xenbus_register_backend(drv) \ - __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); + __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME) void xenbus_unregister_driver(struct xenbus_driver *drv); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 50603aec766a..4139a0f8b558 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -661,6 +661,9 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto __weak; const struct bpf_func_proto bpf_map_update_elem_proto __weak; const struct bpf_func_proto bpf_map_delete_elem_proto __weak; +const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; +const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; + /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call * skb_copy_bits(), so provide a weak definition of it for NET-less config. 
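The new VIRTIO_SCSI_*_SIZE guards above exist so a consumer can pick non-default sizes by defining the macros before including the header; a hypothetical use:

    /* Hypothetical consumer: request 64-byte CDBs instead of the
     * 32-byte default by overriding the macro before inclusion.
     */
    #define VIRTIO_SCSI_CDB_SIZE    64
    #include <linux/virtio_scsi.h>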
*/ diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index a3c7701a8b5e..bd7f5988ed9c 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -11,6 +11,8 @@ */ #include <linux/bpf.h> #include <linux/rcupdate.h> +#include <linux/random.h> +#include <linux/smp.h> /* If kernel subsystem is allowing eBPF programs to call this function, * inside its own verifier_ops->get_func_proto() callback it should return @@ -87,3 +89,25 @@ const struct bpf_func_proto bpf_map_delete_elem_proto = { .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_KEY, }; + +static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +{ + return prandom_u32(); +} + +const struct bpf_func_proto bpf_get_prandom_u32_proto = { + .func = bpf_get_prandom_u32, + .gpl_only = false, + .ret_type = RET_INTEGER, +}; + +static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +{ + return raw_smp_processor_id(); +} + +const struct bpf_func_proto bpf_get_smp_processor_id_proto = { + .func = bpf_get_smp_processor_id, + .gpl_only = false, + .ret_type = RET_INTEGER, +}; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 669719ccc9ee..ea75c654af1b 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -519,7 +519,7 @@ static int bpf_prog_load(union bpf_attr *attr) goto free_prog; /* run eBPF verifier */ - err = bpf_check(prog, attr); + err = bpf_check(&prog, attr); if (err < 0) goto free_used_maps; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bdf4192a889b..0e714f799ec0 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno, enum bpf_reg_type expected_type; int err = 0; - if (arg_type == ARG_ANYTHING) + if (arg_type == ARG_DONTCARE) return 0; if (reg->type == NOT_INIT) { @@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno, return -EACCES; } + if (arg_type == ARG_ANYTHING) + return 0; + if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE) { expected_type = PTR_TO_STACK; @@ -1177,6 +1180,7 @@ static bool may_access_skb(enum bpf_prog_type type) switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_SCHED_ACT: return true; default: return false; @@ -1617,11 +1621,10 @@ static int do_check(struct verifier_env *env) return err; } else if (class == BPF_LDX) { - if (BPF_MODE(insn->code) != BPF_MEM || - insn->imm != 0) { - verbose("BPF_LDX uses reserved fields\n"); - return -EINVAL; - } + enum bpf_reg_type src_reg_type; + + /* check for reserved fields is already done */ + /* check src operand */ err = check_reg_arg(regs, insn->src_reg, SRC_OP); if (err) @@ -1640,6 +1643,29 @@ static int do_check(struct verifier_env *env) if (err) return err; + src_reg_type = regs[insn->src_reg].type; + + if (insn->imm == 0 && BPF_SIZE(insn->code) == BPF_W) { + /* saw a valid insn + * dst_reg = *(u32 *)(src_reg + off) + * use reserved 'imm' field to mark this insn + */ + insn->imm = src_reg_type; + + } else if (src_reg_type != insn->imm && + (src_reg_type == PTR_TO_CTX || + insn->imm == PTR_TO_CTX)) { + /* An abuser program is trying to use the same insn + * dst_reg = *(u32*) (src_reg + off) + * with different pointer types: + * src_reg == ctx in one branch and + * src_reg == stack|map in some other branch. + * Reject it. 
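On the program side, the two helpers added in kernel/bpf/helpers.c above would be bound in the style of the kernel's BPF samples: a function pointer initialized to the helper ID and fixed up by the loader. A sketch (the binding idiom and loader are assumptions, not part of this patch):

    static unsigned int (*get_prandom_u32)(void) =
            (void *) BPF_FUNC_get_prandom_u32;
    static unsigned int (*get_smp_processor_id)(void) =
            (void *) BPF_FUNC_get_smp_processor_id;

    int keep_one_in_four(struct __sk_buff *skb)
    {
            /* e.g. probabilistic sampling: pass roughly 25% of packets */
            return (get_prandom_u32() & 3) == 0;
    }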
+ */ + verbose("same insn cannot be used with different pointers\n"); + return -EINVAL; + } + } else if (class == BPF_STX) { if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn); @@ -1787,6 +1813,13 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env) int i, j; for (i = 0; i < insn_cnt; i++, insn++) { + if (BPF_CLASS(insn->code) == BPF_LDX && + (BPF_MODE(insn->code) != BPF_MEM || + insn->imm != 0)) { + verbose("BPF_LDX uses reserved fields\n"); + return -EINVAL; + } + if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; @@ -1878,6 +1911,92 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env) insn->src_reg = 0; } +static void adjust_branches(struct bpf_prog *prog, int pos, int delta) +{ + struct bpf_insn *insn = prog->insnsi; + int insn_cnt = prog->len; + int i; + + for (i = 0; i < insn_cnt; i++, insn++) { + if (BPF_CLASS(insn->code) != BPF_JMP || + BPF_OP(insn->code) == BPF_CALL || + BPF_OP(insn->code) == BPF_EXIT) + continue; + + /* adjust offset of jmps if necessary */ + if (i < pos && i + insn->off + 1 > pos) + insn->off += delta; + else if (i > pos && i + insn->off + 1 < pos) + insn->off -= delta; + } +} + +/* convert load instructions that access fields of 'struct __sk_buff' + * into sequence of instructions that access fields of 'struct sk_buff' + */ +static int convert_ctx_accesses(struct verifier_env *env) +{ + struct bpf_insn *insn = env->prog->insnsi; + int insn_cnt = env->prog->len; + struct bpf_insn insn_buf[16]; + struct bpf_prog *new_prog; + u32 cnt; + int i; + + if (!env->prog->aux->ops->convert_ctx_access) + return 0; + + for (i = 0; i < insn_cnt; i++, insn++) { + if (insn->code != (BPF_LDX | BPF_MEM | BPF_W)) + continue; + + if (insn->imm != PTR_TO_CTX) { + /* clear internal mark */ + insn->imm = 0; + continue; + } + + cnt = env->prog->aux->ops-> + convert_ctx_access(insn->dst_reg, insn->src_reg, + insn->off, insn_buf); + if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { + verbose("bpf verifier is misconfigured\n"); + return -EINVAL; + } + + if (cnt == 1) { + memcpy(insn, insn_buf, sizeof(*insn)); + continue; + } + + /* several new insns need to be inserted. 
Make room for them */ + insn_cnt += cnt - 1; + new_prog = bpf_prog_realloc(env->prog, + bpf_prog_size(insn_cnt), + GFP_USER); + if (!new_prog) + return -ENOMEM; + + new_prog->len = insn_cnt; + + memmove(new_prog->insnsi + i + cnt, new_prog->insnsi + i + 1, + sizeof(*insn) * (insn_cnt - i - cnt)); + + /* copy substitute insns in place of load instruction */ + memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt); + + /* adjust branches in the whole program */ + adjust_branches(new_prog, i, cnt - 1); + + /* keep walking new program and skip insns we just inserted */ + env->prog = new_prog; + insn = new_prog->insnsi + i + cnt - 1; + i += cnt - 1; + } + + return 0; +} + static void free_states(struct verifier_env *env) { struct verifier_state_list *sl, *sln; @@ -1900,13 +2019,13 @@ static void free_states(struct verifier_env *env) kfree(env->explored_states); } -int bpf_check(struct bpf_prog *prog, union bpf_attr *attr) +int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) { char __user *log_ubuf = NULL; struct verifier_env *env; int ret = -EINVAL; - if (prog->len <= 0 || prog->len > BPF_MAXINSNS) + if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS) return -E2BIG; /* 'struct verifier_env' can be global, but since it's not small, @@ -1916,7 +2035,7 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr) if (!env) return -ENOMEM; - env->prog = prog; + env->prog = *prog; /* grab the mutex to protect few globals used by verifier */ mutex_lock(&bpf_verifier_lock); @@ -1948,7 +2067,7 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr) if (ret < 0) goto skip_full_check; - env->explored_states = kcalloc(prog->len, + env->explored_states = kcalloc(env->prog->len, sizeof(struct verifier_state_list *), GFP_USER); ret = -ENOMEM; @@ -1965,6 +2084,10 @@ skip_full_check: while (pop_stack(env, NULL) >= 0); free_states(env); + if (ret == 0) + /* program is valid, convert *(u32*)(ctx + off) accesses */ + ret = convert_ctx_accesses(env); + if (log_level && log_len >= log_size - 1) { BUG_ON(log_len >= log_size); /* verifier log exceeded user supplied buffer */ @@ -1980,18 +2103,18 @@ skip_full_check: if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in bpf_prog_info */ - prog->aux->used_maps = kmalloc_array(env->used_map_cnt, - sizeof(env->used_maps[0]), - GFP_KERNEL); + env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, + sizeof(env->used_maps[0]), + GFP_KERNEL); - if (!prog->aux->used_maps) { + if (!env->prog->aux->used_maps) { ret = -ENOMEM; goto free_log_buf; } - memcpy(prog->aux->used_maps, env->used_maps, + memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); - prog->aux->used_map_cnt = env->used_map_cnt; + env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions @@ -2003,11 +2126,12 @@ free_log_buf: if (log_level) vfree(log_buf); free_env: - if (!prog->aux->used_maps) + if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_bpf_prog_info() will release them. 
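To see what adjust_branches() compensates for, here is a user-space model of its offset rule: a jump is adjusted only when it crosses the insertion point, in either direction; jumps entirely before or after the rewritten load keep their offsets:

    /* Model of adjust_branches(): 'pos' is the index of the rewritten
     * insn, 'delta' the number of insns inserted after it.
     */
    static void adjust_one(int *off, int idx, int pos, int delta)
    {
            int target = idx + *off + 1;    /* BPF jump offsets are relative */

            if (idx < pos && target > pos)
                    *off += delta;          /* forward jump over the insert */
            else if (idx > pos && target < pos)
                    *off -= delta;          /* backward jump over the insert */
    }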
*/ release_maps(env); + *prog = env->prog; kfree(env); mutex_unlock(&bpf_verifier_lock); return ret; diff --git a/kernel/events/core.c b/kernel/events/core.c index f04daabfd1cf..453ef61311d4 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3591,7 +3591,7 @@ static void put_event(struct perf_event *event) ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING); WARN_ON_ONCE(ctx->parent_ctx); perf_remove_from_context(event, true); - mutex_unlock(&ctx->mutex); + perf_event_ctx_unlock(event, ctx); _free_event(event); } diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 01ca08804f51..3f9f1d6b4c2e 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -89,16 +89,28 @@ static bool klp_is_object_loaded(struct klp_object *obj) /* sets obj->mod if object is not vmlinux and module is found */ static void klp_find_object_module(struct klp_object *obj) { + struct module *mod; + if (!klp_is_module(obj)) return; mutex_lock(&module_mutex); /* - * We don't need to take a reference on the module here because we have - * the klp_mutex, which is also taken by the module notifier. This - * prevents any module from unloading until we release the klp_mutex. + * We do not want to block removal of patched modules and therefore + * we do not take a reference here. The patches are removed by + * the module going handler instead. + */ + mod = find_module(obj->name); + /* + * Do not mess with the work of the module coming and going notifiers. + * Note that the patch might still be needed before the going handler + * is called. Module functions can be called even in the GOING state + * until mod->exit() finishes. This is especially important for + * patches that modify the semantics of the functions. */ - obj->mod = find_module(obj->name); + if (mod && mod->klp_alive) + obj->mod = mod; + mutex_unlock(&module_mutex); } @@ -767,6 +779,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) return -EINVAL; obj->state = KLP_DISABLED; + obj->mod = NULL; klp_find_object_module(obj); @@ -961,6 +974,15 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action, mutex_lock(&klp_mutex); + /* + * Each module has to know that the notifier has been called. + * We never know what module will get patched by a new patch. 
+ */ + if (action == MODULE_STATE_COMING) + mod->klp_alive = true; + else /* MODULE_STATE_GOING */ + mod->klp_alive = false; + list_for_each_entry(patch, &klp_patches, list) { for (obj = patch->objs; obj->funcs; obj++) { if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) diff --git a/kernel/module.c b/kernel/module.c index cc93cf68653c..b3d634ed06c9 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -56,7 +56,6 @@ #include <linux/async.h> #include <linux/percpu.h> #include <linux/kmemleak.h> -#include <linux/kasan.h> #include <linux/jump_label.h> #include <linux/pfn.h> #include <linux/bsearch.h> @@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { } void __weak module_memfree(void *module_region) { vfree(module_region); - kasan_module_free(module_region); } void __weak module_arch_cleanup(struct module *mod) diff --git a/lib/Makefile b/lib/Makefile index 87eb3bffc283..58f74d2dd396 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -24,7 +24,7 @@ obj-y += lockref.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ - gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \ + gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o obj-y += string_helpers.o diff --git a/mm/iov_iter.c b/lib/iov_iter.c index 827732047da1..9d96e283520c 100644 --- a/mm/iov_iter.c +++ b/lib/iov_iter.c @@ -751,3 +751,18 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) return npages; } EXPORT_SYMBOL(iov_iter_npages); + +const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) +{ + *new = *old; + if (new->type & ITER_BVEC) + return new->bvec = kmemdup(new->bvec, + new->nr_segs * sizeof(struct bio_vec), + flags); + else + /* iovec and kvec have identical layout */ + return new->iov = kmemdup(new->iov, + new->nr_segs * sizeof(struct iovec), + flags); +} +EXPORT_SYMBOL(dup_iter); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index b5344ef4c684..83cfedd6612a 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -1,13 +1,13 @@ /* * Resizable, Scalable, Concurrent Hash Table * + * Copyright (c) 2015 Herbert Xu <[email protected]> * Copyright (c) 2014-2015 Thomas Graf <[email protected]> * Copyright (c) 2008-2014 Patrick McHardy <[email protected]> * - * Based on the following paper: - * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf - * * Code partially derived from nft_hash + * Rewritten with rehash code from br_multicast plus single list + * pointer as suggested by Josh Triplett * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -27,121 +27,18 @@ #include <linux/err.h> #define HASH_DEFAULT_SIZE 64UL -#define HASH_MIN_SIZE 4UL +#define HASH_MIN_SIZE 4U #define BUCKET_LOCKS_PER_CPU 128UL -/* Base bits plus 1 bit for nulls marker */ -#define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) - -enum { - RHT_LOCK_NORMAL, - RHT_LOCK_NESTED, -}; - -/* The bucket lock is selected based on the hash and protects mutations - * on a group of hash buckets. - * - * A maximum of tbl->size/2 bucket locks is allocated. This ensures that - * a single lock always covers both buckets which may both contains - * entries which link to the same bucket of the old table during resizing. 
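The dup_iter() helper appended to the relocated lib/iov_iter.c above hands callers a deep copy of an iterator, duplicating its segment array; freeing that array is the caller's job. A hypothetical caller:

    /* Hypothetical user: stash a private copy of a request's iov_iter
     * so it can be replayed later.
     */
    static int stash_request_iter(struct iov_iter *stash, struct iov_iter *src)
    {
            if (!dup_iter(stash, src, GFP_KERNEL))
                    return -ENOMEM;
            return 0;       /* when done: kfree(stash->iov) (or ->bvec) */
    }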
- * This allows to simplify the locking as locking the bucket in both - * tables during resize always guarantee protection. - * - * IMPORTANT: When holding the bucket lock of both the old and new table - * during expansions and shrinking, the old bucket lock must always be - * acquired first. - */ -static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash) -{ - return &tbl->locks[hash & tbl->locks_mask]; -} - -static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) -{ - return (void *) he - ht->p.head_offset; -} - -static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash) -{ - return hash & (tbl->size - 1); -} - -static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) -{ - u32 hash; - - if (unlikely(!ht->p.key_len)) - hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); - else - hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, - ht->p.hash_rnd); - - return hash >> HASH_RESERVED_SPACE; -} - -static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) -{ - return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE; -} - -static u32 head_hashfn(const struct rhashtable *ht, +static u32 head_hashfn(struct rhashtable *ht, const struct bucket_table *tbl, const struct rhash_head *he) { - return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he))); + return rht_head_hashfn(ht, tbl, he, ht->p); } #ifdef CONFIG_PROVE_LOCKING -static void debug_dump_buckets(const struct rhashtable *ht, - const struct bucket_table *tbl) -{ - struct rhash_head *he; - unsigned int i, hash; - - for (i = 0; i < tbl->size; i++) { - pr_warn(" [Bucket %d] ", i); - rht_for_each_rcu(he, tbl, i) { - hash = head_hashfn(ht, tbl, he); - pr_cont("[hash = %#x, lock = %p] ", - hash, bucket_lock(tbl, hash)); - } - pr_cont("\n"); - } - -} - -static void debug_dump_table(struct rhashtable *ht, - const struct bucket_table *tbl, - unsigned int hash) -{ - struct bucket_table *old_tbl, *future_tbl; - - pr_emerg("BUG: lock for hash %#x in table %p not held\n", - hash, tbl); - - rcu_read_lock(); - future_tbl = rht_dereference_rcu(ht->future_tbl, ht); - old_tbl = rht_dereference_rcu(ht->tbl, ht); - if (future_tbl != old_tbl) { - pr_warn("Future table %p (size: %zd)\n", - future_tbl, future_tbl->size); - debug_dump_buckets(ht, future_tbl); - } - - pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size); - debug_dump_buckets(ht, old_tbl); - - rcu_read_unlock(); -} - #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) -#define ASSERT_BUCKET_LOCK(HT, TBL, HASH) \ - do { \ - if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \ - debug_dump_table(HT, TBL, HASH); \ - BUG(); \ - } \ - } while (0) int lockdep_rht_mutex_is_held(struct rhashtable *ht) { @@ -151,29 +48,16 @@ EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) { - spinlock_t *lock = bucket_lock(tbl, hash); + spinlock_t *lock = rht_bucket_lock(tbl, hash); return (debug_locks) ? 
lockdep_is_held(lock) : 1; } EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #else #define ASSERT_RHT_MUTEX(HT) -#define ASSERT_BUCKET_LOCK(HT, TBL, HASH) #endif -static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) -{ - struct rhash_head __rcu **pprev; - - for (pprev = &tbl->buckets[n]; - !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n)); - pprev = &rht_dereference_bucket(*pprev, tbl, n)->next) - ; - - return pprev; -} - static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl) { unsigned int i, size; @@ -215,6 +99,11 @@ static void bucket_table_free(const struct bucket_table *tbl) kvfree(tbl); } +static void bucket_table_free_rcu(struct rcu_head *head) +{ + bucket_table_free(container_of(head, struct bucket_table, rcu)); +} + static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, size_t nbuckets) { @@ -237,131 +126,112 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, return NULL; } + INIT_LIST_HEAD(&tbl->walkers); + + get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); + for (i = 0; i < nbuckets; i++) INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); return tbl; } -/** - * rht_grow_above_75 - returns true if nelems > 0.75 * table-size - * @ht: hash table - * @new_size: new table size - */ -static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) +static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash) { - /* Expand table when exceeding 75% load */ - return atomic_read(&ht->nelems) > (new_size / 4 * 3) && - (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift); -} + struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); + struct bucket_table *new_tbl = + rht_dereference(old_tbl->future_tbl, ht) ?: old_tbl; + struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash]; + int err = -ENOENT; + struct rhash_head *head, *next, *entry; + spinlock_t *new_bucket_lock; + unsigned new_hash; -/** - * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size - * @ht: hash table - * @new_size: new table size - */ -static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) -{ - /* Shrink table beneath 30% load */ - return atomic_read(&ht->nelems) < (new_size * 3 / 10) && - (atomic_read(&ht->shift) > ht->p.min_shift); -} + rht_for_each(entry, old_tbl, old_hash) { + err = 0; + next = rht_dereference_bucket(entry->next, old_tbl, old_hash); -static void lock_buckets(struct bucket_table *new_tbl, - struct bucket_table *old_tbl, unsigned int hash) - __acquires(old_bucket_lock) -{ - spin_lock_bh(bucket_lock(old_tbl, hash)); - if (new_tbl != old_tbl) - spin_lock_bh_nested(bucket_lock(new_tbl, hash), - RHT_LOCK_NESTED); -} + if (rht_is_a_nulls(next)) + break; -static void unlock_buckets(struct bucket_table *new_tbl, - struct bucket_table *old_tbl, unsigned int hash) - __releases(old_bucket_lock) -{ - if (new_tbl != old_tbl) - spin_unlock_bh(bucket_lock(new_tbl, hash)); - spin_unlock_bh(bucket_lock(old_tbl, hash)); -} + pprev = &entry->next; + } -/** - * Unlink entries on bucket which hash to different bucket. - * - * Returns true if no more work needs to be performed on the bucket. 
- */ -static bool hashtable_chain_unzip(struct rhashtable *ht, - const struct bucket_table *new_tbl, - struct bucket_table *old_tbl, - size_t old_hash) -{ - struct rhash_head *he, *p, *next; - unsigned int new_hash, new_hash2; + if (err) + goto out; - ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash); + new_hash = head_hashfn(ht, new_tbl, entry); - /* Old bucket empty, no work needed. */ - p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, - old_hash); - if (rht_is_a_nulls(p)) - return false; + new_bucket_lock = rht_bucket_lock(new_tbl, new_hash); - new_hash = head_hashfn(ht, new_tbl, p); - ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash); + spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING); + head = rht_dereference_bucket(new_tbl->buckets[new_hash], + new_tbl, new_hash); - /* Advance the old bucket pointer one or more times until it - * reaches a node that doesn't hash to the same bucket as the - * previous node p. Call the previous node p; - */ - rht_for_each_continue(he, p->next, old_tbl, old_hash) { - new_hash2 = head_hashfn(ht, new_tbl, he); - ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2); + if (rht_is_a_nulls(head)) + INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash); + else + RCU_INIT_POINTER(entry->next, head); - if (new_hash != new_hash2) - break; - p = he; - } - rcu_assign_pointer(old_tbl->buckets[old_hash], p->next); + rcu_assign_pointer(new_tbl->buckets[new_hash], entry); + spin_unlock(new_bucket_lock); - /* Find the subsequent node which does hash to the same - * bucket as node P, or NULL if no such node exists. - */ - INIT_RHT_NULLS_HEAD(next, ht, old_hash); - if (!rht_is_a_nulls(he)) { - rht_for_each_continue(he, he->next, old_tbl, old_hash) { - if (head_hashfn(ht, new_tbl, he) == new_hash) { - next = he; - break; - } - } - } + rcu_assign_pointer(*pprev, next); - /* Set p's next pointer to that subsequent node pointer, - * bypassing the nodes which do not hash to p's bucket - */ - rcu_assign_pointer(p->next, next); +out: + return err; +} + +static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash) +{ + struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); + spinlock_t *old_bucket_lock; - p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, - old_hash); + old_bucket_lock = rht_bucket_lock(old_tbl, old_hash); - return !rht_is_a_nulls(p); + spin_lock_bh(old_bucket_lock); + while (!rhashtable_rehash_one(ht, old_hash)) + ; + old_tbl->rehash++; + spin_unlock_bh(old_bucket_lock); } -static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl, - unsigned int new_hash, struct rhash_head *entry) +static void rhashtable_rehash(struct rhashtable *ht, + struct bucket_table *new_tbl) { - ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash); + struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); + struct rhashtable_walker *walker; + unsigned old_hash; + + /* Make insertions go into the new, empty table right away. Deletions + * and lookups will be attempted in both tables until we synchronize. + */ + rcu_assign_pointer(old_tbl->future_tbl, new_tbl); + + /* Ensure the new table is visible to readers. */ + smp_wmb(); + + for (old_hash = 0; old_hash < old_tbl->size; old_hash++) + rhashtable_rehash_chain(ht, old_hash); + + /* Publish the new table pointer. */ + rcu_assign_pointer(ht->tbl, new_tbl); + + list_for_each_entry(walker, &old_tbl->walkers, list) + walker->tbl = NULL; - rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry); + /* Wait for readers. 
All new readers will see the new + * table, and thus no references to the old table will + * remain. + */ + call_rcu(&old_tbl->rcu, bucket_table_free_rcu); } /** * rhashtable_expand - Expand hash table while allowing concurrent lookups * @ht: the hash table to expand * - * A secondary bucket array is allocated and the hash entries are migrated - * while keeping them on both lists until the end of the RCU grace period. + * A secondary bucket array is allocated and the hash entries are migrated. * * This function may only be called in a context where it is safe to call * synchronize_rcu(), e.g. not within a rcu_read_lock() section. @@ -375,9 +245,6 @@ static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl, int rhashtable_expand(struct rhashtable *ht) { struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); - struct rhash_head *he; - unsigned int new_hash, old_hash; - bool complete = false; ASSERT_RHT_MUTEX(ht); @@ -385,66 +252,7 @@ int rhashtable_expand(struct rhashtable *ht) if (new_tbl == NULL) return -ENOMEM; - atomic_inc(&ht->shift); - - /* Make insertions go into the new, empty table right away. Deletions - * and lookups will be attempted in both tables until we synchronize. - * The synchronize_rcu() guarantees for the new table to be picked up - * so no new additions go into the old table while we relink. - */ - rcu_assign_pointer(ht->future_tbl, new_tbl); - synchronize_rcu(); - - /* For each new bucket, search the corresponding old bucket for the - * first entry that hashes to the new bucket, and link the end of - * newly formed bucket chain (containing entries added to future - * table) to that entry. Since all the entries which will end up in - * the new bucket appear in the same old bucket, this constructs an - * entirely valid new hash table, but with multiple buckets - * "zipped" together into a single imprecise chain. - */ - for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { - old_hash = rht_bucket_index(old_tbl, new_hash); - lock_buckets(new_tbl, old_tbl, new_hash); - rht_for_each(he, old_tbl, old_hash) { - if (head_hashfn(ht, new_tbl, he) == new_hash) { - link_old_to_new(ht, new_tbl, new_hash, he); - break; - } - } - unlock_buckets(new_tbl, old_tbl, new_hash); - cond_resched(); - } - - /* Unzip interleaved hash chains */ - while (!complete && !ht->being_destroyed) { - /* Wait for readers. All new readers will see the new - * table, and thus no references to the old table will - * remain. - */ - synchronize_rcu(); - - /* For each bucket in the old table (each of which - * contains items from multiple buckets of the new - * table): ... 
- */ - complete = true; - for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { - lock_buckets(new_tbl, old_tbl, old_hash); - - if (hashtable_chain_unzip(ht, new_tbl, old_tbl, - old_hash)) - complete = false; - - unlock_buckets(new_tbl, old_tbl, old_hash); - cond_resched(); - } - } - - rcu_assign_pointer(ht->tbl, new_tbl); - synchronize_rcu(); - - bucket_table_free(old_tbl); + rhashtable_rehash(ht, new_tbl); return 0; } EXPORT_SYMBOL_GPL(rhashtable_expand); @@ -467,49 +275,15 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); */ int rhashtable_shrink(struct rhashtable *ht) { - struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht); - unsigned int new_hash; + struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); ASSERT_RHT_MUTEX(ht); - new_tbl = bucket_table_alloc(ht, tbl->size / 2); + new_tbl = bucket_table_alloc(ht, old_tbl->size / 2); if (new_tbl == NULL) return -ENOMEM; - rcu_assign_pointer(ht->future_tbl, new_tbl); - synchronize_rcu(); - - /* Link the first entry in the old bucket to the end of the - * bucket in the new table. As entries are concurrently being - * added to the new table, lock down the new bucket. As we - * always divide the size in half when shrinking, each bucket - * in the new table maps to exactly two buckets in the old - * table. - */ - for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { - lock_buckets(new_tbl, tbl, new_hash); - - rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), - tbl->buckets[new_hash]); - ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size); - rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), - tbl->buckets[new_hash + new_tbl->size]); - - unlock_buckets(new_tbl, tbl, new_hash); - cond_resched(); - } - - /* Publish the new, valid hash table */ - rcu_assign_pointer(ht->tbl, new_tbl); - atomic_dec(&ht->shift); - - /* Wait for readers. No new readers will have references to the - * old hash table. 
- */ - synchronize_rcu(); - - bucket_table_free(tbl); - + rhashtable_rehash(ht, new_tbl); return 0; } EXPORT_SYMBOL_GPL(rhashtable_shrink); @@ -518,7 +292,6 @@ static void rht_deferred_worker(struct work_struct *work) { struct rhashtable *ht; struct bucket_table *tbl; - struct rhashtable_walker *walker; ht = container_of(work, struct rhashtable, run_work); mutex_lock(&ht->mutex); @@ -527,333 +300,44 @@ static void rht_deferred_worker(struct work_struct *work) tbl = rht_dereference(ht->tbl, ht); - list_for_each_entry(walker, &ht->walkers, list) - walker->resize = true; - - if (rht_grow_above_75(ht, tbl->size)) + if (rht_grow_above_75(ht, tbl)) rhashtable_expand(ht); - else if (rht_shrink_below_30(ht, tbl->size)) + else if (rht_shrink_below_30(ht, tbl)) rhashtable_shrink(ht); unlock: mutex_unlock(&ht->mutex); } -static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, - struct bucket_table *tbl, - const struct bucket_table *old_tbl, u32 hash) +int rhashtable_insert_slow(struct rhashtable *ht, const void *key, + struct rhash_head *obj, + struct bucket_table *tbl) { - bool no_resize_running = tbl == old_tbl; struct rhash_head *head; - - hash = rht_bucket_index(tbl, hash); - head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); - - ASSERT_BUCKET_LOCK(ht, tbl, hash); - - if (rht_is_a_nulls(head)) - INIT_RHT_NULLS_HEAD(obj->next, ht, hash); - else - RCU_INIT_POINTER(obj->next, head); - - rcu_assign_pointer(tbl->buckets[hash], obj); - - atomic_inc(&ht->nelems); - if (no_resize_running && rht_grow_above_75(ht, tbl->size)) - schedule_work(&ht->run_work); -} - -/** - * rhashtable_insert - insert object into hash table - * @ht: hash table - * @obj: pointer to hash head inside object - * - * Will take a per bucket spinlock to protect against mutual mutations - * on the same bucket. Multiple insertions may occur in parallel unless - * they map to the same bucket lock. - * - * It is safe to call this function from atomic context. - * - * Will trigger an automatic deferred table resizing if the size grows - * beyond the watermark indicated by grow_decision() which can be passed - * to rhashtable_init(). - */ -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) -{ - struct bucket_table *tbl, *old_tbl; unsigned hash; + int err = -EEXIST; - rcu_read_lock(); - - tbl = rht_dereference_rcu(ht->future_tbl, ht); - old_tbl = rht_dereference_rcu(ht->tbl, ht); - hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); - - lock_buckets(tbl, old_tbl, hash); - __rhashtable_insert(ht, obj, tbl, old_tbl, hash); - unlock_buckets(tbl, old_tbl, hash); - - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(rhashtable_insert); - -/** - * rhashtable_remove - remove object from hash table - * @ht: hash table - * @obj: pointer to hash head inside object - * - * Since the hash chain is single linked, the removal operation needs to - * walk the bucket chain upon removal. The removal operation is thus - * considerable slow if the hash table is not correctly sized. - * - * Will automatically shrink the table via rhashtable_expand() if the - * shrink_decision function specified at rhashtable_init() returns true. - * - * The caller must ensure that no concurrent table mutations occur. It is - * however valid to have concurrent lookups if they are RCU protected. 
- */ -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) -{ - struct bucket_table *tbl, *new_tbl, *old_tbl; - struct rhash_head __rcu **pprev; - struct rhash_head *he, *he2; - unsigned int hash, new_hash; - bool ret = false; - - rcu_read_lock(); - old_tbl = rht_dereference_rcu(ht->tbl, ht); - tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht); - new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); - - lock_buckets(new_tbl, old_tbl, new_hash); -restart: - hash = rht_bucket_index(tbl, new_hash); - pprev = &tbl->buckets[hash]; - rht_for_each(he, tbl, hash) { - if (he != obj) { - pprev = &he->next; - continue; - } - - ASSERT_BUCKET_LOCK(ht, tbl, hash); - - if (old_tbl->size > new_tbl->size && tbl == old_tbl && - !rht_is_a_nulls(obj->next) && - head_hashfn(ht, tbl, obj->next) != hash) { - rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash)); - } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) { - rht_for_each_continue(he2, obj->next, tbl, hash) { - if (head_hashfn(ht, tbl, he2) == hash) { - rcu_assign_pointer(*pprev, he2); - goto found; - } - } - - rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash)); - } else { - rcu_assign_pointer(*pprev, obj->next); - } - -found: - ret = true; - break; - } - - /* The entry may be linked in either 'tbl', 'future_tbl', or both. - * 'future_tbl' only exists for a short period of time during - * resizing. Thus traversing both is fine and the added cost is - * very rare. - */ - if (tbl != old_tbl) { - tbl = old_tbl; - goto restart; - } - - unlock_buckets(new_tbl, old_tbl, new_hash); - - if (ret) { - bool no_resize_running = new_tbl == old_tbl; - - atomic_dec(&ht->nelems); - if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size)) - schedule_work(&ht->run_work); - } - - rcu_read_unlock(); - - return ret; -} -EXPORT_SYMBOL_GPL(rhashtable_remove); - -struct rhashtable_compare_arg { - struct rhashtable *ht; - const void *key; -}; - -static bool rhashtable_compare(void *ptr, void *arg) -{ - struct rhashtable_compare_arg *x = arg; - struct rhashtable *ht = x->ht; - - return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len); -} - -/** - * rhashtable_lookup - lookup key in hash table - * @ht: hash table - * @key: pointer to key - * - * Computes the hash value for the key and traverses the bucket chain looking - * for a entry with an identical key. The first matching entry is returned. - * - * This lookup function may only be used for fixed key hash table (key_len - * parameter set). It will BUG() if used inappropriately. - * - * Lookups may occur in parallel with hashtable mutations and resizing. - */ -void *rhashtable_lookup(struct rhashtable *ht, const void *key) -{ - struct rhashtable_compare_arg arg = { - .ht = ht, - .key = key, - }; - - BUG_ON(!ht->p.key_len); - - return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg); -} -EXPORT_SYMBOL_GPL(rhashtable_lookup); - -/** - * rhashtable_lookup_compare - search hash table with compare function - * @ht: hash table - * @key: the pointer to the key - * @compare: compare function, must return true on match - * @arg: argument passed on to compare function - * - * Traverses the bucket chain behind the provided hash value and calls the - * specified compare function for each entry. - * - * Lookups may occur in parallel with hashtable mutations and resizing. - * - * Returns the first entry on which the compare function returned true. 
- */ -void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, - bool (*compare)(void *, void *), void *arg) -{ - const struct bucket_table *tbl, *old_tbl; - struct rhash_head *he; - u32 hash; - - rcu_read_lock(); - - old_tbl = rht_dereference_rcu(ht->tbl, ht); - tbl = rht_dereference_rcu(ht->future_tbl, ht); - hash = key_hashfn(ht, key, ht->p.key_len); -restart: - rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) { - if (!compare(rht_obj(ht, he), arg)) - continue; - rcu_read_unlock(); - return rht_obj(ht, he); - } - - if (unlikely(tbl != old_tbl)) { - tbl = old_tbl; - goto restart; - } - rcu_read_unlock(); - - return NULL; -} -EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); + hash = head_hashfn(ht, tbl, obj); + spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING); -/** - * rhashtable_lookup_insert - lookup and insert object into hash table - * @ht: hash table - * @obj: pointer to hash head inside object - * - * Locks down the bucket chain in both the old and new table if a resize - * is in progress to ensure that writers can't remove from the old table - * and can't insert to the new table during the atomic operation of search - * and insertion. Searches for duplicates in both the old and new table if - * a resize is in progress. - * - * This lookup function may only be used for fixed key hash table (key_len - * parameter set). It will BUG() if used inappropriately. - * - * It is safe to call this function from atomic context. - * - * Will trigger an automatic deferred table resizing if the size grows - * beyond the watermark indicated by grow_decision() which can be passed - * to rhashtable_init(). - */ -bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj) -{ - struct rhashtable_compare_arg arg = { - .ht = ht, - .key = rht_obj(ht, obj) + ht->p.key_offset, - }; - - BUG_ON(!ht->p.key_len); - - return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare, - &arg); -} -EXPORT_SYMBOL_GPL(rhashtable_lookup_insert); - -/** - * rhashtable_lookup_compare_insert - search and insert object to hash table - * with compare function - * @ht: hash table - * @obj: pointer to hash head inside object - * @compare: compare function, must return true on match - * @arg: argument passed on to compare function - * - * Locks down the bucket chain in both the old and new table if a resize - * is in progress to ensure that writers can't remove from the old table - * and can't insert to the new table during the atomic operation of search - * and insertion. Searches for duplicates in both the old and new table if - * a resize is in progress. - * - * Lookups may occur in parallel with hashtable mutations and resizing. - * - * Will trigger an automatic deferred table resizing if the size grows - * beyond the watermark indicated by grow_decision() which can be passed - * to rhashtable_init(). 
- */ -bool rhashtable_lookup_compare_insert(struct rhashtable *ht, - struct rhash_head *obj, - bool (*compare)(void *, void *), - void *arg) -{ - struct bucket_table *new_tbl, *old_tbl; - u32 new_hash; - bool success = true; + if (key && rhashtable_lookup_fast(ht, key, ht->p)) + goto exit; - BUG_ON(!ht->p.key_len); + err = 0; - rcu_read_lock(); - old_tbl = rht_dereference_rcu(ht->tbl, ht); - new_tbl = rht_dereference_rcu(ht->future_tbl, ht); - new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); + head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); - lock_buckets(new_tbl, old_tbl, new_hash); + RCU_INIT_POINTER(obj->next, head); - if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset, - compare, arg)) { - success = false; - goto exit; - } + rcu_assign_pointer(tbl->buckets[hash], obj); - __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash); + atomic_inc(&ht->nelems); exit: - unlock_buckets(new_tbl, old_tbl, new_hash); - rcu_read_unlock(); + spin_unlock(rht_bucket_lock(tbl, hash)); - return success; + return err; } -EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert); +EXPORT_SYMBOL_GPL(rhashtable_insert_slow); /** * rhashtable_walk_init - Initialise an iterator @@ -887,11 +371,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) if (!iter->walker) return -ENOMEM; - INIT_LIST_HEAD(&iter->walker->list); - iter->walker->resize = false; - mutex_lock(&ht->mutex); - list_add(&iter->walker->list, &ht->walkers); + iter->walker->tbl = rht_dereference(ht->tbl, ht); + list_add(&iter->walker->list, &iter->walker->tbl->walkers); mutex_unlock(&ht->mutex); return 0; @@ -907,7 +389,8 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init); void rhashtable_walk_exit(struct rhashtable_iter *iter) { mutex_lock(&iter->ht->mutex); - list_del(&iter->walker->list); + if (iter->walker->tbl) + list_del(&iter->walker->list); mutex_unlock(&iter->ht->mutex); kfree(iter->walker); } @@ -928,13 +411,21 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit); * by calling rhashtable_walk_next. */ int rhashtable_walk_start(struct rhashtable_iter *iter) + __acquires(RCU) { + struct rhashtable *ht = iter->ht; + + mutex_lock(&ht->mutex); + + if (iter->walker->tbl) + list_del(&iter->walker->list); + rcu_read_lock(); - if (iter->walker->resize) { - iter->slot = 0; - iter->skip = 0; - iter->walker->resize = false; + mutex_unlock(&ht->mutex); + + if (!iter->walker->tbl) { + iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); return -EAGAIN; } @@ -956,13 +447,11 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start); */ void *rhashtable_walk_next(struct rhashtable_iter *iter) { - const struct bucket_table *tbl; + struct bucket_table *tbl = iter->walker->tbl; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; void *obj = NULL; - tbl = rht_dereference_rcu(ht->tbl, ht); - if (p) { p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); goto next; @@ -988,17 +477,17 @@ next: iter->skip = 0; } - iter->p = NULL; - -out: - if (iter->walker->resize) { - iter->p = NULL; + iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (iter->walker->tbl) { iter->slot = 0; iter->skip = 0; - iter->walker->resize = false; return ERR_PTR(-EAGAIN); } + iter->p = NULL; + +out: + return obj; } EXPORT_SYMBOL_GPL(rhashtable_walk_next); @@ -1010,16 +499,34 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_next); * Finish a hash table walk. 
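Callers of the walker must cope with the -EAGAIN protocol implemented above: a rehash detaches the iterator's table, the walk restarts, and entries may be seen more than once. A sketch of a tolerant iteration loop (the per-entry work is a placeholder):

    static void walk_all(struct rhashtable_iter *iter)
    {
            void *obj;

            if (rhashtable_walk_start(iter) == -EAGAIN)
                    ;       /* resized before we started; walk anyway */

            while ((obj = rhashtable_walk_next(iter)) != NULL) {
                    if (IS_ERR(obj)) {
                            if (PTR_ERR(obj) == -EAGAIN)
                                    continue;  /* rehash: entries may repeat */
                            break;
                    }
                    /* process obj here; runs under RCU, must not sleep */
            }

            rhashtable_walk_stop(iter);
    }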
*/ void rhashtable_walk_stop(struct rhashtable_iter *iter) + __releases(RCU) { - rcu_read_unlock(); + struct rhashtable *ht; + struct bucket_table *tbl = iter->walker->tbl; + + if (!tbl) + goto out; + + ht = iter->ht; + + mutex_lock(&ht->mutex); + if (tbl->rehash < tbl->size) + list_add(&iter->walker->list, &tbl->walkers); + else + iter->walker->tbl = NULL; + mutex_unlock(&ht->mutex); + iter->p = NULL; + +out: + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rhashtable_walk_stop); -static size_t rounded_hashtable_size(struct rhashtable_params *params) +static size_t rounded_hashtable_size(const struct rhashtable_params *params) { return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), - 1UL << params->min_shift); + (unsigned long)params->min_size); } /** @@ -1065,30 +572,35 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) * .obj_hashfn = my_hash_fn, * }; */ -int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) +int rhashtable_init(struct rhashtable *ht, + const struct rhashtable_params *params) { struct bucket_table *tbl; size_t size; size = HASH_DEFAULT_SIZE; - if ((params->key_len && !params->hashfn) || - (!params->key_len && !params->obj_hashfn)) + if ((!(params->key_len && params->hashfn) && !params->obj_hashfn) || + (params->obj_hashfn && !params->obj_cmpfn)) return -EINVAL; if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) return -EINVAL; - params->min_shift = max_t(size_t, params->min_shift, - ilog2(HASH_MIN_SIZE)); - if (params->nelem_hint) size = rounded_hashtable_size(params); memset(ht, 0, sizeof(*ht)); mutex_init(&ht->mutex); memcpy(&ht->p, params, sizeof(*params)); - INIT_LIST_HEAD(&ht->walkers); + + if (params->min_size) + ht->p.min_size = roundup_pow_of_two(params->min_size); + + if (params->max_size) + ht->p.max_size = rounddown_pow_of_two(params->max_size); + + ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); if (params->locks_mul) ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); @@ -1100,12 +612,8 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) return -ENOMEM; atomic_set(&ht->nelems, 0); - atomic_set(&ht->shift, ilog2(tbl->size)); - RCU_INIT_POINTER(ht->tbl, tbl); - RCU_INIT_POINTER(ht->future_tbl, tbl); - if (!ht->p.hash_rnd) - get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); + RCU_INIT_POINTER(ht->tbl, tbl); INIT_WORK(&ht->run_work, rht_deferred_worker); diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 67c7593d1dd6..a2ba6adb60a2 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -38,6 +38,16 @@ struct test_obj { struct rhash_head node; }; +static const struct rhashtable_params test_rht_params = { + .nelem_hint = TEST_HT_SIZE, + .head_offset = offsetof(struct test_obj, node), + .key_offset = offsetof(struct test_obj, value), + .key_len = sizeof(int), + .hashfn = jhash, + .max_size = 2, /* we expand/shrink manually here */ + .nulls_base = (3U << RHT_BASE_SHIFT), +}; + static int __init test_rht_lookup(struct rhashtable *ht) { unsigned int i; @@ -47,7 +57,7 @@ static int __init test_rht_lookup(struct rhashtable *ht) bool expected = !(i % 2); u32 key = i; - obj = rhashtable_lookup(ht, &key); + obj = rhashtable_lookup_fast(ht, &key, test_rht_params); if (expected && !obj) { pr_warn("Test failed: Could not find key %u\n", key); @@ -80,7 +90,7 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet) rcu_cnt = cnt = 0; if (!quiet) - pr_info(" [%#4x/%zu]", i, tbl->size); + pr_info(" [%#4x/%u]", i, tbl->size); 
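From a caller's perspective the conversion mirrors the test harness above: a const rhashtable_params object is passed by value to the _fast variants so they can be inlined and specialized. An illustrative skeleton (my_obj and the helpers are hypothetical):

    struct my_obj {
            u32                     key;
            struct rhash_head       node;
    };

    static const struct rhashtable_params my_params = {
            .head_offset    = offsetof(struct my_obj, node),
            .key_offset     = offsetof(struct my_obj, key),
            .key_len        = sizeof(u32),
            .hashfn         = jhash,
    };

    static int my_add(struct rhashtable *ht, struct my_obj *obj)
    {
            return rhashtable_insert_fast(ht, &obj->node, my_params);
    }

    static struct my_obj *my_find(struct rhashtable *ht, u32 key)
    {
            return rhashtable_lookup_fast(ht, &key, my_params);
    }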
rht_for_each_entry_rcu(obj, pos, tbl, i, node) { cnt++; @@ -133,7 +143,11 @@ static int __init test_rhashtable(struct rhashtable *ht) obj->ptr = TEST_PTR; obj->value = i * 2; - rhashtable_insert(ht, &obj->node); + err = rhashtable_insert_fast(ht, &obj->node, test_rht_params); + if (err) { + kfree(obj); + goto error; + } } rcu_read_lock(); @@ -173,10 +187,10 @@ static int __init test_rhashtable(struct rhashtable *ht) for (i = 0; i < TEST_ENTRIES; i++) { u32 key = i * 2; - obj = rhashtable_lookup(ht, &key); + obj = rhashtable_lookup_fast(ht, &key, test_rht_params); BUG_ON(!obj); - rhashtable_remove(ht, &obj->node); + rhashtable_remove_fast(ht, &obj->node, test_rht_params); kfree(obj); } @@ -195,20 +209,11 @@ static struct rhashtable ht; static int __init test_rht_init(void) { - struct rhashtable_params params = { - .nelem_hint = TEST_HT_SIZE, - .head_offset = offsetof(struct test_obj, node), - .key_offset = offsetof(struct test_obj, value), - .key_len = sizeof(int), - .hashfn = jhash, - .max_shift = 1, /* we expand/shrink manually here */ - .nulls_base = (3U << RHT_BASE_SHIFT), - }; int err; pr_info("Running resizable hashtable tests...\n"); - err = rhashtable_init(&ht, &params); + err = rhashtable_init(&ht, &test_rht_params); if (err < 0) { pr_warn("Test failed: Unable to initialize hashtable: %d\n", err); diff --git a/mm/Makefile b/mm/Makefile index 3c1caa2693bd..15dbe9903c27 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ mm_init.o mmu_context.o percpu.o slab_common.o \ compaction.o vmacache.o \ interval_tree.o list_lru.o workingset.o \ - iov_iter.o debug.o $(mmu-y) + debug.o $(mmu-y) obj-y += init-mm.o diff --git a/mm/cma.c b/mm/cma.c --- a/mm/cma.c +++ b/mm/cma.c @@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order) return (1UL << (align_order - cma->order_per_bit)) - 1; } +/* + * Find a PFN aligned to the specified order and return an offset represented in + * order_per_bits. + */ static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) { - unsigned int alignment; - if (align_order <= cma->order_per_bit) return 0; - alignment = 1UL << (align_order - cma->order_per_bit); - return ALIGN(cma->base_pfn, alignment) - - (cma->base_pfn >> cma->order_per_bit); + + return (ALIGN(cma->base_pfn, (1UL << align_order)) + - cma->base_pfn) >> cma->order_per_bit; } static unsigned long cma_bitmap_maxno(struct cma *cma) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fc00c8cb5a82..626e93db28ba 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1295,8 +1295,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, * Avoid grouping on DSO/COW pages in specific and RO pages * in general, RO pages shouldn't hurt as much anyway since * they can be in shared cache state. + * + * FIXME! This checks "pmd_dirty()" as an approximation of + * "is this a read-only page", since checking "pmd_write()" + * is even more broken. We haven't actually turned this into + * a writable page, so pmd_write() will always be false. */ - if (!pmd_write(pmd)) + if (!pmd_dirty(pmd)) flags |= TNF_NO_GROUP; /* @@ -1482,6 +1487,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { pmd_t entry; + ret = 1; /* * Avoid trapping faults against the zero page.
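In the cma_bitmap_aligned_offset() rewrite above, the subtraction now happens in PFN units before shifting down into bitmap granularity; the old expression subtracted a bitmap-granular quantity from a PFN. A worked example with made-up numbers:

/*
 * Hypothetical CMA area: base_pfn = 1000, order_per_bit = 4,
 * allocation aligned to align_order = 9 (512 pages).
 *
 * New:  (ALIGN(1000, 1UL << 9) - 1000) >> 4
 *     = (1024 - 1000) >> 4
 *     = 24 >> 4 = 1          first aligned PFN is at bitmap offset 1
 *
 * Old:  ALIGN(1000, 1UL << (9 - 4)) - (1000 >> 4)
 *     = 1024 - 62 = 962      PFNs and bitmap slots mixed in one sum
 */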
The read-only @@ -1490,11 +1496,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, */ if (prot_numa && is_huge_zero_pmd(*pmd)) { spin_unlock(ptl); - return 0; + return ret; } if (!prot_numa || !pmd_protnone(*pmd)) { - ret = 1; entry = pmdp_get_and_clear_notify(mm, addr, pmd); entry = pmd_modify(entry, newprot); ret = HPAGE_PMD_NR; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0a9ac6c26832..c41b2a0ee273 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) __SetPageHead(page); __ClearPageReserved(page); for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { - __SetPageTail(p); /* * For gigantic hugepages allocated through bootmem at * boot, it's safer to be consistent with the not-gigantic @@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) __ClearPageReserved(p); set_page_count(p, 0); p->first_page = page; + /* Make sure p->first_page is always valid for PageTail() */ + smp_wmb(); + __SetPageTail(p); } } diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 78fee632a7ee..936d81661c47 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -29,6 +29,7 @@ #include <linux/stacktrace.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/vmalloc.h> #include <linux/kasan.h> #include "kasan.h" @@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size) GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, __builtin_return_address(0)); - return ret ? 0 : -ENOMEM; + + if (ret) { + find_vm_area(addr)->flags |= VM_KASAN; + return 0; + } + + return -ENOMEM; } -void kasan_module_free(void *addr) +void kasan_free_shadow(const struct vm_struct *vm) { - vfree(kasan_mem_to_shadow(addr)); + if (vm->flags & VM_KASAN) + vfree(kasan_mem_to_shadow(vm->addr)); } static void register_global(struct kasan_global *global) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9fe07692eaad..b34ef4a32a3b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) * on for the root memcg is enough. */ if (cgroup_on_dfl(root_css->cgroup)) - mem_cgroup_from_css(root_css)->use_hierarchy = true; + root_mem_cgroup->use_hierarchy = true; + else + root_mem_cgroup->use_hierarchy = false; } static u64 memory_current_read(struct cgroup_subsys_state *css, diff --git a/mm/memory.c b/mm/memory.c index 8068893697bb..411144f977b1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3072,8 +3072,13 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, * Avoid grouping on DSO/COW pages in specific and RO pages * in general, RO pages shouldn't hurt as much anyway since * they can be in shared cache state. + * + * FIXME! This checks "pmd_dirty()" as an approximation of + * "is this a read-only page", since checking "pmd_write()" + * is even more broken. We haven't actually turned this into + * a writable page, so pmd_write() will always be false. 
*/ - if (!pte_write(pte)) + if (!pte_dirty(pte)) flags |= TNF_NO_GROUP; /* diff --git a/mm/mlock.c b/mm/mlock.c index 73cf0987088c..8a54cd214925 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -26,10 +26,10 @@ int can_do_mlock(void) { - if (capable(CAP_IPC_LOCK)) - return 1; if (rlimit(RLIMIT_MEMLOCK) != 0) return 1; + if (capable(CAP_IPC_LOCK)) + return 1; return 0; } EXPORT_SYMBOL(can_do_mlock); diff --git a/mm/nommu.c b/mm/nommu.c index 3e67e7538ecf..3fba2dc97c44 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -62,6 +62,7 @@ void *high_memory; EXPORT_SYMBOL(high_memory); struct page *mem_map; unsigned long max_mapnr; +EXPORT_SYMBOL(max_mapnr); unsigned long highest_memmap_pfn; struct percpu_counter vm_committed_as; int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7abfa70cdc1a..40e29429e7b0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2373,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, goto out; } /* Exhausted what can be done so it's blamo time */ - if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)) + if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) + || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) *did_some_progress = 1; out: oom_zonelist_unlock(ac->zonelist, gfp_mask); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 35b25e1340ca..49abccf29a29 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr) spin_unlock(&vmap_area_lock); vmap_debug_free_range(va->va_start, va->va_end); + kasan_free_shadow(vm); free_unmap_vmap_area(va); vm->size -= PAGE_SIZE; diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 64c6bed4a3d3..98a30a5b8664 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -413,7 +413,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, vlan_transfer_features(dev, vlandev); break; - case NETDEV_DOWN: + case NETDEV_DOWN: { + struct net_device *tmp; + LIST_HEAD(close_list); + if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) vlan_vid_del(dev, htons(ETH_P_8021Q), 0); @@ -425,11 +428,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, vlan = vlan_dev_priv(vlandev); if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) - dev_change_flags(vlandev, flgs & ~IFF_UP); + list_add(&vlandev->close_list, &close_list); + } + + dev_close_many(&close_list, false); + + list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) { netif_stacked_transfer_operstate(dev, vlandev); + list_del_init(&vlandev->close_list); } + list_del(&close_list); break; - + } case NETDEV_UP: /* Put all VLANs for this dev in the up state too. 
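The NETDEV_DOWN rework above batches the shutdown instead of issuing one dev_change_flags() per VLAN; it relies on dev_close_many() having grown an unlink parameter. A condensed sketch of the pattern under that assumption (4.1-era int dev_close_many(struct list_head *head, bool unlink), caller holding RTNL):

static void close_stacked_devs(struct net_device **devs, unsigned int n)
{
        LIST_HEAD(close_list);
        struct net_device *dev, *tmp;
        unsigned int i;

        /* Gather every device that must go down. */
        for (i = 0; i < n; i++)
                list_add(&devs[i]->close_list, &close_list);

        /* One batched close instead of n individual round trips. */
        dev_close_many(&close_list, false);

        /* unlink=false keeps the entries queued for post-processing. */
        list_for_each_entry_safe(dev, tmp, &close_list, close_list)
                list_del_init(&dev->close_list);
}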
 */ vlan_group_for_each_dev(grp, i, vlandev) { diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 80d08f6664cb..3e3d82d8ff70 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -940,7 +940,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args) sin_server.sin_family = AF_INET; sin_server.sin_addr.s_addr = in_aton(addr); sin_server.sin_port = htons(opts.port); - err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET, + err = __sock_create(current->nsproxy->net_ns, PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket, 1); if (err) { pr_err("%s (%d): problem creating socket\n", @@ -988,7 +988,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) sun_server.sun_family = PF_UNIX; strcpy(sun_server.sun_path, addr); - err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX, + err = __sock_create(current->nsproxy->net_ns, PF_UNIX, SOCK_STREAM, 0, &csocket, 1); if (err < 0) { pr_err("%s (%d): problem creating socket\n", diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index d8e376a5f0f1..36a1a739ad68 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -658,14 +658,30 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args) static void p9_virtio_remove(struct virtio_device *vdev) { struct virtio_chan *chan = vdev->priv; - - if (chan->inuse) - p9_virtio_close(chan->client); - vdev->config->del_vqs(vdev); + unsigned long warning_time; mutex_lock(&virtio_9p_lock); + + /* Remove self from list so we don't get new users. */ list_del(&chan->chan_list); + warning_time = jiffies; + + /* Wait for existing users to close. */ + while (chan->inuse) { + mutex_unlock(&virtio_9p_lock); + msleep(250); + if (time_after(jiffies, warning_time + 10 * HZ)) { + dev_emerg(&vdev->dev, + "p9_virtio_remove: waiting for device in use.\n"); + warning_time = jiffies; + } + mutex_lock(&virtio_9p_lock); + } + mutex_unlock(&virtio_9p_lock); + + vdev->config->del_vqs(vdev); + sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); kfree(chan->tag); diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 5d608799717e..9a8ea232d28f 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -13,7 +13,7 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ - a2mp.o amp.o ecc.o hci_request.o + a2mp.o amp.o ecc.o hci_request.o mgmt_util.o bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 20a4698e2255..70f9d945faf7 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -749,6 +749,13 @@ static int __init bt_init(void) goto sock_err; } + err = mgmt_init(); + if (err < 0) { + sco_exit(); + l2cap_exit(); + goto sock_err; + } + return 0; sock_err: @@ -763,6 +770,8 @@ error: static void __exit bt_exit(void) { + mgmt_exit(); + sco_exit(); l2cap_exit(); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 91ebb9cb31de..ee5e59839b02 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -571,7 +571,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) list_for_each_entry(d, &hci_dev_list, list) { if (!test_bit(HCI_UP, &d->flags) || - test_bit(HCI_USER_CHANNEL, &d->dev_flags) || + hci_dev_test_flag(d, HCI_USER_CHANNEL) || d->dev_type != HCI_BREDR) continue;
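From here on, the Bluetooth changes are largely a mechanical conversion from open-coded atomic bitops on hdev->dev_flags (and the former hdev->dbg_flags) to hci_dev_*_flag() accessors. The helpers are presumably thin wrappers over the same bitops, along these lines, assuming dev_flags has become a DECLARE_BITMAP() array rather than a single unsigned long:

#define hci_dev_set_flag(hdev, nr)     set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr)   clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_change_flag(hdev, nr)  change_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_flag(hdev, nr)    test_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_set_flag(hdev, nr) \
        test_and_set_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_clear_flag(hdev, nr) \
        test_and_clear_bit((nr), (hdev)->dev_flags)

The accessors also let hci_dev_clear_volatile_flags() replace the old hdev->dev_flags &= ~HCI_PERSISTENT_MASK idiom, which stops working once the flags no longer fit in one word.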
@@ -700,7 +700,7 @@ static void hci_req_directed_advertising(struct hci_request *req, * and write a new random address. The flag will be set back on * as soon as the SET_ADV_ENABLE HCI command completes. */ - clear_bit(HCI_LE_ADV, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LE_ADV); /* Set require_privacy to false so that the remote device has a * chance of identifying us. @@ -734,7 +734,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, int err; /* Let's make sure that le is enabled.*/ - if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { if (lmp_le_capable(hdev)) return ERR_PTR(-ECONNREFUSED); @@ -799,7 +799,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, * anyway have to disable it in order to start directed * advertising. */ - if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) { u8 enable = 0x00; hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); @@ -810,7 +810,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, /* If we're active scanning most controllers are unable * to initiate advertising. Simply reject the attempt. */ - if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && hdev->le_scan_type == LE_SCAN_ACTIVE) { skb_queue_purge(&req.cmd_q); hci_conn_del(conn); @@ -840,9 +840,9 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, * handler for scan disabling knows to set the correct discovery * state. */ - if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { hci_req_add_le_scan_disable(&req); - set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); } hci_req_add_le_create_conn(&req, conn); @@ -864,7 +864,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, { struct hci_conn *acl; - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { if (lmp_bredr_capable(hdev)) return ERR_PTR(-ECONNREFUSED); @@ -942,7 +942,7 @@ int hci_conn_check_link_mode(struct hci_conn *conn) * Connections is used and the link is encrypted with AES-CCM * using a P-256 authenticated combination key. */ - if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) { + if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) { if (!hci_conn_sc_enabled(conn) || !test_bit(HCI_CONN_AES_CCM, &conn->flags) || conn->key_type != HCI_LK_AUTH_COMBINATION_P256) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index bba4c344c6e0..773f2164d9a1 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -80,7 +80,7 @@ static ssize_t dut_mode_read(struct file *file, char __user *user_buf, struct hci_dev *hdev = file->private_data; char buf[3]; - buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N'; + buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 
'Y': 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); @@ -106,7 +106,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, if (strtobool(buf, &enable)) return -EINVAL; - if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags)) + if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE)) return -EALREADY; hci_req_lock(hdev); @@ -127,7 +127,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, if (err < 0) return err; - change_bit(HCI_DUT_MODE, &hdev->dbg_flags); + hci_dev_change_flag(hdev, HCI_DUT_MODE); return count; } @@ -501,7 +501,7 @@ static void le_setup(struct hci_request *req) /* LE-only controllers have LE implicitly enabled */ if (!lmp_bredr_capable(hdev)) - set_bit(HCI_LE_ENABLED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_LE_ENABLED); } static void hci_setup_event_mask(struct hci_request *req) @@ -591,7 +591,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt) if (lmp_bredr_capable(hdev)) bredr_setup(req); else - clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); if (lmp_le_capable(hdev)) le_setup(req); @@ -617,7 +617,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt) */ hdev->max_page = 0x01; - if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { u8 mode = 0x01; hci_req_add(req, HCI_OP_WRITE_SSP_MODE, @@ -656,7 +656,7 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt) sizeof(cp), &cp); } - if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { u8 enable = 1; hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), &enable); @@ -693,7 +693,7 @@ static void hci_set_le_support(struct hci_request *req) memset(&cp, 0, sizeof(cp)); - if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { cp.le = 0x01; cp.simul = 0x00; } @@ -881,7 +881,7 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt) hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); /* Enable Secure Connections if supported and configured */ - if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && bredr_sc_enabled(hdev)) { u8 support = 0x01; @@ -901,7 +901,7 @@ static int __hci_init(struct hci_dev *hdev) /* The Device Under Test (DUT) mode is special and available for * all controller types. So just create it early on. */ - if (test_bit(HCI_SETUP, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_SETUP)) { debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, &dut_mode_fops); } @@ -937,8 +937,8 @@ static int __hci_init(struct hci_dev *hdev) * So only when in setup phase or config phase, create the debugfs * entries and register the SMP channels. 
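dut_mode_write() above and force_static_address_write() in hci_debugfs.c below share one toggle shape; stripped of the HCI command that dut_mode additionally issues, the generic form looks roughly like this (function and parameter names are illustrative):

static ssize_t bool_flag_write(struct file *file, const char __user *user_buf,
                               size_t count, loff_t *ppos, int flag)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, sizeof(buf) - 1);
        bool enable;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        buf[buf_size] = '\0';

        if (strtobool(buf, &enable))
                return -EINVAL;

        /* Re-writing the current state is refused, not silently accepted. */
        if (enable == hci_dev_test_flag(hdev, flag))
                return -EALREADY;

        hci_dev_change_flag(hdev, flag);

        return count;
}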
*/ - if (!test_bit(HCI_SETUP, &hdev->dev_flags) && - !test_bit(HCI_CONFIG, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) return 0; hci_debugfs_create_common(hdev); @@ -1300,12 +1300,12 @@ int hci_inquiry(void __user *arg) if (!hdev) return -ENODEV; - if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } @@ -1315,7 +1315,7 @@ int hci_inquiry(void __user *arg) goto done; } - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; } @@ -1387,17 +1387,17 @@ static int hci_dev_do_open(struct hci_dev *hdev) hci_req_lock(hdev); - if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { ret = -ENODEV; goto done; } - if (!test_bit(HCI_SETUP, &hdev->dev_flags) && - !test_bit(HCI_CONFIG, &hdev->dev_flags)) { + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { /* Check for rfkill but allow the HCI setup stage to * proceed (which in itself doesn't cause any RF activity). */ - if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { ret = -ERFKILL; goto done; } @@ -1414,7 +1414,7 @@ static int hci_dev_do_open(struct hci_dev *hdev) * This check is only valid for BR/EDR controllers * since AMP controllers do not have an address. */ - if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hdev->dev_type == HCI_BREDR && !bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY)) { @@ -1436,7 +1436,7 @@ static int hci_dev_do_open(struct hci_dev *hdev) atomic_set(&hdev->cmd_cnt, 1); set_bit(HCI_INIT, &hdev->flags); - if (test_bit(HCI_SETUP, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_SETUP)) { if (hdev->setup) ret = hdev->setup(hdev); @@ -1448,7 +1448,7 @@ static int hci_dev_do_open(struct hci_dev *hdev) */ if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks)) - set_bit(HCI_UNCONFIGURED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_UNCONFIGURED); /* For an unconfigured controller it is required to * read at least the version information provided by @@ -1458,11 +1458,11 @@ static int hci_dev_do_open(struct hci_dev *hdev) * also the original Bluetooth public device address * will be read using the Read BD Address command. */ - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) ret = __hci_unconf_init(hdev); } - if (test_bit(HCI_CONFIG, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_CONFIG)) { /* If public address change is configured, ensure that * the address gets programmed. 
If the driver does not * support changing the public address, fail the power @@ -1476,8 +1476,8 @@ static int hci_dev_do_open(struct hci_dev *hdev) } if (!ret) { - if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) && - !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) ret = __hci_init(hdev); } @@ -1485,13 +1485,13 @@ static int hci_dev_do_open(struct hci_dev *hdev) if (!ret) { hci_dev_hold(hdev); - set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); set_bit(HCI_UP, &hdev->flags); hci_notify(hdev, HCI_DEV_UP); - if (!test_bit(HCI_SETUP, &hdev->dev_flags) && - !test_bit(HCI_CONFIG, &hdev->dev_flags) && - !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) && - !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG) && + !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && hdev->dev_type == HCI_BREDR) { hci_dev_lock(hdev); mgmt_powered(hdev, 1); @@ -1543,8 +1543,8 @@ int hci_dev_open(__u16 dev) * HCI_USER_CHANNEL will be set first before attempting to * open the device. */ - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) && - !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EOPNOTSUPP; goto done; } @@ -1554,7 +1554,7 @@ int hci_dev_open(__u16 dev) * particularly important if the setup procedure has not yet * completed. */ - if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) + if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) cancel_delayed_work(&hdev->power_off); /* After this call it is guaranteed that the setup procedure @@ -1569,9 +1569,9 @@ int hci_dev_open(__u16 dev) * is in use this bit will be cleared again and userspace has * to explicitly enable it. 
*/ - if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) && - !test_bit(HCI_MGMT, &hdev->dev_flags)) - set_bit(HCI_BONDABLE, &hdev->dev_flags); + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + !hci_dev_test_flag(hdev, HCI_MGMT)) + hci_dev_set_flag(hdev, HCI_BONDABLE); err = hci_dev_do_open(hdev); @@ -1601,7 +1601,7 @@ static int hci_dev_do_close(struct hci_dev *hdev) { BT_DBG("%s %p", hdev->name, hdev); - if (!test_bit(HCI_UNREGISTER, &hdev->dev_flags)) { + if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) { /* Execute vendor specific shutdown routine */ if (hdev->shutdown) hdev->shutdown(hdev); @@ -1625,17 +1625,17 @@ static int hci_dev_do_close(struct hci_dev *hdev) if (hdev->discov_timeout > 0) { cancel_delayed_work(&hdev->discov_off); hdev->discov_timeout = 0; - clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); - clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); } - if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) + if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) cancel_delayed_work(&hdev->service_cache); cancel_delayed_work_sync(&hdev->le_scan_disable); cancel_delayed_work_sync(&hdev->le_scan_restart); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) cancel_delayed_work_sync(&hdev->rpa_expired); /* Avoid potential lockdep warnings from the *_flush() calls by @@ -1647,7 +1647,7 @@ static int hci_dev_do_close(struct hci_dev *hdev) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { + if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { if (hdev->dev_type == HCI_BREDR) mgmt_powered(hdev, 0); } @@ -1667,8 +1667,8 @@ static int hci_dev_do_close(struct hci_dev *hdev) /* Reset device */ skb_queue_purge(&hdev->cmd_q); atomic_set(&hdev->cmd_cnt, 1); - if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) && - !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) && + if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) && + !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { set_bit(HCI_INIT, &hdev->flags); __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); @@ -1699,7 +1699,7 @@ static int hci_dev_do_close(struct hci_dev *hdev) /* Clear flags */ hdev->flags &= BIT(HCI_RAW); - hdev->dev_flags &= ~HCI_PERSISTENT_MASK; + hci_dev_clear_volatile_flags(hdev); /* Controller radio is available but is currently powered down */ hdev->amp_status = AMP_STATUS_POWERED_DOWN; @@ -1723,12 +1723,12 @@ int hci_dev_close(__u16 dev) if (!hdev) return -ENODEV; - if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } - if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) + if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) cancel_delayed_work(&hdev->power_off); err = hci_dev_do_close(hdev); @@ -1786,12 +1786,12 @@ int hci_dev_reset(__u16 dev) goto done; } - if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } @@ -1812,12 +1812,12 @@ int hci_dev_reset_stat(__u16 dev) if (!hdev) return -ENODEV; - if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { ret = -EBUSY; goto done; } - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { + if 
(hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { ret = -EOPNOTSUPP; goto done; } @@ -1836,29 +1836,29 @@ static void hci_update_scan_state(struct hci_dev *hdev, u8 scan) BT_DBG("%s scan 0x%02x", hdev->name, scan); if ((scan & SCAN_PAGE)) - conn_changed = !test_and_set_bit(HCI_CONNECTABLE, - &hdev->dev_flags); + conn_changed = !hci_dev_test_and_set_flag(hdev, + HCI_CONNECTABLE); else - conn_changed = test_and_clear_bit(HCI_CONNECTABLE, - &hdev->dev_flags); + conn_changed = hci_dev_test_and_clear_flag(hdev, + HCI_CONNECTABLE); if ((scan & SCAN_INQUIRY)) { - discov_changed = !test_and_set_bit(HCI_DISCOVERABLE, - &hdev->dev_flags); + discov_changed = !hci_dev_test_and_set_flag(hdev, + HCI_DISCOVERABLE); } else { - clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); - discov_changed = test_and_clear_bit(HCI_DISCOVERABLE, - &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + discov_changed = hci_dev_test_and_clear_flag(hdev, + HCI_DISCOVERABLE); } - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; if (conn_changed || discov_changed) { /* In case this was disabled through mgmt */ - set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); - if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) mgmt_update_adv_data(hdev); mgmt_new_settings(hdev); @@ -1878,12 +1878,12 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) if (!hdev) return -ENODEV; - if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } @@ -1893,7 +1893,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) goto done; } - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; } @@ -1997,7 +1997,7 @@ int hci_get_dev_list(void __user *arg) * is running, but in that case still indicate that the * device is actually down. */ - if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags &= ~BIT(HCI_UP); (dr + n)->dev_id = hdev->id; @@ -2035,7 +2035,7 @@ int hci_get_dev_info(void __user *arg) * is running, but in that case still indicate that the * device is actually down. */ - if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags = hdev->flags & ~BIT(HCI_UP); else flags = hdev->flags; @@ -2078,16 +2078,16 @@ static int hci_rfkill_set_block(void *data, bool blocked) BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); - if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; if (blocked) { - set_bit(HCI_RFKILLED, &hdev->dev_flags); - if (!test_bit(HCI_SETUP, &hdev->dev_flags) && - !test_bit(HCI_CONFIG, &hdev->dev_flags)) + hci_dev_set_flag(hdev, HCI_RFKILLED); + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) hci_dev_do_close(hdev); } else { - clear_bit(HCI_RFKILLED, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_RFKILLED); } return 0; @@ -2116,23 +2116,23 @@ static void hci_power_on(struct work_struct *work) * ignored and they need to be checked now. If they are still * valid, it is important to turn the device back off. 
*/ - if (test_bit(HCI_RFKILLED, &hdev->dev_flags) || - test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) || + if (hci_dev_test_flag(hdev, HCI_RFKILLED) || + hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || (hdev->dev_type == HCI_BREDR && !bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY))) { - clear_bit(HCI_AUTO_OFF, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_do_close(hdev); - } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { + } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { queue_delayed_work(hdev->req_workqueue, &hdev->power_off, HCI_AUTO_OFF_TIMEOUT); } - if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) { + if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { /* For unconfigured devices, set the HCI_RAW flag * so that userspace can easily identify them. */ - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) set_bit(HCI_RAW, &hdev->flags); /* For fully configured devices, this will send @@ -2143,11 +2143,11 @@ static void hci_power_on(struct work_struct *work) * and no event will be send. */ mgmt_index_added(hdev); - } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) { + } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { /* When the controller is now configured, then it * is important to clear the HCI_RAW flag. */ - if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) clear_bit(HCI_RAW, &hdev->flags); /* Powering on the controller with HCI_CONFIG set only @@ -2516,6 +2516,42 @@ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) } } +bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +{ + struct smp_ltk *k; + struct smp_irk *irk; + u8 addr_type; + + if (type == BDADDR_BREDR) { + if (hci_find_link_key(hdev, bdaddr)) + return true; + return false; + } + + /* Convert to HCI addr type which struct smp_ltk uses */ + if (type == BDADDR_LE_PUBLIC) + addr_type = ADDR_LE_DEV_PUBLIC; + else + addr_type = ADDR_LE_DEV_RANDOM; + + irk = hci_get_irk(hdev, bdaddr, addr_type); + if (irk) { + bdaddr = &irk->bdaddr; + addr_type = irk->addr_type; + } + + rcu_read_lock(); + list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { + rcu_read_unlock(); + return true; + } + } + rcu_read_unlock(); + + return false; +} + /* HCI command timer function */ static void hci_cmd_timeout(struct work_struct *work) { @@ -2866,12 +2902,26 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status, hci_dev_lock(hdev); - hci_inquiry_cache_flush(hdev); + if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, + &hdev->quirks)) { + /* If we were running LE only scan, change discovery + * state. If we were running both LE and BR/EDR inquiry + * simultaneously, and BR/EDR inquiry is already + * finished, stop discovery, otherwise BR/EDR inquiry + * will stop discovery when finished. 
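hci_bdaddr_is_paired() introduced above folds three lookups into one predicate: stored BR/EDR link keys, and for LE addresses the LTK list after first resolving a possible RPA through the IRK table. A hypothetical caller (illustrative, not the actual mgmt.c call site):

static int start_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        /* BDADDR_BREDR consults link keys; LE types consult LTKs. */
        if (hci_bdaddr_is_paired(hdev, bdaddr, type))
                return -EALREADY;

        /* ... proceed with a fresh pairing request ... */
        return 0;
}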
+ */ + if (!test_bit(HCI_INQUIRY, &hdev->flags)) + hci_discovery_set_state(hdev, + DISCOVERY_STOPPED); + } else { + hci_inquiry_cache_flush(hdev); - err = hci_req_run(&req, inquiry_complete); - if (err) { - BT_ERR("Inquiry request failed: err %d", err); - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + err = hci_req_run(&req, inquiry_complete); + if (err) { + BT_ERR("Inquiry request failed: err %d", err); + hci_discovery_set_state(hdev, + DISCOVERY_STOPPED); + } } hci_dev_unlock(hdev); @@ -2950,7 +3000,7 @@ static void le_scan_restart_work(struct work_struct *work) BT_DBG("%s", hdev->name); /* If controller is not scanning we are done. */ - if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return; hci_req_init(&req, hdev); @@ -2983,9 +3033,9 @@ static void le_scan_restart_work(struct work_struct *work) void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *bdaddr_type) { - if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) || + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || - (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) && + (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { bacpy(bdaddr, &hdev->static_addr); *bdaddr_type = ADDR_LE_DEV_RANDOM; @@ -3153,16 +3203,16 @@ int hci_register_dev(struct hci_dev *hdev) } if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) - set_bit(HCI_RFKILLED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_RFKILLED); - set_bit(HCI_SETUP, &hdev->dev_flags); - set_bit(HCI_AUTO_OFF, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_SETUP); + hci_dev_set_flag(hdev, HCI_AUTO_OFF); if (hdev->dev_type == HCI_BREDR) { /* Assume BR/EDR support until proven otherwise (such as * through reading supported features during init. */ - set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); } write_lock(&hci_dev_list_lock); @@ -3173,7 +3223,7 @@ int hci_register_dev(struct hci_dev *hdev) * and should not be included in normal operation. 
*/ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) - set_bit(HCI_UNCONFIGURED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_UNCONFIGURED); hci_notify(hdev, HCI_DEV_REG); hci_dev_hold(hdev); @@ -3199,7 +3249,7 @@ void hci_unregister_dev(struct hci_dev *hdev) BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - set_bit(HCI_UNREGISTER, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_UNREGISTER); id = hdev->id; @@ -3215,8 +3265,8 @@ void hci_unregister_dev(struct hci_dev *hdev) cancel_work_sync(&hdev->power_on); if (!test_bit(HCI_INIT, &hdev->flags) && - !test_bit(HCI_SETUP, &hdev->dev_flags) && - !test_bit(HCI_CONFIG, &hdev->dev_flags)) { + !hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { hci_dev_lock(hdev); mgmt_index_removed(hdev); hci_dev_unlock(hdev); @@ -3890,7 +3940,7 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) { - if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { /* ACL tx timeout must be longer than maximum * link supervision timeout (40.9 seconds) */ if (!cnt && time_after(jiffies, hdev->acl_last_tx + @@ -4073,7 +4123,7 @@ static void hci_sched_le(struct hci_dev *hdev) if (!hci_conn_num(hdev, LE_LINK)) return; - if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { /* LE tx timeout must be longer than maximum * link supervision timeout (40.9 seconds) */ if (!hdev->le_cnt && hdev->le_pkts && @@ -4121,7 +4171,7 @@ static void hci_tx_work(struct work_struct *work) BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt, hdev->le_cnt); - if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { /* Schedule queues and send stuff to HCI driver */ hci_sched_acl(hdev); hci_sched_sco(hdev); @@ -4318,7 +4368,7 @@ static void hci_rx_work(struct work_struct *work) hci_send_to_sock(hdev, skb); } - if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { kfree_skb(skb); continue; } diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c index 65261e5d4b84..0818fabf346a 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c @@ -166,7 +166,7 @@ static int remote_oob_show(struct seq_file *f, void *ptr) seq_printf(f, "%pMR (type %u) %u %*phN %*phN %*phN %*phN\n", &data->bdaddr, data->bdaddr_type, data->present, 16, data->hash192, 16, data->rand192, - 16, data->hash256, 19, data->rand256); + 16, data->hash256, 16, data->rand256); } hci_dev_unlock(hdev); @@ -247,7 +247,7 @@ static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf, struct hci_dev *hdev = file->private_data; char buf[3]; - buf[0] = test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N'; + buf[0] = hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS) ? 'Y': 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); @@ -265,7 +265,7 @@ static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf, struct hci_dev *hdev = file->private_data; char buf[3]; - buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N'; + buf[0] = hci_dev_test_flag(hdev, HCI_SC_ONLY) ? 
'Y': 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); @@ -679,7 +679,7 @@ static ssize_t force_static_address_read(struct file *file, struct hci_dev *hdev = file->private_data; char buf[3]; - buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N'; + buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ? 'Y': 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); @@ -704,10 +704,10 @@ static ssize_t force_static_address_write(struct file *file, if (strtobool(buf, &enable)) return -EINVAL; - if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags)) + if (enable == hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR)) return -EALREADY; - change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags); + hci_dev_change_flag(hdev, HCI_FORCE_STATIC_ADDR); return count; } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 39653d46932b..62f92a508961 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -70,7 +70,7 @@ static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) if (status) return; - set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_PERIODIC_INQ); } static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) @@ -82,7 +82,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) if (status) return; - clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); hci_conn_check_pending(hdev); } @@ -198,7 +198,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) return; /* Reset all non-persistent flags */ - hdev->dev_flags &= ~HCI_PERSISTENT_MASK; + hci_dev_clear_volatile_flags(hdev); hci_discovery_set_state(hdev, DISCOVERY_STOPPED); @@ -265,7 +265,7 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_set_local_name_complete(hdev, sent, status); else if (!status) memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); @@ -282,8 +282,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) if (rp->status) return; - if (test_bit(HCI_SETUP, &hdev->dev_flags) || - test_bit(HCI_CONFIG, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG)) memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); } @@ -309,7 +309,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) clear_bit(HCI_AUTH, &hdev->flags); } - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_auth_enable_complete(hdev, status); hci_dev_unlock(hdev); @@ -404,7 +404,7 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) if (status == 0) memcpy(hdev->dev_class, sent, 3); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_set_class_of_dev_complete(hdev, sent, status); hci_dev_unlock(hdev); @@ -497,13 +497,13 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) hdev->features[1][0] &= ~LMP_HOST_SSP; } - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_ssp_enable_complete(hdev, sent->mode, status); else if (!status) { if (sent->mode) - set_bit(HCI_SSP_ENABLED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_SSP_ENABLED); else - clear_bit(HCI_SSP_ENABLED, 
&hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); } hci_dev_unlock(hdev); @@ -529,11 +529,11 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb) hdev->features[1][0] &= ~LMP_HOST_SC; } - if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) { + if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) { if (sent->support) - set_bit(HCI_SC_ENABLED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_SC_ENABLED); else - clear_bit(HCI_SC_ENABLED, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_SC_ENABLED); } hci_dev_unlock(hdev); @@ -548,8 +548,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) if (rp->status) return; - if (test_bit(HCI_SETUP, &hdev->dev_flags) || - test_bit(HCI_CONFIG, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG)) { hdev->hci_ver = rp->hci_ver; hdev->hci_rev = __le16_to_cpu(rp->hci_rev); hdev->lmp_ver = rp->lmp_ver; @@ -568,8 +568,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, if (rp->status) return; - if (test_bit(HCI_SETUP, &hdev->dev_flags) || - test_bit(HCI_CONFIG, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG)) memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); } @@ -691,7 +691,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) if (test_bit(HCI_INIT, &hdev->flags)) bacpy(&hdev->bdaddr, &rp->bdaddr); - if (test_bit(HCI_SETUP, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_SETUP)) bacpy(&hdev->setup_addr, &rp->bdaddr); } @@ -900,7 +900,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); if (rp->status) @@ -926,7 +926,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, rp->status); @@ -985,7 +985,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, rp->status); @@ -1001,7 +1001,7 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, rp->status); @@ -1016,7 +1016,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, rp->status); @@ -1032,7 +1032,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, rp->status); @@ -1109,7 +1109,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) if (*sent) { struct hci_conn *conn; - set_bit(HCI_LE_ADV, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_LE_ADV); conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 
if (conn) @@ -1117,7 +1117,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) &conn->le_conn_timeout, conn->conn_timeout); } else { - clear_bit(HCI_LE_ADV, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LE_ADV); } hci_dev_unlock(hdev); @@ -1192,7 +1192,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, switch (cp->enable) { case LE_SCAN_ENABLE: - set_bit(HCI_LE_SCAN, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_LE_SCAN); if (hdev->le_scan_type == LE_SCAN_ACTIVE) clear_pending_adv_report(hdev); break; @@ -1217,7 +1217,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, */ cancel_delayed_work(&hdev->le_scan_disable); - clear_bit(HCI_LE_SCAN, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LE_SCAN); /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we * interrupted scanning due to a connect request. Mark @@ -1226,10 +1226,9 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, * been disabled because of active scanning, so * re-enable it again if necessary. */ - if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED, - &hdev->dev_flags)) + if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); - else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) && + else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) && hdev->discovery.state == DISCOVERY_FINDING) mgmt_reenable_advertising(hdev); @@ -1388,11 +1387,11 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev, if (sent->le) { hdev->features[1][0] |= LMP_HOST_LE; - set_bit(HCI_LE_ENABLED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_LE_ENABLED); } else { hdev->features[1][0] &= ~LMP_HOST_LE; - clear_bit(HCI_LE_ENABLED, &hdev->dev_flags); - clear_bit(HCI_ADVERTISING, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LE_ENABLED); + hci_dev_clear_flag(hdev, HCI_ADVERTISING); } if (sent->simul) @@ -1769,7 +1768,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); if (!conn) @@ -2118,7 +2117,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ wake_up_bit(&hdev->flags, HCI_INQUIRY); - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; hci_dev_lock(hdev); @@ -2127,7 +2126,16 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) goto unlock; if (list_empty(&discov->resolve)) { - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + /* When BR/EDR inquiry is active and no LE scanning is in + * progress, then change discovery state to indicate completion. + * + * When running LE scanning and BR/EDR inquiry simultaneously + * and the LE scan already finished, then change the discovery + * state to indicate completion. 
+ */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || + !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); goto unlock; } @@ -2136,7 +2144,16 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) e->name_state = NAME_PENDING; hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); } else { - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + /* When BR/EDR inquiry is active and no LE scanning is in + * progress, then change discovery state to indicate completion. + * + * When running LE scanning and BR/EDR inquiry simultaneously + * and the LE scan already finished, then change the discovery + * state to indicate completion. + */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || + !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); } unlock: @@ -2154,7 +2171,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) if (!num_rsp) return; - if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) return; hci_dev_lock(hdev); @@ -2304,8 +2321,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) * connection. These features are only touched through mgmt so * only do the checks if HCI_MGMT is set. */ - if (test_bit(HCI_MGMT, &hdev->dev_flags) && - !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_MGMT) && + !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr, BDADDR_BREDR)) { hci_reject_conn(hdev, &ev->bdaddr); @@ -2542,7 +2559,7 @@ static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto check_auth; if (ev->status == 0) @@ -2608,7 +2625,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) * whenever the encryption procedure fails. */ if (ev->status && conn->type == LE_LINK) - set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); @@ -2626,7 +2643,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) * connections that are not encrypted with AES-CCM * using a P-256 authenticated combination key. 
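The stop-or-defer rule is spelled out twice in hci_inquiry_complete_evt() above and once more in le_scan_disable_work_complete() earlier; reduced to a single predicate it reads as follows (helper name illustrative):

/* BR/EDR inquiry finished: may discovery move to DISCOVERY_STOPPED? */
static bool inquiry_ends_discovery(struct hci_dev *hdev)
{
        /* Without the quirk, inquiry and LE scan never ran concurrently. */
        if (!test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
                return true;

        /* Otherwise only once the LE side has already wound down. */
        return !hci_dev_test_flag(hdev, HCI_LE_SCAN);
}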
*/ - if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); @@ -3331,11 +3348,11 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_conn_drop(conn); } - if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) && + if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); - } else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { + } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { u8 secure; if (conn->pending_sec_level == BT_SECURITY_HIGH) @@ -3391,7 +3408,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s", hdev->name); - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; hci_dev_lock(hdev); @@ -3465,7 +3482,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); conn_set_key(conn, ev->key_type, conn->pin_length); - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, @@ -3487,7 +3504,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) * store_hint being 0). */ if (key->type == HCI_LK_DEBUG_COMBINATION && - !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) { + !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { list_del_rcu(&key->list); kfree_rcu(key, rcu); goto unlock; @@ -3570,7 +3587,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, if (!num_rsp) return; - if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) return; hci_dev_lock(hdev); @@ -3776,7 +3793,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, if (!num_rsp) return; - if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) return; hci_dev_lock(hdev); @@ -3794,7 +3811,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, data.rssi = info->rssi; data.ssp_mode = 0x01; - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) name_known = eir_has_data_type(info->data, sizeof(info->data), EIR_NAME_COMPLETE); @@ -3890,41 +3907,37 @@ static u8 bredr_oob_data_present(struct hci_conn *conn) if (!data) return 0x00; - if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) { - if (bredr_sc_enabled(hdev)) { - /* When Secure Connections is enabled, then just - * return the present value stored with the OOB - * data. The stored value contains the right present - * information. However it can only be trusted when - * not in Secure Connection Only mode. - */ - if (!test_bit(HCI_SC_ONLY, &hdev->dev_flags)) - return data->present; - - /* When Secure Connections Only mode is enabled, then - * the P-256 values are required. If they are not - * available, then do not declare that OOB data is - * present. - */ - if (!memcmp(data->rand256, ZERO_KEY, 16) || - !memcmp(data->hash256, ZERO_KEY, 16)) - return 0x00; - - return 0x02; - } + if (bredr_sc_enabled(hdev)) { + /* When Secure Connections is enabled, then just + * return the present value stored with the OOB + * data. The stored value contains the right present + * information. 
However it can only be trusted when + * not in Secure Connection Only mode. + */ + if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) + return data->present; - /* When Secure Connections is not enabled or actually - * not supported by the hardware, then check that if - * P-192 data values are present. + /* When Secure Connections Only mode is enabled, then + * the P-256 values are required. If they are not + * available, then do not declare that OOB data is + * present. */ - if (!memcmp(data->rand192, ZERO_KEY, 16) || - !memcmp(data->hash192, ZERO_KEY, 16)) + if (!memcmp(data->rand256, ZERO_KEY, 16) || + !memcmp(data->hash256, ZERO_KEY, 16)) return 0x00; - return 0x01; + return 0x02; } - return 0x00; + /* When Secure Connections is not enabled or actually + * not supported by the hardware, then check that if + * P-192 data values are present. + */ + if (!memcmp(data->rand192, ZERO_KEY, 16) || + !memcmp(data->hash192, ZERO_KEY, 16)) + return 0x00; + + return 0x01; } static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) @@ -3942,13 +3955,13 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_conn_hold(conn); - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; /* Allow pairing if we're pairable, the initiators of the * pairing or if the remote is not requesting bonding. */ - if (test_bit(HCI_BONDABLE, &hdev->dev_flags) || + if (hci_dev_test_flag(hdev, HCI_BONDABLE) || test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { struct hci_cp_io_capability_reply cp; @@ -3974,7 +3987,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) /* If we're not bondable, force one of the non-bondable * authentication requirement values. 
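The flattened bredr_oob_data_present() above also drops the old conn->out / HCI_CONN_REMOTE_OOB gate, leaving a three-valued result. Its decision table as a standalone sketch (parameter names illustrative; 0x00/0x01/0x02 are the HCI OOB-data-present values for none, P-192 and P-256):

static u8 oob_present_value(bool sc_enabled, bool sc_only,
                            bool have_p192, bool have_p256, u8 stored)
{
        if (sc_enabled) {
                if (!sc_only)
                        return stored;  /* trust the value stored with the data */
                /* SC-only mode insists on the P-256 hash/randomizer pair. */
                return have_p256 ? 0x02 : 0x00;
        }

        /* Legacy pairing: P-192 values or nothing. */
        return have_p192 ? 0x01 : 0x00;
}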
*/ - if (!test_bit(HCI_BONDABLE, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) conn->auth_type &= HCI_AT_NO_BONDING_MITM; cp.authentication = conn->auth_type; @@ -4011,8 +4024,6 @@ static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->remote_cap = ev->capability; conn->remote_auth = ev->authentication; - if (ev->oob_data) - set_bit(HCI_CONN_REMOTE_OOB, &conn->flags); unlock: hci_dev_unlock(hdev); @@ -4029,7 +4040,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, hci_dev_lock(hdev); - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); @@ -4100,7 +4111,7 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev, BT_DBG("%s", hdev->name); - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); } @@ -4119,7 +4130,7 @@ static void hci_user_passkey_notify_evt(struct hci_dev *hdev, conn->passkey_notify = __le32_to_cpu(ev->passkey); conn->passkey_entered = 0; - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, conn->dst_type, conn->passkey_notify, conn->passkey_entered); @@ -4157,7 +4168,7 @@ static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) return; } - if (test_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_MGMT)) mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, conn->dst_type, conn->passkey_notify, conn->passkey_entered); @@ -4226,7 +4237,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, hci_dev_lock(hdev); - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_MGMT)) goto unlock; data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); @@ -4243,7 +4254,7 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, struct hci_cp_remote_oob_ext_data_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); - if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { memset(cp.hash192, 0, sizeof(cp.hash192)); memset(cp.rand192, 0, sizeof(cp.rand192)); } else { @@ -4409,7 +4420,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) /* All controllers implicitly stop advertising in the event of a * connection, so ensure that the state bit is cleared. */ - clear_bit(HCI_LE_ADV, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LE_ADV); conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); if (!conn) { @@ -4432,7 +4443,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) if (conn->out) { conn->resp_addr_type = ev->bdaddr_type; bacpy(&conn->resp_addr, &ev->bdaddr); - if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { conn->init_addr_type = ADDR_LE_DEV_RANDOM; bacpy(&conn->init_addr, &hdev->rpa); } else { @@ -4658,7 +4669,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* If the controller is not using resolvable random * addresses, then this report can be ignored. 
*/ - if (!test_bit(HCI_PRIVACY, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) return; /* If the local IRK of the controller does not match diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index f857e765e081..55e096d20a0f 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -270,7 +270,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req) * and 0x01 (whitelist enabled) use the new filter policies * 0x02 (no whitelist) and 0x03 (whitelist enabled). */ - if (test_bit(HCI_PRIVACY, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_PRIVACY) && (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) filter_policy |= 0x02; @@ -304,10 +304,10 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) * In this kind of scenario skip the update and let the random * address be updated at the next cycle. */ - if (test_bit(HCI_LE_ADV, &hdev->dev_flags) || + if (hci_dev_test_flag(hdev, HCI_LE_ADV) || hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { BT_DBG("Deferring random address update"); - set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return; } @@ -324,12 +324,12 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy, * current RPA has expired or there is something else than * the current RPA in use, then generate a new one. */ - if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { int to; *own_addr_type = ADDR_LE_DEV_RANDOM; - if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) && + if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && !bacmp(&hdev->random_addr, &hdev->rpa)) return 0; @@ -383,9 +383,9 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy, * and a static address has been configured, then use that * address instead of the public BR/EDR address. 
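 * For example, a controller whose public address reads as * BDADDR_ANY (00:00:00:00:00:00) but which has hdev->static_addr * programmed will advertise and initiate connections from the * static random address.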
*/ - if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) || + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || - (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) && + (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { *own_addr_type = ADDR_LE_DEV_RANDOM; if (bacmp(&hdev->static_addr, &hdev->random_addr)) @@ -425,7 +425,7 @@ void __hci_update_page_scan(struct hci_request *req) struct hci_dev *hdev = req->hdev; u8 scan; - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return; if (!hdev_is_powered(hdev)) @@ -434,7 +434,7 @@ void __hci_update_page_scan(struct hci_request *req) if (mgmt_powering_down(hdev)) return; - if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) || + if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || disconnected_whitelist_entries(hdev)) scan = SCAN_PAGE; else @@ -443,7 +443,7 @@ void __hci_update_page_scan(struct hci_request *req) if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE)) return; - if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) scan |= SCAN_INQUIRY; hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); @@ -471,14 +471,14 @@ void __hci_update_background_scan(struct hci_request *req) if (!test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || - test_bit(HCI_SETUP, &hdev->dev_flags) || - test_bit(HCI_CONFIG, &hdev->dev_flags) || - test_bit(HCI_AUTO_OFF, &hdev->dev_flags) || - test_bit(HCI_UNREGISTER, &hdev->dev_flags)) + hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG) || + hci_dev_test_flag(hdev, HCI_AUTO_OFF) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) return; /* No point in doing scanning if LE support hasn't been enabled */ - if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; /* If discovery is active don't interfere with it */ @@ -502,7 +502,7 @@ void __hci_update_background_scan(struct hci_request *req) */ /* If controller is not scanning we are done. */ - if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return; hci_req_add_le_scan_disable(req); @@ -524,7 +524,7 @@ void __hci_update_background_scan(struct hci_request *req) /* If controller is currently scanning, we stop it to ensure we * don't miss any advertising (due to duplicates filter). 
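 * Re-enabling the scan afterwards resets that filter, so * advertisers which were already reported once will be reported * again.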
*/ - if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) hci_req_add_le_scan_disable(req); hci_req_add_le_passive_scan(req); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index cb4bc4883350..85a44a7dc150 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -30,6 +30,12 @@ #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/hci_mon.h> +#include <net/bluetooth/mgmt.h> + +#include "mgmt_util.h" + +static LIST_HEAD(mgmt_chan_list); +static DEFINE_MUTEX(mgmt_chan_list_lock); static atomic_t monitor_promisc = ATOMIC_INIT(0); @@ -44,8 +50,29 @@ struct hci_pinfo { struct hci_filter filter; __u32 cmsg_mask; unsigned short channel; + unsigned long flags; }; +void hci_sock_set_flag(struct sock *sk, int nr) +{ + set_bit(nr, &hci_pi(sk)->flags); +} + +void hci_sock_clear_flag(struct sock *sk, int nr) +{ + clear_bit(nr, &hci_pi(sk)->flags); +} + +int hci_sock_test_flag(struct sock *sk, int nr) +{ + return test_bit(nr, &hci_pi(sk)->flags); +} + +unsigned short hci_sock_get_channel(struct sock *sk) +{ + return hci_pi(sk)->channel; +} + static inline int hci_test_bit(int nr, const void *addr) { return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31)); @@ -185,7 +212,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) /* Send frame to sockets with specific channel */ void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, - struct sock *skip_sk) + int flag, struct sock *skip_sk) { struct sock *sk; @@ -196,6 +223,10 @@ void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, sk_for_each(sk, &hci_sk_list.head) { struct sk_buff *nskb; + /* Ignore socket without the flag set */ + if (!hci_sock_test_flag(sk, flag)) + continue; + /* Skip the original socket */ if (sk == skip_sk) continue; @@ -263,7 +294,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) hdr->index = cpu_to_le16(hdev->id); hdr->len = cpu_to_le16(skb->len); - hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL); + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, + HCI_SOCK_TRUSTED, NULL); kfree_skb(skb_copy); } @@ -370,7 +402,8 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) skb = create_monitor_event(hdev, event); if (skb) { - hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL); + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + HCI_SOCK_TRUSTED, NULL); kfree_skb(skb); } } @@ -401,6 +434,56 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) } } +static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel) +{ + struct hci_mgmt_chan *c; + + list_for_each_entry(c, &mgmt_chan_list, list) { + if (c->channel == channel) + return c; + } + + return NULL; +} + +static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel) +{ + struct hci_mgmt_chan *c; + + mutex_lock(&mgmt_chan_list_lock); + c = __hci_mgmt_chan_find(channel); + mutex_unlock(&mgmt_chan_list_lock); + + return c; +} + +int hci_mgmt_chan_register(struct hci_mgmt_chan *c) +{ + if (c->channel < HCI_CHANNEL_CONTROL) + return -EINVAL; + + mutex_lock(&mgmt_chan_list_lock); + if (__hci_mgmt_chan_find(c->channel)) { + mutex_unlock(&mgmt_chan_list_lock); + return -EALREADY; + } + + list_add_tail(&c->list, &mgmt_chan_list); + + mutex_unlock(&mgmt_chan_list_lock); + + return 0; +} +EXPORT_SYMBOL(hci_mgmt_chan_register); + +void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c) +{ + mutex_lock(&mgmt_chan_list_lock); + list_del(&c->list); + 
mutex_unlock(&mgmt_chan_list_lock); +} +EXPORT_SYMBOL(hci_mgmt_chan_unregister); + static int hci_sock_release(struct socket *sock) { struct sock *sk = sock->sk; @@ -421,7 +504,7 @@ static int hci_sock_release(struct socket *sock) if (hdev) { if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { mgmt_index_added(hdev); - clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); hci_dev_close(hdev->id); } @@ -481,10 +564,10 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, if (!hdev) return -EBADFD; - if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) return -EOPNOTSUPP; if (hdev->dev_type != HCI_BREDR) @@ -660,14 +743,14 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, if (test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || - test_bit(HCI_SETUP, &hdev->dev_flags) || - test_bit(HCI_CONFIG, &hdev->dev_flags)) { + hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG)) { err = -EBUSY; hci_dev_put(hdev); goto done; } - if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { + if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) { err = -EUSERS; hci_dev_put(hdev); goto done; @@ -677,7 +760,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, err = hci_dev_open(hdev->id); if (err) { - clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); mgmt_index_added(hdev); hci_dev_put(hdev); goto done; @@ -688,38 +771,62 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, hci_pi(sk)->hdev = hdev; break; - case HCI_CHANNEL_CONTROL: + case HCI_CHANNEL_MONITOR: if (haddr.hci_dev != HCI_DEV_NONE) { err = -EINVAL; goto done; } - if (!capable(CAP_NET_ADMIN)) { + if (!capable(CAP_NET_RAW)) { err = -EPERM; goto done; } + /* The monitor interface is restricted to CAP_NET_RAW + * capabilities and with that implicitly trusted. + */ + hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); + + send_monitor_replay(sk); + + atomic_inc(&monitor_promisc); break; - case HCI_CHANNEL_MONITOR: - if (haddr.hci_dev != HCI_DEV_NONE) { + default: + if (!hci_mgmt_chan_find(haddr.hci_channel)) { err = -EINVAL; goto done; } - if (!capable(CAP_NET_RAW)) { - err = -EPERM; + if (haddr.hci_dev != HCI_DEV_NONE) { + err = -EINVAL; goto done; } - send_monitor_replay(sk); - - atomic_inc(&monitor_promisc); + /* Users with CAP_NET_ADMIN capabilities are allowed + * access to all management commands and events. For + * untrusted users the interface is restricted and + * only untrusted events are sent. + */ + if (capable(CAP_NET_ADMIN)) + hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); + + /* At the moment the index and unconfigured index events + * are enabled unconditionally. Setting them on each + * socket when binding keeps this functionality. They + * might however be cleared later, and sending of these + * events will then be disabled, but that is intentional. + * + * This also enables generic events that are safe to be + * received by untrusted users. Examples of such events + * are changes to settings, class of device, name etc.
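+ * + * Events carrying security sensitive material, for example new + * link keys or long term keys, are still sent with the + * HCI_SOCK_TRUSTED requirement and therefore never reach an + * untrusted socket.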
+ */ + if (haddr.hci_channel == HCI_CHANNEL_CONTROL) { + hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS); + hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); + hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS); + } break; - - default: - err = -EINVAL; - goto done; } @@ -833,10 +940,13 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, hci_sock_cmsg(sk, msg, skb); break; case HCI_CHANNEL_USER: - case HCI_CHANNEL_CONTROL: case HCI_CHANNEL_MONITOR: sock_recv_timestamp(msg, sk, skb); break; + default: + if (hci_mgmt_chan_find(hci_pi(sk)->channel)) + sock_recv_timestamp(msg, sk, skb); + break; } skb_free_datagram(sk, skb); @@ -844,10 +954,122 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, return err ? : copied; } +static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk, + struct msghdr *msg, size_t msglen) +{ + void *buf; + u8 *cp; + struct mgmt_hdr *hdr; + u16 opcode, index, len; + struct hci_dev *hdev = NULL; + const struct hci_mgmt_handler *handler; + bool var_len, no_hdev; + int err; + + BT_DBG("got %zu bytes", msglen); + + if (msglen < sizeof(*hdr)) + return -EINVAL; + + buf = kmalloc(msglen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (memcpy_from_msg(buf, msg, msglen)) { + err = -EFAULT; + goto done; + } + + hdr = buf; + opcode = __le16_to_cpu(hdr->opcode); + index = __le16_to_cpu(hdr->index); + len = __le16_to_cpu(hdr->len); + + if (len != msglen - sizeof(*hdr)) { + err = -EINVAL; + goto done; + } + + if (opcode >= chan->handler_count || + chan->handlers[opcode].func == NULL) { + BT_DBG("Unknown op %u", opcode); + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_UNKNOWN_COMMAND); + goto done; + } + + handler = &chan->handlers[opcode]; + + if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) && + !(handler->flags & HCI_MGMT_UNTRUSTED)) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_PERMISSION_DENIED); + goto done; + } + + if (index != MGMT_INDEX_NONE) { + hdev = hci_dev_get(index); + if (!hdev) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_INVALID_INDEX); + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG) || + hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_INVALID_INDEX); + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !(handler->flags & HCI_MGMT_UNCONFIGURED)) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_INVALID_INDEX); + goto done; + } + } + + no_hdev = (handler->flags & HCI_MGMT_NO_HDEV); + if (no_hdev != !hdev) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_INVALID_INDEX); + goto done; + } + + var_len = (handler->flags & HCI_MGMT_VAR_LEN); + if ((var_len && len < handler->data_len) || + (!var_len && len != handler->data_len)) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_INVALID_PARAMS); + goto done; + } + + if (hdev && chan->hdev_init) + chan->hdev_init(sk, hdev); + + cp = buf + sizeof(*hdr); + + err = handler->func(sk, hdev, cp, len); + if (err < 0) + goto done; + + err = msglen; + +done: + if (hdev) + hci_dev_put(hdev); + + kfree(buf); + return err; +} + static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; + struct hci_mgmt_chan *chan; struct hci_dev *hdev; struct sk_buff *skb; int err; @@ -869,14 +1091,18 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, case HCI_CHANNEL_RAW: case HCI_CHANNEL_USER: break; - case 
HCI_CHANNEL_CONTROL: - err = mgmt_control(sk, msg, len); - goto done; case HCI_CHANNEL_MONITOR: err = -EOPNOTSUPP; goto done; default: - err = -EINVAL; + mutex_lock(&mgmt_chan_list_lock); + chan = __hci_mgmt_chan_find(hci_pi(sk)->channel); + if (chan) + err = hci_mgmt_cmd(chan, sk, msg, len); + else + err = -EINVAL; + + mutex_unlock(&mgmt_chan_list_lock); goto done; } diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 91c682846bcf..d69861c89bb5 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -3900,7 +3900,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn, return -EPROTO; hci_dev_lock(hdev); - if (test_bit(HCI_MGMT, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_MGMT) && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) mgmt_device_connected(hdev, hcon, 0, NULL, 0); hci_dev_unlock(hdev); @@ -6987,12 +6987,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS; if (hcon->type == ACL_LINK && - test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags)) + hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED)) conn->local_fixed_chan |= L2CAP_FC_A2MP; - if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) && + if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) && (bredr_sc_enabled(hcon->hdev) || - test_bit(HCI_FORCE_BREDR_SMP, &hcon->hdev->dbg_flags))) + hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP))) conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR; mutex_init(&conn->ident_lock); @@ -7112,7 +7112,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, else dst_type = ADDR_LE_DEV_RANDOM; - if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) role = HCI_ROLE_SLAVE; else role = HCI_ROLE_MASTER; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 1e4635a3374d..f3a957905193 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -35,9 +35,10 @@ #include "hci_request.h" #include "smp.h" +#include "mgmt_util.h" #define MGMT_VERSION 1 -#define MGMT_REVISION 8 +#define MGMT_REVISION 9 static const u16 mgmt_commands[] = { MGMT_OP_READ_INDEX_LIST, @@ -96,6 +97,9 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_OP_SET_PUBLIC_ADDRESS, MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, + MGMT_OP_READ_EXT_INDEX_LIST, + MGMT_OP_READ_ADV_FEATURES, }; static const u16 mgmt_events[] = { @@ -128,6 +132,9 @@ static const u16 mgmt_events[] = { MGMT_EV_UNCONF_INDEX_ADDED, MGMT_EV_UNCONF_INDEX_REMOVED, MGMT_EV_NEW_CONFIG_OPTIONS, + MGMT_EV_EXT_INDEX_ADDED, + MGMT_EV_EXT_INDEX_REMOVED, + MGMT_EV_LOCAL_OOB_DATA_UPDATED, }; #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) @@ -135,17 +142,6 @@ static const u16 mgmt_events[] = { #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ "\x00\x00\x00\x00\x00\x00\x00\x00" -struct pending_cmd { - struct list_head list; - u16 opcode; - int index; - void *param; - size_t param_len; - struct sock *sk; - void *user_data; - int (*cmd_complete)(struct pending_cmd *cmd, u8 status); -}; - /* HCI to MGMT error code conversion table */ static u8 mgmt_status_table[] = { MGMT_STATUS_SUCCESS, @@ -219,98 +215,32 @@ static u8 mgmt_status(u8 hci_status) return MGMT_STATUS_FAILED; } -static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len, - struct sock *skip_sk) +static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data, + u16 len, int flag) { - struct sk_buff *skb; - struct mgmt_hdr *hdr; - - skb = 
alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - hdr = (void *) skb_put(skb, sizeof(*hdr)); - hdr->opcode = cpu_to_le16(event); - if (hdev) - hdr->index = cpu_to_le16(hdev->id); - else - hdr->index = cpu_to_le16(MGMT_INDEX_NONE); - hdr->len = cpu_to_le16(data_len); - - if (data) - memcpy(skb_put(skb, data_len), data, data_len); - - /* Time stamp */ - __net_timestamp(skb); - - hci_send_to_channel(HCI_CHANNEL_CONTROL, skb, skip_sk); - kfree_skb(skb); - - return 0; + return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, + flag, NULL); } -static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) +static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data, + u16 len, int flag, struct sock *skip_sk) { - struct sk_buff *skb; - struct mgmt_hdr *hdr; - struct mgmt_ev_cmd_status *ev; - int err; - - BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); - - skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL); - if (!skb) - return -ENOMEM; - - hdr = (void *) skb_put(skb, sizeof(*hdr)); - - hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); - hdr->index = cpu_to_le16(index); - hdr->len = cpu_to_le16(sizeof(*ev)); - - ev = (void *) skb_put(skb, sizeof(*ev)); - ev->status = status; - ev->opcode = cpu_to_le16(cmd); - - err = sock_queue_rcv_skb(sk, skb); - if (err < 0) - kfree_skb(skb); - - return err; + return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, + flag, skip_sk); } -static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, - void *rp, size_t rp_len) +static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data, + u16 len, struct sock *skip_sk) { - struct sk_buff *skb; - struct mgmt_hdr *hdr; - struct mgmt_ev_cmd_complete *ev; - int err; - - BT_DBG("sock %p", sk); - - skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - hdr = (void *) skb_put(skb, sizeof(*hdr)); - - hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); - hdr->index = cpu_to_le16(index); - hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); - - ev = (void *) skb_put(skb, sizeof(*ev) + rp_len); - ev->opcode = cpu_to_le16(cmd); - ev->status = status; - - if (rp) - memcpy(ev->data, rp, rp_len); - - err = sock_queue_rcv_skb(sk, skb); - if (err < 0) - kfree_skb(skb); + return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, + HCI_MGMT_GENERIC_EVENTS, skip_sk); +} - return err; +static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len, + struct sock *skip_sk) +{ + return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, + HCI_SOCK_TRUSTED, skip_sk); } static int read_version(struct sock *sk, struct hci_dev *hdev, void *data, @@ -323,8 +253,8 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data, rp.version = MGMT_VERSION; rp.revision = cpu_to_le16(MGMT_REVISION); - return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp, - sizeof(rp)); + return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, + &rp, sizeof(rp)); } static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data, @@ -354,8 +284,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data, for (i = 0; i < num_events; i++, opcode++) put_unaligned_le16(mgmt_events[i], opcode); - err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp, - rp_size); + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, + rp, rp_size); kfree(rp); return err; @@ -377,7 +307,7 @@ 
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, count = 0; list_for_each_entry(d, &hci_dev_list, list) { if (d->dev_type == HCI_BREDR && - !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) + !hci_dev_test_flag(d, HCI_UNCONFIGURED)) count++; } @@ -390,9 +320,9 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, count = 0; list_for_each_entry(d, &hci_dev_list, list) { - if (test_bit(HCI_SETUP, &d->dev_flags) || - test_bit(HCI_CONFIG, &d->dev_flags) || - test_bit(HCI_USER_CHANNEL, &d->dev_flags)) + if (hci_dev_test_flag(d, HCI_SETUP) || + hci_dev_test_flag(d, HCI_CONFIG) || + hci_dev_test_flag(d, HCI_USER_CHANNEL)) continue; /* Devices marked as raw-only are neither configured @@ -402,7 +332,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, continue; if (d->dev_type == HCI_BREDR && - !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) { + !hci_dev_test_flag(d, HCI_UNCONFIGURED)) { rp->index[count++] = cpu_to_le16(d->id); BT_DBG("Added hci%u", d->id); } @@ -413,8 +343,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, read_unlock(&hci_dev_list_lock); - err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp, - rp_len); + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, + 0, rp, rp_len); kfree(rp); @@ -437,7 +367,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev, count = 0; list_for_each_entry(d, &hci_dev_list, list) { if (d->dev_type == HCI_BREDR && - test_bit(HCI_UNCONFIGURED, &d->dev_flags)) + hci_dev_test_flag(d, HCI_UNCONFIGURED)) count++; } @@ -450,9 +380,9 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev, count = 0; list_for_each_entry(d, &hci_dev_list, list) { - if (test_bit(HCI_SETUP, &d->dev_flags) || - test_bit(HCI_CONFIG, &d->dev_flags) || - test_bit(HCI_USER_CHANNEL, &d->dev_flags)) + if (hci_dev_test_flag(d, HCI_SETUP) || + hci_dev_test_flag(d, HCI_CONFIG) || + hci_dev_test_flag(d, HCI_USER_CHANNEL)) continue; /* Devices marked as raw-only are neither configured @@ -462,7 +392,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev, continue; if (d->dev_type == HCI_BREDR && - test_bit(HCI_UNCONFIGURED, &d->dev_flags)) { + hci_dev_test_flag(d, HCI_UNCONFIGURED)) { rp->index[count++] = cpu_to_le16(d->id); BT_DBG("Added hci%u", d->id); } @@ -473,8 +403,84 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev, read_unlock(&hci_dev_list_lock); - err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST, - 0, rp, rp_len); + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, + MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len); + + kfree(rp); + + return err; +} + +static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_rp_read_ext_index_list *rp; + struct hci_dev *d; + size_t rp_len; + u16 count; + int err; + + BT_DBG("sock %p", sk); + + read_lock(&hci_dev_list_lock); + + count = 0; + list_for_each_entry(d, &hci_dev_list, list) { + if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP) + count++; + } + + rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count); + rp = kmalloc(rp_len, GFP_ATOMIC); + if (!rp) { + read_unlock(&hci_dev_list_lock); + return -ENOMEM; + } + + count = 0; + list_for_each_entry(d, &hci_dev_list, list) { + if (hci_dev_test_flag(d, HCI_SETUP) || + hci_dev_test_flag(d, HCI_CONFIG) || + hci_dev_test_flag(d, HCI_USER_CHANNEL)) + continue; + + /* Devices marked as raw-only 
are neither configured + * nor unconfigured controllers. + */ + if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) + continue; + + if (d->dev_type == HCI_BREDR) { + if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) + rp->entry[count].type = 0x01; + else + rp->entry[count].type = 0x00; + } else if (d->dev_type == HCI_AMP) { + rp->entry[count].type = 0x02; + } else { + continue; + } + + rp->entry[count].bus = d->bus; + rp->entry[count++].index = cpu_to_le16(d->id); + BT_DBG("Added hci%u", d->id); + } + + rp->num_controllers = cpu_to_le16(count); + rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count); + + read_unlock(&hci_dev_list_lock); + + /* If this command is called at least once, then all the + * default index and unconfigured index events are disabled + * and from now on only extended index events are used. + */ + hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS); + hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS); + hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); + + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, + MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len); kfree(rp); @@ -484,7 +490,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev, static bool is_configured(struct hci_dev *hdev) { if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && - !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags)) + !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED)) return false; if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) && @@ -499,7 +505,7 @@ static __le32 get_missing_options(struct hci_dev *hdev) u32 options = 0; if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && - !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags)) + !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED)) options |= MGMT_OPTION_EXTERNAL_CONFIG; if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) && @@ -513,16 +519,16 @@ static int new_options(struct hci_dev *hdev, struct sock *skip) { __le32 options = get_missing_options(hdev); - return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options, - sizeof(options), skip); + return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options, + sizeof(options), skip); } static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) { __le32 options = get_missing_options(hdev); - return cmd_complete(sk, hdev->id, opcode, 0, &options, - sizeof(options)); + return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options, + sizeof(options)); } static int read_config_info(struct sock *sk, struct hci_dev *hdev, @@ -549,8 +555,8 @@ static int read_config_info(struct sock *sk, struct hci_dev *hdev, hci_dev_unlock(hdev); - return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp, - sizeof(rp)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, + &rp, sizeof(rp)); } static u32 get_supported_settings(struct hci_dev *hdev) @@ -583,6 +589,7 @@ static u32 get_supported_settings(struct hci_dev *hdev) settings |= MGMT_SETTING_ADVERTISING; settings |= MGMT_SETTING_SECURE_CONN; settings |= MGMT_SETTING_PRIVACY; + settings |= MGMT_SETTING_STATIC_ADDRESS; } if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || @@ -599,45 +606,64 @@ static u32 get_current_settings(struct hci_dev *hdev) if (hdev_is_powered(hdev)) settings |= MGMT_SETTING_POWERED; - if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_CONNECTABLE)) settings |= MGMT_SETTING_CONNECTABLE; - if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) settings |= MGMT_SETTING_FAST_CONNECTABLE; - if 
(test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) settings |= MGMT_SETTING_DISCOVERABLE; - if (test_bit(HCI_BONDABLE, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_BONDABLE)) settings |= MGMT_SETTING_BONDABLE; - if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) settings |= MGMT_SETTING_BREDR; - if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) settings |= MGMT_SETTING_LE; - if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) settings |= MGMT_SETTING_LINK_SECURITY; - if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) settings |= MGMT_SETTING_SSP; - if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_HS_ENABLED)) settings |= MGMT_SETTING_HS; - if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) settings |= MGMT_SETTING_ADVERTISING; - if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) settings |= MGMT_SETTING_SECURE_CONN; - if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) settings |= MGMT_SETTING_DEBUG_KEYS; - if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) settings |= MGMT_SETTING_PRIVACY; + /* The current setting for static address has two purposes. The + * first is to indicate if the static address will be used and + * the second is to indicate if it is actually set. + * + * This means if the static address is not configured, this flag + * will never be set. If the address is configured, then whether + * the address is actually in use decides if the flag is set or not. + * + * For single mode LE only controllers and dual-mode controllers + * with BR/EDR disabled, the existence of the static address will + * be evaluated.
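+ * + * For example, a dual-mode controller with BR/EDR disabled and + * hdev->static_addr programmed will report + * MGMT_SETTING_STATIC_ADDRESS, while the same controller with + * BR/EDR enabled and a public address will not.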
+ */ + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || + !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) || + !bacmp(&hdev->bdaddr, BDADDR_ANY)) { + if (bacmp(&hdev->static_addr, BDADDR_ANY)) + settings |= MGMT_SETTING_STATIC_ADDRESS; + } + return settings; } @@ -751,32 +777,16 @@ static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) return ptr; } -static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev) +static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev) { - struct pending_cmd *cmd; - - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { - if (cmd->opcode == opcode) - return cmd; - } - - return NULL; + return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev); } -static struct pending_cmd *mgmt_pending_find_data(u16 opcode, +static struct mgmt_pending_cmd *pending_find_data(u16 opcode, struct hci_dev *hdev, const void *data) { - struct pending_cmd *cmd; - - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { - if (cmd->user_data != data) - continue; - if (cmd->opcode == opcode) - return cmd; - } - - return NULL; + return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data); } static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) @@ -811,7 +821,7 @@ static void update_scan_rsp_data(struct hci_request *req) struct hci_cp_le_set_scan_rsp_data cp; u8 len; - if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; memset(&cp, 0, sizeof(cp)); @@ -832,12 +842,12 @@ static void update_scan_rsp_data(struct hci_request *req) static u8 get_adv_discov_flags(struct hci_dev *hdev) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; /* If there's a pending mgmt command the flags will not yet have * their final values, so check for this first. 
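 * For example, while a SET_DISCOVERABLE command with val 0x02 is * still pending, the flags must already report LE_AD_LIMITED even * though HCI_LIMITED_DISCOVERABLE has not been set yet.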
*/ - cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); + cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); if (cmd) { struct mgmt_mode *cp = cmd->param; if (cp->val == 0x01) @@ -845,9 +855,9 @@ static u8 get_adv_discov_flags(struct hci_dev *hdev) else if (cp->val == 0x02) return LE_AD_LIMITED; } else { - if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) return LE_AD_LIMITED; - else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) + else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) return LE_AD_GENERAL; } @@ -860,7 +870,7 @@ static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr) flags |= get_adv_discov_flags(hdev); - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) flags |= LE_AD_NO_BREDR; if (flags) { @@ -892,7 +902,7 @@ static void update_adv_data(struct hci_request *req) struct hci_cp_le_set_adv_data cp; u8 len; - if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; memset(&cp, 0, sizeof(cp)); @@ -980,10 +990,10 @@ static void update_eir(struct hci_request *req) if (!lmp_ext_inq_capable(hdev)) return; - if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return; - if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) return; memset(&cp, 0, sizeof(cp)); @@ -1019,17 +1029,17 @@ static void update_class(struct hci_request *req) if (!hdev_is_powered(hdev)) return; - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return; - if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) return; cod[0] = hdev->minor_class; cod[1] = hdev->major_class; cod[2] = get_service_classes(hdev); - if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) cod[1] |= 0x20; if (memcmp(cod, hdev->dev_class, 3) == 0) @@ -1040,18 +1050,18 @@ static void update_class(struct hci_request *req) static bool get_connectable(struct hci_dev *hdev) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; /* If there's a pending mgmt command the flag will not yet have * it's final value, so check for this first. */ - cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev); + cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev); if (cmd) { struct mgmt_mode *cp = cmd->param; return cp->val; } - return test_bit(HCI_CONNECTABLE, &hdev->dev_flags); + return hci_dev_test_flag(hdev, HCI_CONNECTABLE); } static void disable_advertising(struct hci_request *req) @@ -1071,7 +1081,7 @@ static void enable_advertising(struct hci_request *req) if (hci_conn_num(hdev, LE_LINK) > 0) return; - if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) disable_advertising(req); /* Clear the HCI_LE_ADV bit temporarily so that the @@ -1079,9 +1089,12 @@ static void enable_advertising(struct hci_request *req) * and write a new random address. The flag will be set back on * as soon as the SET_ADV_ENABLE HCI command completes. */ - clear_bit(HCI_LE_ADV, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LE_ADV); - connectable = get_connectable(hdev); + if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) + connectable = true; + else + connectable = get_connectable(hdev); /* Set require_privacy to true only when non-connectable * advertising is used. 
In that case it is fine to use a @@ -1108,7 +1121,7 @@ static void service_cache_off(struct work_struct *work) service_cache.work); struct hci_request req; - if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) + if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) return; hci_req_init(&req, hdev); @@ -1131,9 +1144,9 @@ static void rpa_expired(struct work_struct *work) BT_DBG(""); - set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); - if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_ADVERTISING)) return; /* The generation of a new RPA and programming it into the @@ -1146,7 +1159,7 @@ static void rpa_expired(struct work_struct *work) static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) { - if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags)) + if (hci_dev_test_and_set_flag(hdev, HCI_MGMT)) return; INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); @@ -1157,7 +1170,7 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) * for mgmt we require user-space to explicitly enable * it */ - clear_bit(HCI_BONDABLE, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_BONDABLE); } static int read_controller_info(struct sock *sk, struct hci_dev *hdev, @@ -1186,73 +1199,16 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev, hci_dev_unlock(hdev); - return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp, - sizeof(rp)); -} - -static void mgmt_pending_free(struct pending_cmd *cmd) -{ - sock_put(cmd->sk); - kfree(cmd->param); - kfree(cmd); -} - -static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, - struct hci_dev *hdev, void *data, - u16 len) -{ - struct pending_cmd *cmd; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return NULL; - - cmd->opcode = opcode; - cmd->index = hdev->id; - - cmd->param = kmemdup(data, len, GFP_KERNEL); - if (!cmd->param) { - kfree(cmd); - return NULL; - } - - cmd->param_len = len; - - cmd->sk = sk; - sock_hold(sk); - - list_add(&cmd->list, &hdev->mgmt_pending); - - return cmd; -} - -static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, - void (*cb)(struct pending_cmd *cmd, - void *data), - void *data) -{ - struct pending_cmd *cmd, *tmp; - - list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { - if (opcode > 0 && cmd->opcode != opcode) - continue; - - cb(cmd, data); - } -} - -static void mgmt_pending_remove(struct pending_cmd *cmd) -{ - list_del(&cmd->list); - mgmt_pending_free(cmd); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp, + sizeof(rp)); } static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) { __le32 settings = cpu_to_le32(get_current_settings(hdev)); - return cmd_complete(sk, hdev->id, opcode, 0, &settings, - sizeof(settings)); + return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings, + sizeof(settings)); } static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode) @@ -1273,9 +1229,10 @@ static bool hci_stop_discovery(struct hci_request *req) switch (hdev->discovery.state) { case DISCOVERY_FINDING: - if (test_bit(HCI_INQUIRY, &hdev->flags)) { + if (test_bit(HCI_INQUIRY, &hdev->flags)) hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL); - } else { + + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { cancel_delayed_work(&hdev->le_scan_disable); hci_req_add_le_scan_disable(req); } @@ -1296,7 +1253,7 @@ static bool hci_stop_discovery(struct hci_request *req) default: /* Passive scanning */ - if 
(test_bit(HCI_LE_SCAN, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { hci_req_add_le_scan_disable(req); return true; } @@ -1322,7 +1279,7 @@ static int clean_up_hci_state(struct hci_dev *hdev) hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } - if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) disable_advertising(&req); discov_stopped = hci_stop_discovery(&req); @@ -1370,24 +1327,24 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; int err; BT_DBG("request for %s", hdev->name); if (cp->val != 0x00 && cp->val != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); - if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_POWERED, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, + MGMT_STATUS_BUSY); goto failed; } - if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { + if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { cancel_delayed_work(&hdev->power_off); if (cp->val) { @@ -1434,11 +1391,10 @@ failed: static int new_settings(struct hci_dev *hdev, struct sock *skip) { - __le32 ev; - - ev = cpu_to_le32(get_current_settings(hdev)); + __le32 ev = cpu_to_le32(get_current_settings(hdev)); - return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip); + return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, + sizeof(ev), skip); } int mgmt_new_settings(struct hci_dev *hdev) @@ -1452,7 +1408,7 @@ struct cmd_lookup { u8 mgmt_status; }; -static void settings_rsp(struct pending_cmd *cmd, void *data) +static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct cmd_lookup *match = data; @@ -1468,15 +1424,15 @@ static void settings_rsp(struct pending_cmd *cmd, void *data) mgmt_pending_free(cmd); } -static void cmd_status_rsp(struct pending_cmd *cmd, void *data) +static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data) { u8 *status = data; - cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); mgmt_pending_remove(cmd); } -static void cmd_complete_rsp(struct pending_cmd *cmd, void *data) +static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) { if (cmd->cmd_complete) { u8 *status = data; @@ -1490,23 +1446,23 @@ static void cmd_complete_rsp(struct pending_cmd *cmd, void *data) cmd_status_rsp(cmd, data); } -static int generic_cmd_complete(struct pending_cmd *cmd, u8 status) +static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { - return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, - cmd->param, cmd->param_len); + return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + cmd->param, cmd->param_len); } -static int addr_cmd_complete(struct pending_cmd *cmd, u8 status) +static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { - return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, - sizeof(struct mgmt_addr_info)); + return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + cmd->param, sizeof(struct mgmt_addr_info)); } static u8 mgmt_bredr_support(struct hci_dev *hdev) { if (!lmp_bredr_capable(hdev)) return MGMT_STATUS_NOT_SUPPORTED; - else if 
(!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) + else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return MGMT_STATUS_REJECTED; else return MGMT_STATUS_SUCCESS; @@ -1516,7 +1472,7 @@ static u8 mgmt_le_support(struct hci_dev *hdev) { if (!lmp_le_capable(hdev)) return MGMT_STATUS_NOT_SUPPORTED; - else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) + else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return MGMT_STATUS_REJECTED; else return MGMT_STATUS_SUCCESS; @@ -1525,7 +1481,7 @@ static u8 mgmt_le_support(struct hci_dev *hdev) static void set_discoverable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct mgmt_mode *cp; struct hci_request req; bool changed; @@ -1534,21 +1490,20 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status, hci_dev_lock(hdev); - cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); + cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); if (!cmd) goto unlock; if (status) { u8 mgmt_err = mgmt_status(status); - cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); - clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); goto remove_cmd; } cp = cmd->param; if (cp->val) { - changed = !test_and_set_bit(HCI_DISCOVERABLE, - &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE); if (hdev->discov_timeout > 0) { int to = msecs_to_jiffies(hdev->discov_timeout * 1000); @@ -1556,8 +1511,7 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status, to); } } else { - changed = test_and_clear_bit(HCI_DISCOVERABLE, - &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE); } send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev); @@ -1586,7 +1540,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_discoverable *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; u16 timeout; u8 scan; @@ -1594,14 +1548,14 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, BT_DBG("request for %s", hdev->name); - if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && - !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, - MGMT_STATUS_REJECTED); + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) && + !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) - return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_INVALID_PARAMS); timeout = __le16_to_cpu(cp->timeout); @@ -1610,27 +1564,27 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, */ if ((cp->val == 0x00 && timeout > 0) || (cp->val == 0x02 && timeout == 0)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev) && timeout > 0) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, - MGMT_STATUS_NOT_POWERED); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_NOT_POWERED); goto failed; } - if 
(mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || + pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_BUSY); goto failed; } - if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, - MGMT_STATUS_REJECTED); + if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_REJECTED); goto failed; } @@ -1641,8 +1595,8 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, * not a valid operation since it requires a timeout * and so no need to check HCI_LIMITED_DISCOVERABLE. */ - if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) { - change_bit(HCI_DISCOVERABLE, &hdev->dev_flags); + if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) { + hci_dev_change_flag(hdev, HCI_DISCOVERABLE); changed = true; } @@ -1660,9 +1614,9 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, * value with the new value. And if only the timeout gets updated, * then no need for any HCI transactions. */ - if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) && - (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE, - &hdev->dev_flags)) { + if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && + (cp->val == 0x02) == hci_dev_test_flag(hdev, + HCI_LIMITED_DISCOVERABLE)) { cancel_delayed_work(&hdev->discov_off); hdev->discov_timeout = timeout; @@ -1691,16 +1645,16 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, /* Limited discoverable mode */ if (cp->val == 0x02) - set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE); else - clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); hci_req_init(&req, hdev); /* The procedure for LE-only controllers is much simpler - just * update the advertising data. 
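 * (Scan enable writes only apply to BR/EDR; an LE-only controller * signals discoverability solely through the flags field of its * advertising data.)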
*/ - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) goto update_ad; scan = SCAN_PAGE; @@ -1730,7 +1684,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, scan |= SCAN_INQUIRY; } else { - clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); } hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); @@ -1753,7 +1707,7 @@ static void write_fast_connectable(struct hci_request *req, bool enable) struct hci_cp_write_page_scan_activity acp; u8 type; - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return; if (hdev->hci_ver < BLUETOOTH_VER_1_2) @@ -1785,7 +1739,7 @@ static void write_fast_connectable(struct hci_request *req, bool enable) static void set_connectable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct mgmt_mode *cp; bool conn_changed, discov_changed; @@ -1793,26 +1747,26 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status, hci_dev_lock(hdev); - cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev); + cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev); if (!cmd) goto unlock; if (status) { u8 mgmt_err = mgmt_status(status); - cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); goto remove_cmd; } cp = cmd->param; if (cp->val) { - conn_changed = !test_and_set_bit(HCI_CONNECTABLE, - &hdev->dev_flags); + conn_changed = !hci_dev_test_and_set_flag(hdev, + HCI_CONNECTABLE); discov_changed = false; } else { - conn_changed = test_and_clear_bit(HCI_CONNECTABLE, - &hdev->dev_flags); - discov_changed = test_and_clear_bit(HCI_DISCOVERABLE, - &hdev->dev_flags); + conn_changed = hci_dev_test_and_clear_flag(hdev, + HCI_CONNECTABLE); + discov_changed = hci_dev_test_and_clear_flag(hdev, + HCI_DISCOVERABLE); } send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); @@ -1838,14 +1792,14 @@ static int set_connectable_update_settings(struct hci_dev *hdev, bool changed = false; int err; - if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) + if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE)) changed = true; if (val) { - set_bit(HCI_CONNECTABLE, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_CONNECTABLE); } else { - clear_bit(HCI_CONNECTABLE, &hdev->dev_flags); - clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_CONNECTABLE); + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); } err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); @@ -1865,21 +1819,21 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; u8 scan; int err; BT_DBG("request for %s", hdev->name); - if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && - !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, - MGMT_STATUS_REJECTED); + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) && + !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, + MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, + MGMT_STATUS_INVALID_PARAMS); 
hci_dev_lock(hdev); @@ -1888,10 +1842,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || + pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, + MGMT_STATUS_BUSY); goto failed; } @@ -1907,10 +1861,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, * by-product of disabling connectable, we need to update the * advertising flags. */ - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { if (!cp->val) { - clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); - clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); } update_adv_data(&req); } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) { @@ -1939,17 +1893,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, } no_scan_update: - /* If we're going from non-connectable to connectable or - * vice-versa when fast connectable is enabled ensure that fast - * connectable gets disabled. write_fast_connectable won't do - * anything if the page scan parameters are already what they - * should be. - */ - if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) - write_fast_connectable(&req, false); - /* Update the advertising parameters if necessary */ - if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) enable_advertising(&req); err = hci_req_run(&req, set_connectable_complete); @@ -1976,15 +1921,15 @@ static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data, BT_DBG("request for %s", hdev->name); if (cp->val != 0x00 && cp->val != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (cp->val) - changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE); else - changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE); err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev); if (err < 0) @@ -2002,7 +1947,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; u8 val, status; int err; @@ -2010,21 +1955,20 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, status = mgmt_bredr_support(hdev); if (status) - return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, - status); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, + status); if (cp->val != 0x00 && cp->val != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { bool changed = false; - if (!!cp->val != test_bit(HCI_LINK_SECURITY, - &hdev->dev_flags)) { - change_bit(HCI_LINK_SECURITY, &hdev->dev_flags); + if (!!cp->val != 
hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { + hci_dev_change_flag(hdev, HCI_LINK_SECURITY); changed = true; } @@ -2038,9 +1982,9 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, + MGMT_STATUS_BUSY); goto failed; } @@ -2071,7 +2015,7 @@ failed: static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; u8 status; int err; @@ -2079,15 +2023,15 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) status = mgmt_bredr_support(hdev); if (status) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status); if (!lmp_ssp_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, + MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); @@ -2095,16 +2039,16 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) bool changed; if (cp->val) { - changed = !test_and_set_bit(HCI_SSP_ENABLED, - &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, + HCI_SSP_ENABLED); } else { - changed = test_and_clear_bit(HCI_SSP_ENABLED, - &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, + HCI_SSP_ENABLED); if (!changed) - changed = test_and_clear_bit(HCI_HS_ENABLED, - &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, + HCI_HS_ENABLED); else - clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); @@ -2117,13 +2061,13 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_SSP, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, + MGMT_STATUS_BUSY); goto failed; } - if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { + if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); goto failed; } @@ -2134,7 +2078,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) goto failed; } - if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) + if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(cp->val), &cp->val); @@ -2160,38 +2104,38 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) status = mgmt_bredr_support(hdev); if (status) - return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); if (!lmp_ssp_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_NOT_SUPPORTED); - if (!test_bit(HCI_SSP_ENABLED, 
&hdev->dev_flags)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_REJECTED); + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); - if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_SSP, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_BUSY); goto unlock; } if (cp->val) { - changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED); } else { if (hdev_is_powered(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_REJECTED); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_REJECTED); goto unlock; } - changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED); } err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev); @@ -2232,7 +2176,7 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) * has actually been enabled. During power on, the * update in powered_update_hci will take care of it. */ - if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { struct hci_request req; hci_req_init(&req, hdev); @@ -2250,7 +2194,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct hci_cp_write_le_host_supported hci_cp; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; int err; u8 val, enabled; @@ -2258,17 +2202,17 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) BT_DBG("request for %s", hdev->name); if (!lmp_le_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_LE, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_LE, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_INVALID_PARAMS); /* LE-only devices do not allow toggling LE on/off */ - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_LE, - MGMT_STATUS_REJECTED); + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_REJECTED); hci_dev_lock(hdev); @@ -2278,13 +2222,13 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) if (!hdev_is_powered(hdev) || val == enabled) { bool changed = false; - if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { - change_bit(HCI_LE_ENABLED, &hdev->dev_flags); + if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + hci_dev_change_flag(hdev, HCI_LE_ENABLED); changed = true; } - if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { - clear_bit(HCI_ADVERTISING, &hdev->dev_flags); + if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) { + hci_dev_clear_flag(hdev, HCI_ADVERTISING); changed = true; } @@ -2298,10 +2242,10 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) goto unlock; } - if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) || - 
mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_LE, hdev) || + pending_find(MGMT_OP_SET_ADVERTISING, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_BUSY); goto unlock; } @@ -2319,7 +2263,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) hci_cp.le = val; hci_cp.simul = 0x00; } else { - if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) disable_advertising(&req); } @@ -2343,7 +2287,7 @@ unlock: */ static bool pending_eir_or_class(struct hci_dev *hdev) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; list_for_each_entry(cmd, &hdev->mgmt_pending, list) { switch (cmd->opcode) { @@ -2379,16 +2323,16 @@ static u8 get_uuid_size(const u8 *uuid) static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; hci_dev_lock(hdev); - cmd = mgmt_pending_find(mgmt_op, hdev); + cmd = pending_find(mgmt_op, hdev); if (!cmd) goto unlock; - cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), - hdev->dev_class, 3); + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(status), hdev->dev_class, 3); mgmt_pending_remove(cmd); @@ -2406,7 +2350,7 @@ static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode) static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_add_uuid *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; struct bt_uuid *uuid; int err; @@ -2416,8 +2360,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) hci_dev_lock(hdev); if (pending_eir_or_class(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID, - MGMT_STATUS_BUSY); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID, + MGMT_STATUS_BUSY); goto failed; } @@ -2443,8 +2387,8 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) if (err != -ENODATA) goto failed; - err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0, - hdev->dev_class, 3); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0, + hdev->dev_class, 3); goto failed; } @@ -2466,7 +2410,7 @@ static bool enable_service_cache(struct hci_dev *hdev) if (!hdev_is_powered(hdev)) return false; - if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) { + if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) { queue_delayed_work(hdev->workqueue, &hdev->service_cache, CACHE_TIMEOUT); return true; @@ -2486,7 +2430,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_remove_uuid *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct bt_uuid *match, *tmp; u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; struct hci_request req; @@ -2497,8 +2441,8 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); if (pending_eir_or_class(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, - MGMT_STATUS_BUSY); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, + MGMT_STATUS_BUSY); goto unlock; } @@ -2506,8 +2450,9 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, hci_uuids_clear(hdev); if (enable_service_cache(hdev)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, - 0, hdev->dev_class, 3); + err = mgmt_cmd_complete(sk, 
hdev->id, + MGMT_OP_REMOVE_UUID, + 0, hdev->dev_class, 3); goto unlock; } @@ -2526,8 +2471,8 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, } if (found == 0) { - err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, - MGMT_STATUS_INVALID_PARAMS); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, + MGMT_STATUS_INVALID_PARAMS); goto unlock; } @@ -2542,8 +2487,8 @@ update_class: if (err != -ENODATA) goto unlock; - err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, - hdev->dev_class, 3); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, + hdev->dev_class, 3); goto unlock; } @@ -2571,27 +2516,27 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_dev_class *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; int err; BT_DBG("request for %s", hdev->name); if (!lmp_bredr_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, + MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); if (pending_eir_or_class(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, - MGMT_STATUS_BUSY); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, + MGMT_STATUS_BUSY); goto unlock; } if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, - MGMT_STATUS_INVALID_PARAMS); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, + MGMT_STATUS_INVALID_PARAMS); goto unlock; } @@ -2599,14 +2544,14 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, hdev->minor_class = cp->minor; if (!hdev_is_powered(hdev)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, - hdev->dev_class, 3); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, + hdev->dev_class, 3); goto unlock; } hci_req_init(&req, hdev); - if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) { + if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) { hci_dev_unlock(hdev); cancel_delayed_work_sync(&hdev->service_cache); hci_dev_lock(hdev); @@ -2620,8 +2565,8 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, if (err != -ENODATA) goto unlock; - err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, - hdev->dev_class, 3); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, + hdev->dev_class, 3); goto unlock; } @@ -2651,15 +2596,15 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, BT_DBG("request for %s", hdev->name); if (!lmp_bredr_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, + MGMT_STATUS_NOT_SUPPORTED); key_count = __le16_to_cpu(cp->key_count); if (key_count > max_key_count) { BT_ERR("load_link_keys: too big key_count value %u", key_count); - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, + MGMT_STATUS_INVALID_PARAMS); } expected_len = sizeof(*cp) + key_count * @@ -2667,13 +2612,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, if (expected_len != len) { BT_ERR("load_link_keys: expected %u bytes, got %u bytes", expected_len, len); - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return 
mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, + MGMT_STATUS_INVALID_PARAMS); } if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, + MGMT_STATUS_INVALID_PARAMS); BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, key_count); @@ -2682,8 +2627,9 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, struct mgmt_link_key_info *key = &cp->keys[i]; if (key->addr.type != BDADDR_BREDR || key->type > 0x08) - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_LOAD_LINK_KEYS, + MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); @@ -2691,11 +2637,10 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, hci_link_keys_clear(hdev); if (cp->debug_keys) - changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS, - &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS); else - changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS, - &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, + HCI_KEEP_DEBUG_KEYS); if (changed) new_settings(hdev, NULL); @@ -2713,7 +2658,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, key->type, key->pin_len, NULL); } - cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0); + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0); hci_dev_unlock(hdev); @@ -2738,7 +2683,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, struct mgmt_cp_unpair_device *cp = data; struct mgmt_rp_unpair_device rp; struct hci_cp_disconnect dc; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; @@ -2747,20 +2692,21 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) - return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, - MGMT_STATUS_INVALID_PARAMS, - &rp, sizeof(rp)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); if (cp->disconnect != 0x00 && cp->disconnect != 0x01) - return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, - MGMT_STATUS_INVALID_PARAMS, - &rp, sizeof(rp)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, - MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, + MGMT_STATUS_NOT_POWERED, &rp, + sizeof(rp)); goto unlock; } @@ -2810,8 +2756,9 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, } if (err < 0) { - err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, - MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, + MGMT_STATUS_NOT_PAIRED, &rp, + sizeof(rp)); goto unlock; } @@ -2819,8 +2766,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, * link is requested. 
*/ if (!conn) { - err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0, - &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0, + &rp, sizeof(rp)); device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk); goto unlock; } @@ -2850,7 +2797,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, { struct mgmt_cp_disconnect *cp = data; struct mgmt_rp_disconnect rp; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; @@ -2861,21 +2808,22 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) - return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, - MGMT_STATUS_INVALID_PARAMS, - &rp, sizeof(rp)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); hci_dev_lock(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, - MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, + MGMT_STATUS_NOT_POWERED, &rp, + sizeof(rp)); goto failed; } - if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, - MGMT_STATUS_BUSY, &rp, sizeof(rp)); + if (pending_find(MGMT_OP_DISCONNECT, hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, + MGMT_STATUS_BUSY, &rp, sizeof(rp)); goto failed; } @@ -2886,8 +2834,9 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { - err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, - MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, + MGMT_STATUS_NOT_CONNECTED, &rp, + sizeof(rp)); goto failed; } @@ -2941,8 +2890,8 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, - MGMT_STATUS_NOT_POWERED); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, + MGMT_STATUS_NOT_POWERED); goto unlock; } @@ -2975,8 +2924,8 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data, /* Recalculate length in case of filtered SCO connections, etc */ rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); - err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp, - rp_len); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp, + rp_len); kfree(rp); @@ -2988,7 +2937,7 @@ unlock: static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; int err; cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, @@ -3010,7 +2959,7 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data, struct hci_conn *conn; struct mgmt_cp_pin_code_reply *cp = data; struct hci_cp_pin_code_reply reply; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; int err; BT_DBG(""); @@ -3018,15 +2967,15 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, - MGMT_STATUS_NOT_POWERED); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, + 
MGMT_STATUS_NOT_POWERED); goto failed; } conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); if (!conn) { - err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, - MGMT_STATUS_NOT_CONNECTED); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, + MGMT_STATUS_NOT_CONNECTED); goto failed; } @@ -3039,8 +2988,8 @@ static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data, err = send_pin_code_neg_reply(sk, hdev, &ncp); if (err >= 0) - err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, - MGMT_STATUS_INVALID_PARAMS); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, + MGMT_STATUS_INVALID_PARAMS); goto failed; } @@ -3074,8 +3023,8 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, BT_DBG(""); if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY) - return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, - MGMT_STATUS_INVALID_PARAMS, NULL, 0); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, + MGMT_STATUS_INVALID_PARAMS, NULL, 0); hci_dev_lock(hdev); @@ -3086,14 +3035,14 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_unlock(hdev); - return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL, - 0); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, + NULL, 0); } -static struct pending_cmd *find_pairing(struct hci_conn *conn) +static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; list_for_each_entry(cmd, &hdev->mgmt_pending, list) { if (cmd->opcode != MGMT_OP_PAIR_DEVICE) @@ -3108,7 +3057,7 @@ static struct pending_cmd *find_pairing(struct hci_conn *conn) return NULL; } -static int pairing_complete(struct pending_cmd *cmd, u8 status) +static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status) { struct mgmt_rp_pair_device rp; struct hci_conn *conn = cmd->user_data; @@ -3117,8 +3066,8 @@ static int pairing_complete(struct pending_cmd *cmd, u8 status) bacpy(&rp.addr.bdaddr, &conn->dst); rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type); - err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status, - &rp, sizeof(rp)); + err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, + status, &rp, sizeof(rp)); /* So we don't get further callbacks for this connection */ conn->connect_cfm_cb = NULL; @@ -3140,7 +3089,7 @@ static int pairing_complete(struct pending_cmd *cmd, u8 status) void mgmt_smp_complete(struct hci_conn *conn, bool complete) { u8 status = complete ? 
MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; cmd = find_pairing(conn); if (cmd) { @@ -3151,7 +3100,7 @@ void mgmt_smp_complete(struct hci_conn *conn, bool complete) static void pairing_complete_cb(struct hci_conn *conn, u8 status) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; BT_DBG("status %u", status); @@ -3167,7 +3116,7 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status) static void le_pairing_complete_cb(struct hci_conn *conn, u8 status) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; BT_DBG("status %u", status); @@ -3189,7 +3138,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, { struct mgmt_cp_pair_device *cp = data; struct mgmt_rp_pair_device rp; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; u8 sec_level, auth_type; struct hci_conn *conn; int err; @@ -3201,20 +3150,28 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) - return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, - MGMT_STATUS_INVALID_PARAMS, - &rp, sizeof(rp)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY) - return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, - MGMT_STATUS_INVALID_PARAMS, - &rp, sizeof(rp)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, - MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + MGMT_STATUS_NOT_POWERED, &rp, + sizeof(rp)); + goto unlock; + } + + if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + MGMT_STATUS_ALREADY_PAIRED, &rp, + sizeof(rp)); goto unlock; } @@ -3262,16 +3219,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, else status = MGMT_STATUS_CONNECT_FAILED; - err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, - status, &rp, - sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + status, &rp, sizeof(rp)); goto unlock; } if (conn->connect_cfm_cb) { hci_conn_drop(conn); - err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, - MGMT_STATUS_BUSY, &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + MGMT_STATUS_BUSY, &rp, sizeof(rp)); goto unlock; } @@ -3315,7 +3271,7 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_addr_info *addr = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; @@ -3324,31 +3280,31 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, - MGMT_STATUS_NOT_POWERED); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, + MGMT_STATUS_NOT_POWERED); goto unlock; } - cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev); + cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev); if (!cmd) { - err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, - MGMT_STATUS_INVALID_PARAMS); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS); goto unlock; } conn = 
cmd->user_data; if (bacmp(&addr->bdaddr, &conn->dst) != 0) { - err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, - MGMT_STATUS_INVALID_PARAMS); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS); goto unlock; } cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED); mgmt_pending_remove(cmd); - err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, - addr, sizeof(*addr)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, + addr, sizeof(*addr)); unlock: hci_dev_unlock(hdev); return err; @@ -3358,16 +3314,16 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, struct mgmt_addr_info *addr, u16 mgmt_op, u16 hci_op, __le32 passkey) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_complete(sk, hdev->id, mgmt_op, - MGMT_STATUS_NOT_POWERED, addr, - sizeof(*addr)); + err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, + MGMT_STATUS_NOT_POWERED, addr, + sizeof(*addr)); goto done; } @@ -3377,22 +3333,22 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr); if (!conn) { - err = cmd_complete(sk, hdev->id, mgmt_op, - MGMT_STATUS_NOT_CONNECTED, addr, - sizeof(*addr)); + err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, + MGMT_STATUS_NOT_CONNECTED, addr, + sizeof(*addr)); goto done; } if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { err = smp_user_confirm_reply(conn, mgmt_op, passkey); if (!err) - err = cmd_complete(sk, hdev->id, mgmt_op, - MGMT_STATUS_SUCCESS, addr, - sizeof(*addr)); + err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, + MGMT_STATUS_SUCCESS, addr, + sizeof(*addr)); else - err = cmd_complete(sk, hdev->id, mgmt_op, - MGMT_STATUS_FAILED, addr, - sizeof(*addr)); + err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, + MGMT_STATUS_FAILED, addr, + sizeof(*addr)); goto done; } @@ -3444,8 +3400,8 @@ static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data, BT_DBG(""); if (len != sizeof(*cp)) - return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY, + MGMT_STATUS_INVALID_PARAMS); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_USER_CONFIRM_REPLY, @@ -3501,24 +3457,24 @@ static void update_name(struct hci_request *req) static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct mgmt_cp_set_local_name *cp; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; BT_DBG("status 0x%02x", status); hci_dev_lock(hdev); - cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); + cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); if (!cmd) goto unlock; cp = cmd->param; if (status) - cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, - mgmt_status(status)); + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, + mgmt_status(status)); else - cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, - cp, sizeof(*cp)); + mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, + cp, sizeof(*cp)); mgmt_pending_remove(cmd); @@ -3530,7 +3486,7 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_local_name *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; int err; @@ -3544,8 +3500,8 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, if 
(!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) && !memcmp(hdev->short_name, cp->short_name, sizeof(hdev->short_name))) { - err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, - data, len); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, + data, len); goto failed; } @@ -3554,13 +3510,13 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, if (!hdev_is_powered(hdev)) { memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); - err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, - data, len); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, + data, len); if (err < 0) goto failed; - err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len, - sk); + err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, + data, len, sk); goto failed; } @@ -3598,7 +3554,7 @@ failed: static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; int err; BT_DBG("%s", hdev->name); @@ -3606,20 +3562,20 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, - MGMT_STATUS_NOT_POWERED); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_NOT_POWERED); goto unlock; } if (!lmp_ssp_capable(hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, - MGMT_STATUS_NOT_SUPPORTED); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_NOT_SUPPORTED); goto unlock; } - if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_BUSY); goto unlock; } @@ -3652,9 +3608,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, BT_DBG("%s ", hdev->name); if (!bdaddr_type_is_valid(addr->type)) - return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS, addr, - sizeof(*addr)); + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + addr, sizeof(*addr)); hci_dev_lock(hdev); @@ -3663,10 +3620,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, u8 status; if (cp->addr.type != BDADDR_BREDR) { - err = cmd_complete(sk, hdev->id, - MGMT_OP_ADD_REMOTE_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS, - &cp->addr, sizeof(cp->addr)); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); goto unlock; } @@ -3678,8 +3635,9 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, else status = MGMT_STATUS_SUCCESS; - err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, - status, &cp->addr, sizeof(cp->addr)); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, status, + &cp->addr, sizeof(cp->addr)); } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) { struct mgmt_cp_add_remote_oob_ext_data *cp = data; u8 *rand192, *hash192, *rand256, *hash256; @@ -3691,10 +3649,10 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, */ if (memcmp(cp->rand192, ZERO_KEY, 16) || memcmp(cp->hash192, ZERO_KEY, 16)) { - err = cmd_complete(sk, hdev->id, - MGMT_OP_ADD_REMOTE_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS, - 
addr, sizeof(*addr)); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + addr, sizeof(*addr)); goto unlock; } @@ -3734,12 +3692,13 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, else status = MGMT_STATUS_SUCCESS; - err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, - status, &cp->addr, sizeof(cp->addr)); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, + status, &cp->addr, sizeof(cp->addr)); } else { BT_ERR("add_remote_oob_data: invalid length of %u bytes", len); - err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS); } unlock: @@ -3757,9 +3716,10 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, BT_DBG("%s", hdev->name); if (cp->addr.type != BDADDR_BREDR) - return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, - MGMT_STATUS_INVALID_PARAMS, - &cp->addr, sizeof(cp->addr)); + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_REMOVE_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); @@ -3776,100 +3736,136 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, status = MGMT_STATUS_SUCCESS; done: - err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, - status, &cp->addr, sizeof(cp->addr)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, + status, &cp->addr, sizeof(cp->addr)); hci_dev_unlock(hdev); return err; } -static bool trigger_discovery(struct hci_request *req, u8 *status) +static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status) { struct hci_dev *hdev = req->hdev; - struct hci_cp_le_set_scan_param param_cp; - struct hci_cp_le_set_scan_enable enable_cp; - struct hci_cp_inquiry inq_cp; + struct hci_cp_inquiry cp; /* General inquiry access code (GIAC) */ u8 lap[3] = { 0x33, 0x8b, 0x9e }; + + *status = mgmt_bredr_support(hdev); + if (*status) + return false; + + if (hci_dev_test_flag(hdev, HCI_INQUIRY)) { + *status = MGMT_STATUS_BUSY; + return false; + } + + hci_inquiry_cache_flush(hdev); + + memset(&cp, 0, sizeof(cp)); + memcpy(&cp.lap, lap, sizeof(cp.lap)); + cp.length = DISCOV_BREDR_INQUIRY_LEN; + + hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); + + return true; +} + +static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status) +{ + struct hci_dev *hdev = req->hdev; + struct hci_cp_le_set_scan_param param_cp; + struct hci_cp_le_set_scan_enable enable_cp; u8 own_addr_type; int err; - switch (hdev->discovery.type) { - case DISCOV_TYPE_BREDR: - *status = mgmt_bredr_support(hdev); - if (*status) - return false; + *status = mgmt_le_support(hdev); + if (*status) + return false; - if (test_bit(HCI_INQUIRY, &hdev->flags)) { - *status = MGMT_STATUS_BUSY; + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) { + /* Don't let discovery abort an outgoing connection attempt + * that's using directed advertising. + */ + if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { + *status = MGMT_STATUS_REJECTED; return false; } - hci_inquiry_cache_flush(hdev); + disable_advertising(req); + } - memset(&inq_cp, 0, sizeof(inq_cp)); - memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap)); - inq_cp.length = DISCOV_BREDR_INQUIRY_LEN; - hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp); - break; + /* If controller is scanning, it means the background scanning is + * running. 
Thus, we should temporarily stop it in order to set the + * discovery scanning parameters. + */ + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) + hci_req_add_le_scan_disable(req); - case DISCOV_TYPE_LE: - case DISCOV_TYPE_INTERLEAVED: - *status = mgmt_le_support(hdev); - if (*status) - return false; + /* All active scans will be done with either a resolvable private + * address (when privacy feature has been enabled) or non-resolvable + * private address. + */ + err = hci_update_random_address(req, true, &own_addr_type); + if (err < 0) { + *status = MGMT_STATUS_FAILED; + return false; + } - if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED && - !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { - *status = MGMT_STATUS_NOT_SUPPORTED; + memset(&param_cp, 0, sizeof(param_cp)); + param_cp.type = LE_SCAN_ACTIVE; + param_cp.interval = cpu_to_le16(interval); + param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); + param_cp.own_address_type = own_addr_type; + + hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), + &param_cp); + + memset(&enable_cp, 0, sizeof(enable_cp)); + enable_cp.enable = LE_SCAN_ENABLE; + enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; + + hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), + &enable_cp); + + return true; +} + +static bool trigger_discovery(struct hci_request *req, u8 *status) +{ + struct hci_dev *hdev = req->hdev; + + switch (hdev->discovery.type) { + case DISCOV_TYPE_BREDR: + if (!trigger_bredr_inquiry(req, status)) return false; - } + break; - if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) { - /* Don't let discovery abort an outgoing - * connection attempt that's using directed - * advertising. + case DISCOV_TYPE_INTERLEAVED: + if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, + &hdev->quirks)) { + /* During simultaneous discovery, we double LE scan + * interval. We must leave some time for the controller + * to do BR/EDR inquiry. */ - if (hci_conn_hash_lookup_state(hdev, LE_LINK, - BT_CONNECT)) { - *status = MGMT_STATUS_REJECTED; + if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2, + status)) return false; - } - - disable_advertising(req); - } - /* If controller is scanning, it means the background scanning - * is running. Thus, we should temporarily stop it in order to - * set the discovery scanning parameters. - */ - if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) - hci_req_add_le_scan_disable(req); + if (!trigger_bredr_inquiry(req, status)) + return false; - memset(&param_cp, 0, sizeof(param_cp)); + return true; + } - /* All active scans will be done with either a resolvable - * private address (when privacy feature has been enabled) - * or non-resolvable private address. 
- */ - err = hci_update_random_address(req, true, &own_addr_type); - if (err < 0) { - *status = MGMT_STATUS_FAILED; + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { + *status = MGMT_STATUS_NOT_SUPPORTED; return false; } + /* fall through */ - param_cp.type = LE_SCAN_ACTIVE; - param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT); - param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); - param_cp.own_address_type = own_addr_type; - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), - &param_cp); - - memset(&enable_cp, 0, sizeof(enable_cp)); - enable_cp.enable = LE_SCAN_ENABLE; - enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), - &enable_cp); + case DISCOV_TYPE_LE: + if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status)) + return false; break; default: @@ -3883,16 +3879,16 @@ static bool trigger_discovery(struct hci_request *req, u8 *status) static void start_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; unsigned long timeout; BT_DBG("status %d", status); hci_dev_lock(hdev); - cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); + cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev); if (!cmd) - cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev); + cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev); if (cmd) { cmd->cmd_complete(cmd, mgmt_status(status)); @@ -3914,7 +3910,18 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status, timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); break; case DISCOV_TYPE_INTERLEAVED: - timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); + /* When running simultaneous discovery, the LE scanning time + * should occupy the whole discovery time since BR/EDR inquiry + * and LE scanning are scheduled by the controller. + * + * For interleaving discovery, in comparison, BR/EDR inquiry + * and LE scanning are done sequentially with separate + * timeouts. 
+ */ + if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) + timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); + else + timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); break; case DISCOV_TYPE_BREDR: timeout = 0; @@ -3933,8 +3940,7 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status, */ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && - (hdev->discovery.uuid_count > 0 || - hdev->discovery.rssi != HCI_RSSI_INVALID)) { + hdev->discovery.result_filtering) { hdev->discovery.scan_start = jiffies; hdev->discovery.scan_duration = timeout; } @@ -3951,7 +3957,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_start_discovery *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; u8 status; int err; @@ -3961,17 +3967,17 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_NOT_POWERED, - &cp->type, sizeof(cp->type)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, + MGMT_STATUS_NOT_POWERED, + &cp->type, sizeof(cp->type)); goto failed; } if (hdev->discovery.state != DISCOVERY_STOPPED || - test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, - MGMT_STATUS_BUSY, &cp->type, - sizeof(cp->type)); + hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, + MGMT_STATUS_BUSY, &cp->type, + sizeof(cp->type)); goto failed; } @@ -3994,8 +4000,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev, hci_req_init(&req, hdev); if (!trigger_discovery(&req, &status)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, - status, &cp->type, sizeof(cp->type)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY, + status, &cp->type, sizeof(cp->type)); mgmt_pending_remove(cmd); goto failed; } @@ -4013,17 +4019,18 @@ failed: return err; } -static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status) +static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd, + u8 status) { - return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, - cmd->param, 1); + return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + cmd->param, 1); } static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_start_service_discovery *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16); u16 uuid_count, expected_len; @@ -4035,19 +4042,19 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_complete(sk, hdev->id, - MGMT_OP_START_SERVICE_DISCOVERY, - MGMT_STATUS_NOT_POWERED, - &cp->type, sizeof(cp->type)); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_NOT_POWERED, + &cp->type, sizeof(cp->type)); goto failed; } if (hdev->discovery.state != DISCOVERY_STOPPED || - test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) { - err = cmd_complete(sk, hdev->id, - MGMT_OP_START_SERVICE_DISCOVERY, - MGMT_STATUS_BUSY, &cp->type, - sizeof(cp->type)); + hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_BUSY, &cp->type, + 
sizeof(cp->type)); goto failed; } @@ -4055,10 +4062,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, if (uuid_count > max_uuid_count) { BT_ERR("service_discovery: too big uuid_count value %u", uuid_count); - err = cmd_complete(sk, hdev->id, - MGMT_OP_START_SERVICE_DISCOVERY, - MGMT_STATUS_INVALID_PARAMS, &cp->type, - sizeof(cp->type)); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_INVALID_PARAMS, &cp->type, + sizeof(cp->type)); goto failed; } @@ -4066,10 +4073,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, if (expected_len != len) { BT_ERR("service_discovery: expected %u bytes, got %u bytes", expected_len, len); - err = cmd_complete(sk, hdev->id, - MGMT_OP_START_SERVICE_DISCOVERY, - MGMT_STATUS_INVALID_PARAMS, &cp->type, - sizeof(cp->type)); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_INVALID_PARAMS, &cp->type, + sizeof(cp->type)); goto failed; } @@ -4087,6 +4094,7 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, */ hci_discovery_filter_clear(hdev); + hdev->discovery.result_filtering = true; hdev->discovery.type = cp->type; hdev->discovery.rssi = cp->rssi; hdev->discovery.uuid_count = uuid_count; @@ -4095,10 +4103,10 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16, GFP_KERNEL); if (!hdev->discovery.uuids) { - err = cmd_complete(sk, hdev->id, - MGMT_OP_START_SERVICE_DISCOVERY, - MGMT_STATUS_FAILED, - &cp->type, sizeof(cp->type)); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_FAILED, + &cp->type, sizeof(cp->type)); mgmt_pending_remove(cmd); goto failed; } @@ -4107,9 +4115,9 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, hci_req_init(&req, hdev); if (!trigger_discovery(&req, &status)) { - err = cmd_complete(sk, hdev->id, - MGMT_OP_START_SERVICE_DISCOVERY, - status, &cp->type, sizeof(cp->type)); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + status, &cp->type, sizeof(cp->type)); mgmt_pending_remove(cmd); goto failed; } @@ -4129,13 +4137,13 @@ failed: static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; BT_DBG("status %d", status); hci_dev_lock(hdev); - cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); + cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev); if (cmd) { cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); @@ -4151,7 +4159,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_stop_discovery *mgmt_cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; int err; @@ -4160,16 +4168,16 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); if (!hci_discovery_active(hdev)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, - MGMT_STATUS_REJECTED, &mgmt_cp->type, - sizeof(mgmt_cp->type)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, + MGMT_STATUS_REJECTED, &mgmt_cp->type, + sizeof(mgmt_cp->type)); goto unlock; } if (hdev->discovery.type != mgmt_cp->type) { - err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, - MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type, - sizeof(mgmt_cp->type)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, + 
MGMT_STATUS_INVALID_PARAMS, + &mgmt_cp->type, sizeof(mgmt_cp->type)); goto unlock; } @@ -4195,8 +4203,8 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, /* If no HCI commands were sent we're done */ if (err == -ENODATA) { - err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0, - &mgmt_cp->type, sizeof(mgmt_cp->type)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0, + &mgmt_cp->type, sizeof(mgmt_cp->type)); hci_discovery_set_state(hdev, DISCOVERY_STOPPED); } @@ -4217,17 +4225,17 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); if (!hci_discovery_active(hdev)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, - MGMT_STATUS_FAILED, &cp->addr, - sizeof(cp->addr)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, + MGMT_STATUS_FAILED, &cp->addr, + sizeof(cp->addr)); goto failed; } e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr); if (!e) { - err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, - MGMT_STATUS_INVALID_PARAMS, &cp->addr, - sizeof(cp->addr)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, + MGMT_STATUS_INVALID_PARAMS, &cp->addr, + sizeof(cp->addr)); goto failed; } @@ -4239,8 +4247,8 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data, hci_inquiry_cache_update_resolve(hdev, e); } - err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr, - sizeof(cp->addr)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, + &cp->addr, sizeof(cp->addr)); failed: hci_dev_unlock(hdev); @@ -4257,9 +4265,9 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data, BT_DBG("%s", hdev->name); if (!bdaddr_type_is_valid(cp->addr.type)) - return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, - MGMT_STATUS_INVALID_PARAMS, - &cp->addr, sizeof(cp->addr)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); @@ -4275,8 +4283,8 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data, status = MGMT_STATUS_SUCCESS; done: - err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, - &cp->addr, sizeof(cp->addr)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, + &cp->addr, sizeof(cp->addr)); hci_dev_unlock(hdev); @@ -4293,9 +4301,9 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data, BT_DBG("%s", hdev->name); if (!bdaddr_type_is_valid(cp->addr.type)) - return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, - MGMT_STATUS_INVALID_PARAMS, - &cp->addr, sizeof(cp->addr)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); @@ -4311,8 +4319,8 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data, status = MGMT_STATUS_SUCCESS; done: - err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, - &cp->addr, sizeof(cp->addr)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, + &cp->addr, sizeof(cp->addr)); hci_dev_unlock(hdev); @@ -4332,8 +4340,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, source = __le16_to_cpu(cp->source); if (source > 0x0002) - return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); @@ -4342,7 
+4350,8 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, hdev->devid_product = __le16_to_cpu(cp->product); hdev->devid_version = __le16_to_cpu(cp->version); - err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, + NULL, 0); hci_req_init(&req, hdev); update_eir(&req); @@ -4368,10 +4377,10 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status, goto unlock; } - if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) - set_bit(HCI_ADVERTISING, &hdev->dev_flags); + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) + hci_dev_set_flag(hdev, HCI_ADVERTISING); else - clear_bit(HCI_ADVERTISING, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_ADVERTISING); mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, &match); @@ -4389,41 +4398,48 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; - u8 val, enabled, status; + u8 val, status; int err; BT_DBG("request for %s", hdev->name); status = mgmt_le_support(hdev); if (status) - return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, - status); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + status); - if (cp->val != 0x00 && cp->val != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, - MGMT_STATUS_INVALID_PARAMS); + if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); val = !!cp->val; - enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags); /* The following conditions are ones which mean that we should * not do any HCI communication but directly send a mgmt * response to user space (after toggling the flag if * necessary). 
*/ - if (!hdev_is_powered(hdev) || val == enabled || + if (!hdev_is_powered(hdev) || + (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) && + (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) || hci_conn_num(hdev, LE_LINK) > 0 || - (test_bit(HCI_LE_SCAN, &hdev->dev_flags) && + (hci_dev_test_flag(hdev, HCI_LE_SCAN) && hdev->le_scan_type == LE_SCAN_ACTIVE)) { - bool changed = false; + bool changed; - if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { - change_bit(HCI_ADVERTISING, &hdev->dev_flags); - changed = true; + if (cp->val) { + changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING); + if (cp->val == 0x02) + hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + else + hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + } else { + changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING); + hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); } err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev); @@ -4436,10 +4452,10 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, goto unlock; } - if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) || - mgmt_pending_find(MGMT_OP_SET_LE, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) || + pending_find(MGMT_OP_SET_LE, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_BUSY); goto unlock; } @@ -4451,6 +4467,11 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, hci_req_init(&req, hdev); + if (cp->val == 0x02) + hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + else + hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + if (val) enable_advertising(&req); else @@ -4474,34 +4495,38 @@ static int set_static_address(struct sock *sk, struct hci_dev *hdev, BT_DBG("%s", hdev->name); if (!lmp_le_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, + MGMT_STATUS_NOT_SUPPORTED); if (hdev_is_powered(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, - MGMT_STATUS_REJECTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, + MGMT_STATUS_REJECTED); if (bacmp(&cp->bdaddr, BDADDR_ANY)) { if (!bacmp(&cp->bdaddr, BDADDR_NONE)) - return cmd_status(sk, hdev->id, - MGMT_OP_SET_STATIC_ADDRESS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_STATIC_ADDRESS, + MGMT_STATUS_INVALID_PARAMS); /* Two most significant bits shall be set */ if ((cp->bdaddr.b[5] & 0xc0) != 0xc0) - return cmd_status(sk, hdev->id, - MGMT_OP_SET_STATIC_ADDRESS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_STATIC_ADDRESS, + MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); bacpy(&hdev->static_addr, &cp->bdaddr); - err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0); + err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev); + if (err < 0) + goto unlock; - hci_dev_unlock(hdev); + err = new_settings(hdev, sk); +unlock: + hci_dev_unlock(hdev); return err; } @@ -4515,36 +4540,37 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev, BT_DBG("%s", hdev->name); if (!lmp_le_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, + MGMT_STATUS_NOT_SUPPORTED); interval = 
__le16_to_cpu(cp->interval); if (interval < 0x0004 || interval > 0x4000) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, + MGMT_STATUS_INVALID_PARAMS); window = __le16_to_cpu(cp->window); if (window < 0x0004 || window > 0x4000) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, + MGMT_STATUS_INVALID_PARAMS); if (window > interval) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); hdev->le_scan_interval = interval; hdev->le_scan_window = window; - err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, + NULL, 0); /* If background scan is running, restart it so new parameters are * loaded. */ - if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && hdev->discovery.state == DISCOVERY_STOPPED) { struct hci_request req; @@ -4564,26 +4590,26 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev, static void fast_connectable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; BT_DBG("status 0x%02x", status); hci_dev_lock(hdev); - cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev); + cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev); if (!cmd) goto unlock; if (status) { - cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - mgmt_status(status)); + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + mgmt_status(status)); } else { struct mgmt_mode *cp = cmd->param; if (cp->val) - set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE); else - clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE); send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); new_settings(hdev, cmd->sk); @@ -4599,40 +4625,40 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; int err; BT_DBG("%s", hdev->name); - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) || + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) || hdev->hci_ver < BLUETOOTH_VER_1_2) - return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_INVALID_PARAMS); - - if (!hdev_is_powered(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_NOT_POWERED); - - if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_REJECTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); - if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) { + err = mgmt_cmd_status(sk, 
hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + MGMT_STATUS_BUSY); goto unlock; } - if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) { + if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) { + err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, + hdev); + goto unlock; + } + + if (!hdev_is_powered(hdev)) { + hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE); err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); + new_settings(hdev, sk); goto unlock; } @@ -4649,8 +4675,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, err = hci_req_run(&req, fast_connectable_complete); if (err < 0) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, - MGMT_STATUS_FAILED); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + MGMT_STATUS_FAILED); mgmt_pending_remove(cmd); } @@ -4662,13 +4688,13 @@ unlock: static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; BT_DBG("status 0x%02x", status); hci_dev_lock(hdev); - cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev); + cmd = pending_find(MGMT_OP_SET_BREDR, hdev); if (!cmd) goto unlock; @@ -4678,9 +4704,9 @@ static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode) /* We need to restore the flag if related HCI commands * failed. */ - clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); - cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); } else { send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev); new_settings(hdev, cmd->sk); @@ -4695,41 +4721,41 @@ unlock: static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; int err; BT_DBG("request for %s", hdev->name); if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_NOT_SUPPORTED); - if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, - MGMT_STATUS_REJECTED); + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); - if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { + if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); goto unlock; } if (!hdev_is_powered(hdev)) { if (!cp->val) { - clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); - clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); - clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags); - clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags); - clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); + hci_dev_clear_flag(hdev, HCI_LINK_SECURITY); + hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE); + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } - change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); + hci_dev_change_flag(hdev, HCI_BREDR_ENABLED); err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); if 
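Nearly every hunk in this file swaps open-coded bitops on hdev->dev_flags (test_bit, set_bit, change_bit, test_and_set_bit and friends) for named hci_dev_*_flag helpers. The following is not the kernel's hci_core.h definition, just a minimal sketch of the one-to-one mapping, assuming the flags live in a single word; the real bitops are atomic, this toy is not:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_dev {
        unsigned long dev_flags;    /* stand-in for hdev->dev_flags */
    };

    /* Each helper mirrors the bitop it replaces in the hunks above. */
    static inline bool dev_test_flag(struct toy_dev *d, int nr)
    {
        return (d->dev_flags >> nr) & 1UL;            /* test_bit()   */
    }

    static inline void dev_set_flag(struct toy_dev *d, int nr)
    {
        d->dev_flags |= 1UL << nr;                    /* set_bit()    */
    }

    static inline void dev_clear_flag(struct toy_dev *d, int nr)
    {
        d->dev_flags &= ~(1UL << nr);                 /* clear_bit()  */
    }

    static inline void dev_change_flag(struct toy_dev *d, int nr)
    {
        d->dev_flags ^= 1UL << nr;                    /* change_bit() */
    }

    static inline bool dev_test_and_set_flag(struct toy_dev *d, int nr)
    {
        bool old = dev_test_flag(d, nr);              /* test_and_set_bit() */

        dev_set_flag(d, nr);
        return old;
    }

    static inline bool dev_test_and_clear_flag(struct toy_dev *d, int nr)
    {
        bool old = dev_test_flag(d, nr);              /* test_and_clear_bit() */

        dev_clear_flag(d, nr);
        return old;
    }

    int main(void)
    {
        struct toy_dev d = { 0 };
        bool before, cleared, after;

        dev_set_flag(&d, 3);
        before = dev_test_flag(&d, 3);
        cleared = dev_test_and_clear_flag(&d, 3);
        after = dev_test_flag(&d, 3);
        printf("before=%d cleared=%d after=%d\n", before, cleared, after);
        /* prints: before=1 cleared=1 after=0 */
        return 0;
    }

The point of the rename is readability and a single choke point for flag access, not new semantics.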
(err < 0) @@ -4741,8 +4767,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) /* Reject disabling when powered on */ if (!cp->val) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, - MGMT_STATUS_REJECTED); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_REJECTED); goto unlock; } else { /* When configuring a dual-mode controller to operate @@ -4759,18 +4785,18 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) * switching BR/EDR back on when secure connections has been * enabled is not a supported transaction. */ - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) && + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && (bacmp(&hdev->static_addr, BDADDR_ANY) || - test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, - MGMT_STATUS_REJECTED); + hci_dev_test_flag(hdev, HCI_SC_ENABLED))) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_REJECTED); goto unlock; } } - if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_BREDR, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_BUSY); goto unlock; } @@ -4783,7 +4809,7 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) /* We need to flip the bit already here so that update_adv_data * generates the correct flags. */ - set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); hci_req_init(&req, hdev); @@ -4806,20 +4832,20 @@ unlock: static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct mgmt_mode *cp; BT_DBG("%s status %u", hdev->name, status); hci_dev_lock(hdev); - cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev); + cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev); if (!cmd) goto unlock; if (status) { - cmd_status(cmd->sk, cmd->index, cmd->opcode, - mgmt_status(status)); + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(status)); goto remove; } @@ -4827,16 +4853,16 @@ static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) switch (cp->val) { case 0x00: - clear_bit(HCI_SC_ENABLED, &hdev->dev_flags); - clear_bit(HCI_SC_ONLY, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_SC_ENABLED); + hci_dev_clear_flag(hdev, HCI_SC_ONLY); break; case 0x01: - set_bit(HCI_SC_ENABLED, &hdev->dev_flags); - clear_bit(HCI_SC_ONLY, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + hci_dev_clear_flag(hdev, HCI_SC_ONLY); break; case 0x02: - set_bit(HCI_SC_ENABLED, &hdev->dev_flags); - set_bit(HCI_SC_ONLY, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + hci_dev_set_flag(hdev, HCI_SC_ONLY); break; } @@ -4853,7 +4879,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; u8 val; int err; @@ -4861,37 +4887,37 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, BT_DBG("request for %s", hdev->name); if (!lmp_sc_capable(hdev) && - !test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, - MGMT_STATUS_NOT_SUPPORTED); + !hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, + MGMT_STATUS_NOT_SUPPORTED); - if 
(test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && lmp_sc_capable(hdev) && - !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, - MGMT_STATUS_REJECTED); + !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, + MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) - return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) || - !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { + !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { bool changed; if (cp->val) { - changed = !test_and_set_bit(HCI_SC_ENABLED, - &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, + HCI_SC_ENABLED); if (cp->val == 0x02) - set_bit(HCI_SC_ONLY, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_SC_ONLY); else - clear_bit(HCI_SC_ONLY, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_SC_ONLY); } else { - changed = test_and_clear_bit(HCI_SC_ENABLED, - &hdev->dev_flags); - clear_bit(HCI_SC_ONLY, &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, + HCI_SC_ENABLED); + hci_dev_clear_flag(hdev, HCI_SC_ONLY); } err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); @@ -4904,16 +4930,16 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) { - err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, - MGMT_STATUS_BUSY); + if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, + MGMT_STATUS_BUSY); goto failed; } val = !!cp->val; - if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && - (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) { + if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) && + (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) { err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); goto failed; } @@ -4947,27 +4973,26 @@ static int set_debug_keys(struct sock *sk, struct hci_dev *hdev, BT_DBG("request for %s", hdev->name); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) - return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS, + MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (cp->val) - changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS, - &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS); else - changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS, - &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, + HCI_KEEP_DEBUG_KEYS); if (cp->val == 0x02) - use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS, - &hdev->dev_flags); + use_changed = !hci_dev_test_and_set_flag(hdev, + HCI_USE_DEBUG_KEYS); else - use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS, - &hdev->dev_flags); + use_changed = hci_dev_test_and_clear_flag(hdev, + HCI_USE_DEBUG_KEYS); if (hdev_is_powered(hdev) && use_changed && - test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { + hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { u8 mode = (cp->val == 0x02) ? 
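The mechanical cmd_status/cmd_complete to mgmt_cmd_status/mgmt_cmd_complete rename tracks the helpers' move into the new mgmt_util.c at the end of this diff. The distinction between the two is easy to gloss over: a status reply carries only an error code, while a complete reply echoes the opcode and may append response parameters. A hedged kernel-context sketch of a hypothetical handler using both; MGMT_OP_SET_WIDGET and the handler itself are invented, but the mgmt_cmd_* signatures match the definitions later in this diff:

    /* Hypothetical mgmt handler, illustration only, not from this patch. */
    static int set_widget(struct sock *sk, struct hci_dev *hdev,
                          void *data, u16 len)
    {
        struct mgmt_mode *cp = data;
        u8 rp;

        if (cp->val != 0x00 && cp->val != 0x01)
            /* No payload: MGMT_EV_CMD_STATUS carries just the error code */
            return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_WIDGET,
                                   MGMT_STATUS_INVALID_PARAMS);

        rp = cp->val;

        /* MGMT_EV_CMD_COMPLETE echoes the opcode plus rp_len response bytes */
        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_WIDGET,
                                 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
    }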
0x01 : 0x00; hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode), &mode); @@ -4995,32 +5020,32 @@ static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data, BT_DBG("request for %s", hdev->name); if (!lmp_le_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, + MGMT_STATUS_NOT_SUPPORTED); if (cp->privacy != 0x00 && cp->privacy != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, + MGMT_STATUS_INVALID_PARAMS); if (hdev_is_powered(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, - MGMT_STATUS_REJECTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, + MGMT_STATUS_REJECTED); hci_dev_lock(hdev); /* If user space supports this command it is also expected to * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag. */ - set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_RPA_RESOLVING); if (cp->privacy) { - changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY); memcpy(hdev->irk, cp->irk, sizeof(hdev->irk)); - set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); } else { - changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY); memset(hdev->irk, 0, sizeof(hdev->irk)); - clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); } err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev); @@ -5063,22 +5088,22 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, BT_DBG("request for %s", hdev->name); if (!lmp_le_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, + MGMT_STATUS_NOT_SUPPORTED); irk_count = __le16_to_cpu(cp->irk_count); if (irk_count > max_irk_count) { BT_ERR("load_irks: too big irk_count value %u", irk_count); - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, + MGMT_STATUS_INVALID_PARAMS); } expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info); if (expected_len != len) { BT_ERR("load_irks: expected %u bytes, got %u bytes", expected_len, len); - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, + MGMT_STATUS_INVALID_PARAMS); } BT_DBG("%s irk_count %u", hdev->name, irk_count); @@ -5087,9 +5112,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, struct mgmt_irk_info *key = &cp->irks[i]; if (!irk_is_valid(key)) - return cmd_status(sk, hdev->id, - MGMT_OP_LOAD_IRKS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_LOAD_IRKS, + MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); @@ -5109,9 +5134,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, BDADDR_ANY); } - set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_RPA_RESOLVING); - err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0); hci_dev_unlock(hdev); @@ -5149,14 +5174,14 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, 
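load_irks above, and load_long_term_keys just below, validate variable-length commands with the same arithmetic: the payload must be exactly the fixed header plus count entries, with a separate maximum-count guard keeping the multiplication in range. A self-contained illustration of the check, with hypothetical stand-ins for the mgmt structures:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for struct mgmt_cp_load_irks / mgmt_irk_info */
    struct entry {
        uint8_t addr[7];
        uint8_t val[16];
    };

    struct command {
        uint16_t count;
        struct entry entries[];
    };

    /* Mirrors the expected_len check in load_irks()/load_long_term_keys():
     * anything shorter truncates an entry, anything longer smuggles bytes.
     */
    static int payload_len_ok(uint16_t count, size_t len)
    {
        size_t expected_len = sizeof(struct command) +
                              count * sizeof(struct entry);

        return expected_len == len;
    }

    int main(void)
    {
        size_t good = sizeof(struct command) + 2 * sizeof(struct entry);

        printf("%d %d\n",
               payload_len_ok(2, good),                          /* 1 */
               payload_len_ok(2, good - sizeof(struct entry)));  /* 0 */
        return 0;
    }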
BT_DBG("request for %s", hdev->name); if (!lmp_le_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, + MGMT_STATUS_NOT_SUPPORTED); key_count = __le16_to_cpu(cp->key_count); if (key_count > max_key_count) { BT_ERR("load_ltks: too big key_count value %u", key_count); - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, + MGMT_STATUS_INVALID_PARAMS); } expected_len = sizeof(*cp) + key_count * @@ -5164,8 +5189,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, if (expected_len != len) { BT_ERR("load_keys: expected %u bytes, got %u bytes", expected_len, len); - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, + MGMT_STATUS_INVALID_PARAMS); } BT_DBG("%s key_count %u", hdev->name, key_count); @@ -5174,9 +5199,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, struct mgmt_ltk_info *key = &cp->keys[i]; if (!ltk_is_valid(key)) - return cmd_status(sk, hdev->id, - MGMT_OP_LOAD_LONG_TERM_KEYS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_LOAD_LONG_TERM_KEYS, + MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); @@ -5221,7 +5246,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, key->rand); } - err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0, + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0, NULL, 0); hci_dev_unlock(hdev); @@ -5229,7 +5254,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, return err; } -static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status) +static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { struct hci_conn *conn = cmd->user_data; struct mgmt_rp_get_conn_info rp; @@ -5247,8 +5272,8 @@ static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status) rp.max_tx_power = HCI_TX_POWER_INVALID; } - err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, - &rp, sizeof(rp)); + err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, + status, &rp, sizeof(rp)); hci_conn_drop(conn); hci_conn_put(conn); @@ -5260,7 +5285,7 @@ static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status, u16 opcode) { struct hci_cp_read_rssi *cp; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_conn *conn; u16 handle; u8 status; @@ -5298,7 +5323,7 @@ static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status, goto unlock; } - cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn); + cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn); if (!cmd) goto unlock; @@ -5325,15 +5350,16 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) - return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, - MGMT_STATUS_INVALID_PARAMS, - &rp, sizeof(rp)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, - MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + 
MGMT_STATUS_NOT_POWERED, &rp, + sizeof(rp)); goto unlock; } @@ -5344,14 +5370,15 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); if (!conn || conn->state != BT_CONNECTED) { - err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, - MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_NOT_CONNECTED, &rp, + sizeof(rp)); goto unlock; } - if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, - MGMT_STATUS_BUSY, &rp, sizeof(rp)); + if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_BUSY, &rp, sizeof(rp)); goto unlock; } @@ -5371,7 +5398,7 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, struct hci_request req; struct hci_cp_read_tx_power req_txp_cp; struct hci_cp_read_rssi req_rssi_cp; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; hci_req_init(&req, hdev); req_rssi_cp.handle = cpu_to_le16(conn->handle); @@ -5419,8 +5446,8 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, rp.tx_power = conn->tx_power; rp.max_tx_power = conn->max_tx_power; - err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, - MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); } unlock: @@ -5428,7 +5455,7 @@ unlock: return err; } -static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status) +static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { struct hci_conn *conn = cmd->user_data; struct mgmt_rp_get_clock_info rp; @@ -5453,8 +5480,8 @@ static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status) } complete: - err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, - sizeof(rp)); + err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, + sizeof(rp)); if (conn) { hci_conn_drop(conn); @@ -5467,7 +5494,7 @@ complete: static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct hci_cp_read_clock *hci_cp; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_conn *conn; BT_DBG("%s status %u", hdev->name, status); @@ -5485,7 +5512,7 @@ static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode) conn = NULL; } - cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn); + cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn); if (!cmd) goto unlock; @@ -5502,7 +5529,7 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, struct mgmt_cp_get_clock_info *cp = data; struct mgmt_rp_get_clock_info rp; struct hci_cp_read_clock hci_cp; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; struct hci_conn *conn; int err; @@ -5514,15 +5541,16 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, rp.addr.type = cp->addr.type; if (cp->addr.type != BDADDR_BREDR) - return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, - MGMT_STATUS_INVALID_PARAMS, - &rp, sizeof(rp)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { - err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, - MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); + err = 
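get_conn_info above shows the asynchronous pattern this file leans on: batch one or more HCI commands into a struct hci_request, run it, and finish the mgmt reply from the completion callback via the pending-command list. A trimmed kernel-context sketch of just the request-building half; the function names are invented, and the error handling and pending-cmd bookkeeping that get_conn_info performs are omitted:

    /* Illustration only: queue HCI commands and take one completion callback,
     * as get_conn_info() does for HCI_OP_READ_RSSI + HCI_OP_READ_TX_POWER.
     */
    static void my_refresh_complete(struct hci_dev *hdev, u8 status, u16 opcode)
    {
        /* look up the matching mgmt_pending_cmd and send the reply here */
        BT_DBG("status 0x%02x", status);
    }

    static int refresh_conn_info(struct hci_dev *hdev, struct hci_conn *conn)
    {
        struct hci_cp_read_rssi rssi_cp;
        struct hci_request req;

        hci_req_init(&req, hdev);

        rssi_cp.handle = cpu_to_le16(conn->handle);
        hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(rssi_cp), &rssi_cp);

        /* further hci_req_add() calls can be batched before running */

        return hci_req_run(&req, my_refresh_complete);
    }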
mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, + MGMT_STATUS_NOT_POWERED, &rp, + sizeof(rp)); goto unlock; } @@ -5530,10 +5558,10 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); if (!conn || conn->state != BT_CONNECTED) { - err = cmd_complete(sk, hdev->id, - MGMT_OP_GET_CLOCK_INFO, - MGMT_STATUS_NOT_CONNECTED, - &rp, sizeof(rp)); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_GET_CLOCK_INFO, + MGMT_STATUS_NOT_CONNECTED, + &rp, sizeof(rp)); goto unlock; } } else { @@ -5644,13 +5672,13 @@ static void device_added(struct sock *sk, struct hci_dev *hdev, static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; BT_DBG("status 0x%02x", status); hci_dev_lock(hdev); - cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev); + cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev); if (!cmd) goto unlock; @@ -5665,7 +5693,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_add_device *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; u8 auto_conn, addr_type; int err; @@ -5674,14 +5702,14 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, if (!bdaddr_type_is_valid(cp->addr.type) || !bacmp(&cp->addr.bdaddr, BDADDR_ANY)) - return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, - MGMT_STATUS_INVALID_PARAMS, - &cp->addr, sizeof(cp->addr)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02) - return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, - MGMT_STATUS_INVALID_PARAMS, - &cp->addr, sizeof(cp->addr)); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); hci_req_init(&req, hdev); @@ -5767,13 +5795,13 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev, static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; BT_DBG("status 0x%02x", status); hci_dev_lock(hdev); - cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev); + cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev); if (!cmd) goto unlock; @@ -5788,7 +5816,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_remove_device *cp = data; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct hci_request req; int err; @@ -5921,15 +5949,15 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, int i; if (!lmp_le_capable(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, + MGMT_STATUS_NOT_SUPPORTED); param_count = __le16_to_cpu(cp->param_count); if (param_count > max_param_count) { BT_ERR("load_conn_param: too big param_count value %u", param_count); - return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, + MGMT_STATUS_INVALID_PARAMS); } expected_len = sizeof(*cp) + param_count * @@ -5937,8 +5965,8 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, if (expected_len != len) { BT_ERR("load_conn_param: expected %u bytes, got %u bytes", expected_len, len); - 
return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, + MGMT_STATUS_INVALID_PARAMS); } BT_DBG("%s param_count %u", hdev->name, param_count); @@ -5993,7 +6021,8 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_unlock(hdev); - return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, + NULL, 0); } static int set_external_config(struct sock *sk, struct hci_dev *hdev, @@ -6006,25 +6035,23 @@ static int set_external_config(struct sock *sk, struct hci_dev *hdev, BT_DBG("%s", hdev->name); if (hdev_is_powered(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, - MGMT_STATUS_REJECTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, + MGMT_STATUS_REJECTED); if (cp->config != 0x00 && cp->config != 0x01) - return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, + MGMT_STATUS_INVALID_PARAMS); if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, + MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); if (cp->config) - changed = !test_and_set_bit(HCI_EXT_CONFIGURED, - &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED); else - changed = test_and_clear_bit(HCI_EXT_CONFIGURED, - &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED); err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev); if (err < 0) @@ -6035,12 +6062,12 @@ static int set_external_config(struct sock *sk, struct hci_dev *hdev, err = new_options(hdev, sk); - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) { + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) { mgmt_index_removed(hdev); - if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) { - set_bit(HCI_CONFIG, &hdev->dev_flags); - set_bit(HCI_AUTO_OFF, &hdev->dev_flags); + if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) { + hci_dev_set_flag(hdev, HCI_CONFIG); + hci_dev_set_flag(hdev, HCI_AUTO_OFF); queue_work(hdev->req_workqueue, &hdev->power_on); } else { @@ -6064,16 +6091,16 @@ static int set_public_address(struct sock *sk, struct hci_dev *hdev, BT_DBG("%s", hdev->name); if (hdev_is_powered(hdev)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, - MGMT_STATUS_REJECTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, + MGMT_STATUS_REJECTED); if (!bacmp(&cp->bdaddr, BDADDR_ANY)) - return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, - MGMT_STATUS_INVALID_PARAMS); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, + MGMT_STATUS_INVALID_PARAMS); if (!hdev->set_bdaddr) - return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, - MGMT_STATUS_NOT_SUPPORTED); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, + MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); @@ -6087,16 +6114,16 @@ static int set_public_address(struct sock *sk, struct hci_dev *hdev, if (!changed) goto unlock; - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) err = new_options(hdev, sk); if (is_configured(hdev)) { mgmt_index_removed(hdev); - 
clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_UNCONFIGURED); - set_bit(HCI_CONFIG, &hdev->dev_flags); - set_bit(HCI_AUTO_OFF, &hdev->dev_flags); + hci_dev_set_flag(hdev, HCI_CONFIG); + hci_dev_set_flag(hdev, HCI_AUTO_OFF); queue_work(hdev->req_workqueue, &hdev->power_on); } @@ -6106,213 +6133,339 @@ unlock: return err; } -static const struct mgmt_handler { - int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, - u16 data_len); - bool var_len; - size_t data_len; -} mgmt_handlers[] = { - { NULL }, /* 0x0000 (no command) */ - { read_version, false, MGMT_READ_VERSION_SIZE }, - { read_commands, false, MGMT_READ_COMMANDS_SIZE }, - { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE }, - { read_controller_info, false, MGMT_READ_INFO_SIZE }, - { set_powered, false, MGMT_SETTING_SIZE }, - { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE }, - { set_connectable, false, MGMT_SETTING_SIZE }, - { set_fast_connectable, false, MGMT_SETTING_SIZE }, - { set_bondable, false, MGMT_SETTING_SIZE }, - { set_link_security, false, MGMT_SETTING_SIZE }, - { set_ssp, false, MGMT_SETTING_SIZE }, - { set_hs, false, MGMT_SETTING_SIZE }, - { set_le, false, MGMT_SETTING_SIZE }, - { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE }, - { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE }, - { add_uuid, false, MGMT_ADD_UUID_SIZE }, - { remove_uuid, false, MGMT_REMOVE_UUID_SIZE }, - { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE }, - { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE }, - { disconnect, false, MGMT_DISCONNECT_SIZE }, - { get_connections, false, MGMT_GET_CONNECTIONS_SIZE }, - { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE }, - { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE }, - { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE }, - { pair_device, false, MGMT_PAIR_DEVICE_SIZE }, - { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE }, - { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE }, - { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE }, - { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE }, - { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE }, - { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE }, - { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE }, - { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE }, - { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE }, - { start_discovery, false, MGMT_START_DISCOVERY_SIZE }, - { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE }, - { confirm_name, false, MGMT_CONFIRM_NAME_SIZE }, - { block_device, false, MGMT_BLOCK_DEVICE_SIZE }, - { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE }, - { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE }, - { set_advertising, false, MGMT_SETTING_SIZE }, - { set_bredr, false, MGMT_SETTING_SIZE }, - { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE }, - { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE }, - { set_secure_conn, false, MGMT_SETTING_SIZE }, - { set_debug_keys, false, MGMT_SETTING_SIZE }, - { set_privacy, false, MGMT_SET_PRIVACY_SIZE }, - { load_irks, true, MGMT_LOAD_IRKS_SIZE }, - { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE }, - { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE }, - { add_device, false, MGMT_ADD_DEVICE_SIZE }, - { remove_device, false, MGMT_REMOVE_DEVICE_SIZE }, - { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE }, - { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE }, - { read_config_info, false, 
MGMT_READ_CONFIG_INFO_SIZE }, - { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE }, - { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE }, - { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE }, -}; - -int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) +static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, + u8 data_len) { - void *buf; - u8 *cp; - struct mgmt_hdr *hdr; - u16 opcode, index, len; - struct hci_dev *hdev = NULL; - const struct mgmt_handler *handler; - int err; + eir[eir_len++] = sizeof(type) + data_len; + eir[eir_len++] = type; + memcpy(&eir[eir_len], data, data_len); + eir_len += data_len; - BT_DBG("got %zu bytes", msglen); + return eir_len; +} - if (msglen < sizeof(*hdr)) - return -EINVAL; +static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_cp_read_local_oob_ext_data *cp = data; + struct mgmt_rp_read_local_oob_ext_data *rp; + size_t rp_len; + u16 eir_len; + u8 status, flags, role, addr[7], hash[16], rand[16]; + int err; - buf = kmalloc(msglen, GFP_KERNEL); - if (!buf) - return -ENOMEM; + BT_DBG("%s", hdev->name); - if (memcpy_from_msg(buf, msg, msglen)) { - err = -EFAULT; - goto done; + if (!hdev_is_powered(hdev)) + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, + MGMT_STATUS_NOT_POWERED, + &cp->type, sizeof(cp->type)); + + switch (cp->type) { + case BIT(BDADDR_BREDR): + status = mgmt_bredr_support(hdev); + if (status) + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, + status, &cp->type, + sizeof(cp->type)); + eir_len = 5; + break; + case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): + status = mgmt_le_support(hdev); + if (status) + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, + status, &cp->type, + sizeof(cp->type)); + eir_len = 9 + 3 + 18 + 18 + 3; + break; + default: + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, + MGMT_STATUS_INVALID_PARAMS, + &cp->type, sizeof(cp->type)); } - hdr = buf; - opcode = __le16_to_cpu(hdr->opcode); - index = __le16_to_cpu(hdr->index); - len = __le16_to_cpu(hdr->len); + hci_dev_lock(hdev); - if (len != msglen - sizeof(*hdr)) { - err = -EINVAL; - goto done; + rp_len = sizeof(*rp) + eir_len; + rp = kmalloc(rp_len, GFP_ATOMIC); + if (!rp) { + hci_dev_unlock(hdev); + return -ENOMEM; } - if (index != MGMT_INDEX_NONE) { - hdev = hci_dev_get(index); - if (!hdev) { - err = cmd_status(sk, index, opcode, - MGMT_STATUS_INVALID_INDEX); + eir_len = 0; + switch (cp->type) { + case BIT(BDADDR_BREDR): + eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV, + hdev->dev_class, 3); + break; + case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): + if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) && + smp_generate_oob(hdev, hash, rand) < 0) { + hci_dev_unlock(hdev); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, + MGMT_STATUS_FAILED, + &cp->type, sizeof(cp->type)); goto done; } - if (test_bit(HCI_SETUP, &hdev->dev_flags) || - test_bit(HCI_CONFIG, &hdev->dev_flags) || - test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { - err = cmd_status(sk, index, opcode, - MGMT_STATUS_INVALID_INDEX); - goto done; + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { + memcpy(addr, &hdev->rpa, 6); + addr[6] = 0x01; + } else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || + !bacmp(&hdev->bdaddr, BDADDR_ANY) || + (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && + bacmp(&hdev->static_addr, BDADDR_ANY))) { + 
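read_local_oob_ext_data is built entirely on eir_append_data(), whose definition this patch hoists to the top of the file (the old copy is deleted further down). Each EIR field is a length/type/value triple where the length octet counts the type byte plus the payload; that is why the per-type sizes above work out to 5 for the class-of-device field and 9 + 3 + 18 + 18 + 3 for the LE address, role, confirm, random and flags fields. A runnable userspace rendering of the same encoder:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Same logic as eir_append_data() in the hunk above, in userspace types:
     * the length octet is sizeof(type) + data_len, i.e. 1 + payload bytes.
     */
    static uint16_t eir_append(uint8_t *eir, uint16_t eir_len, uint8_t type,
                               const uint8_t *data, uint8_t data_len)
    {
        eir[eir_len++] = sizeof(type) + data_len;
        eir[eir_len++] = type;
        memcpy(&eir[eir_len], data, data_len);
        return eir_len + data_len;
    }

    int main(void)
    {
        uint8_t eir[64];
        uint8_t flags = 0x06;   /* LE General Discoverable, BR/EDR not supported */
        uint16_t len = 0;

        len = eir_append(eir, len, 0x01, &flags, sizeof(flags));        /* EIR_FLAGS */
        len = eir_append(eir, len, 0x09, (const uint8_t *)"widget", 6); /* Complete Local Name */

        for (uint16_t i = 0; i < len; i++)
            printf("%02x ", eir[i]);
        printf("\n");   /* prints: 02 01 06 07 09 77 69 64 67 65 74 */
        return 0;
    }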
memcpy(addr, &hdev->static_addr, 6); + addr[6] = 0x01; + } else { + memcpy(addr, &hdev->bdaddr, 6); + addr[6] = 0x00; } - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) && - opcode != MGMT_OP_READ_CONFIG_INFO && - opcode != MGMT_OP_SET_EXTERNAL_CONFIG && - opcode != MGMT_OP_SET_PUBLIC_ADDRESS) { - err = cmd_status(sk, index, opcode, - MGMT_STATUS_INVALID_INDEX); - goto done; - } - } + eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR, + addr, sizeof(addr)); - if (opcode >= ARRAY_SIZE(mgmt_handlers) || - mgmt_handlers[opcode].func == NULL) { - BT_DBG("Unknown op %u", opcode); - err = cmd_status(sk, index, opcode, - MGMT_STATUS_UNKNOWN_COMMAND); - goto done; - } + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) + role = 0x02; + else + role = 0x01; - if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST || - opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) { - err = cmd_status(sk, index, opcode, - MGMT_STATUS_INVALID_INDEX); - goto done; - } + eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE, + &role, sizeof(role)); - if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST && - opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) { - err = cmd_status(sk, index, opcode, - MGMT_STATUS_INVALID_INDEX); - goto done; - } + if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) { + eir_len = eir_append_data(rp->eir, eir_len, + EIR_LE_SC_CONFIRM, + hash, sizeof(hash)); - handler = &mgmt_handlers[opcode]; + eir_len = eir_append_data(rp->eir, eir_len, + EIR_LE_SC_RANDOM, + rand, sizeof(rand)); + } - if ((handler->var_len && len < handler->data_len) || - (!handler->var_len && len != handler->data_len)) { - err = cmd_status(sk, index, opcode, - MGMT_STATUS_INVALID_PARAMS); - goto done; + flags = get_adv_discov_flags(hdev); + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + flags |= LE_AD_NO_BREDR; + + eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS, + &flags, sizeof(flags)); + break; } - if (hdev) - mgmt_init_hdev(sk, hdev); + rp->type = cp->type; + rp->eir_len = cpu_to_le16(eir_len); + + hci_dev_unlock(hdev); - cp = buf + sizeof(*hdr); + hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS); - err = handler->func(sk, hdev, cp, len); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, + MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len); if (err < 0) goto done; - err = msglen; + err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, + rp, sizeof(*rp) + eir_len, + HCI_MGMT_OOB_DATA_EVENTS, sk); done: - if (hdev) - hci_dev_put(hdev); + kfree(rp); - kfree(buf); return err; } +static int read_adv_features(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_rp_read_adv_features *rp; + size_t rp_len; + int err; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + rp_len = sizeof(*rp); + rp = kmalloc(rp_len, GFP_ATOMIC); + if (!rp) { + hci_dev_unlock(hdev); + return -ENOMEM; + } + + rp->supported_flags = cpu_to_le32(0); + rp->max_adv_data_len = 31; + rp->max_scan_rsp_len = 31; + rp->max_instances = 0; + rp->num_instances = 0; + + hci_dev_unlock(hdev); + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, + MGMT_STATUS_SUCCESS, rp, rp_len); + + kfree(rp); + + return err; +} + +static const struct hci_mgmt_handler mgmt_handlers[] = { + { NULL }, /* 0x0000 (no command) */ + { read_version, MGMT_READ_VERSION_SIZE, + HCI_MGMT_NO_HDEV | + HCI_MGMT_UNTRUSTED }, + { read_commands, MGMT_READ_COMMANDS_SIZE, + HCI_MGMT_NO_HDEV | + HCI_MGMT_UNTRUSTED }, + { read_index_list, MGMT_READ_INDEX_LIST_SIZE, + HCI_MGMT_NO_HDEV | + HCI_MGMT_UNTRUSTED }, + { 
read_controller_info, MGMT_READ_INFO_SIZE, + HCI_MGMT_UNTRUSTED }, + { set_powered, MGMT_SETTING_SIZE }, + { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE }, + { set_connectable, MGMT_SETTING_SIZE }, + { set_fast_connectable, MGMT_SETTING_SIZE }, + { set_bondable, MGMT_SETTING_SIZE }, + { set_link_security, MGMT_SETTING_SIZE }, + { set_ssp, MGMT_SETTING_SIZE }, + { set_hs, MGMT_SETTING_SIZE }, + { set_le, MGMT_SETTING_SIZE }, + { set_dev_class, MGMT_SET_DEV_CLASS_SIZE }, + { set_local_name, MGMT_SET_LOCAL_NAME_SIZE }, + { add_uuid, MGMT_ADD_UUID_SIZE }, + { remove_uuid, MGMT_REMOVE_UUID_SIZE }, + { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE, + HCI_MGMT_VAR_LEN }, + { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE, + HCI_MGMT_VAR_LEN }, + { disconnect, MGMT_DISCONNECT_SIZE }, + { get_connections, MGMT_GET_CONNECTIONS_SIZE }, + { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE }, + { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE }, + { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE }, + { pair_device, MGMT_PAIR_DEVICE_SIZE }, + { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE }, + { unpair_device, MGMT_UNPAIR_DEVICE_SIZE }, + { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE }, + { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE }, + { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE }, + { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE }, + { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE }, + { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE, + HCI_MGMT_VAR_LEN }, + { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE }, + { start_discovery, MGMT_START_DISCOVERY_SIZE }, + { stop_discovery, MGMT_STOP_DISCOVERY_SIZE }, + { confirm_name, MGMT_CONFIRM_NAME_SIZE }, + { block_device, MGMT_BLOCK_DEVICE_SIZE }, + { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE }, + { set_device_id, MGMT_SET_DEVICE_ID_SIZE }, + { set_advertising, MGMT_SETTING_SIZE }, + { set_bredr, MGMT_SETTING_SIZE }, + { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE }, + { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE }, + { set_secure_conn, MGMT_SETTING_SIZE }, + { set_debug_keys, MGMT_SETTING_SIZE }, + { set_privacy, MGMT_SET_PRIVACY_SIZE }, + { load_irks, MGMT_LOAD_IRKS_SIZE, + HCI_MGMT_VAR_LEN }, + { get_conn_info, MGMT_GET_CONN_INFO_SIZE }, + { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE }, + { add_device, MGMT_ADD_DEVICE_SIZE }, + { remove_device, MGMT_REMOVE_DEVICE_SIZE }, + { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE, + HCI_MGMT_VAR_LEN }, + { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE, + HCI_MGMT_NO_HDEV | + HCI_MGMT_UNTRUSTED }, + { read_config_info, MGMT_READ_CONFIG_INFO_SIZE, + HCI_MGMT_UNCONFIGURED | + HCI_MGMT_UNTRUSTED }, + { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE, + HCI_MGMT_UNCONFIGURED }, + { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE, + HCI_MGMT_UNCONFIGURED }, + { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE, + HCI_MGMT_VAR_LEN }, + { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE }, + { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE, + HCI_MGMT_NO_HDEV | + HCI_MGMT_UNTRUSTED }, + { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE }, +}; + void mgmt_index_added(struct hci_dev *hdev) { - if (hdev->dev_type != HCI_BREDR) - return; + struct mgmt_ev_ext_index ev; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return; - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) - mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL); - else - mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); + switch (hdev->dev_type) { + case 
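The rebuilt handler table replaces the old var_len boolean with a flags word, so one field now expresses variable length (HCI_MGMT_VAR_LEN), commands that take no controller index (HCI_MGMT_NO_HDEV), commands allowed on unconfigured controllers (HCI_MGMT_UNCONFIGURED), and commands open to untrusted sockets (HCI_MGMT_UNTRUSTED). The dispatcher that consumes these flags lives on the hci_sock.c side of this series and is not part of this diff; the sketch below only illustrates how the length check could read, assuming the field shapes visible in the table:

    /* Hedged sketch, not the real dispatcher: fixed-size commands must match
     * data_len exactly, variable-length ones treat it as a minimum.
     */
    static int check_cmd_len(const struct hci_mgmt_handler *handler, u16 len)
    {
        if (handler->flags & HCI_MGMT_VAR_LEN) {
            if (len < handler->data_len)
                return -EINVAL;
        } else {
            if (len != handler->data_len)
                return -EINVAL;
        }

        return 0;
    }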
HCI_BREDR: + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, + NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); + ev.type = 0x01; + } else { + mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, + HCI_MGMT_INDEX_EVENTS); + ev.type = 0x00; + } + break; + case HCI_AMP: + ev.type = 0x02; + break; + default: + return; + } + + ev.bus = hdev->bus; + + mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev), + HCI_MGMT_EXT_INDEX_EVENTS); } void mgmt_index_removed(struct hci_dev *hdev) { + struct mgmt_ev_ext_index ev; u8 status = MGMT_STATUS_INVALID_INDEX; - if (hdev->dev_type != HCI_BREDR) + if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return; - if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + switch (hdev->dev_type) { + case HCI_BREDR: + mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, + NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); + ev.type = 0x01; + } else { + mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, + HCI_MGMT_INDEX_EVENTS); + ev.type = 0x00; + } + break; + case HCI_AMP: + ev.type = 0x02; + break; + default: return; + } - mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); + ev.bus = hdev->bus; - if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) - mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL); - else - mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); + mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev), + HCI_MGMT_EXT_INDEX_EVENTS); } /* This function requires the caller holds hdev->lock */ @@ -6377,7 +6530,7 @@ static int powered_update_hci(struct hci_dev *hdev) hci_req_init(&req, hdev); - if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && !lmp_host_ssp_capable(hdev)) { u8 mode = 0x01; @@ -6391,7 +6544,7 @@ static int powered_update_hci(struct hci_dev *hdev) } } - if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) && lmp_bredr_capable(hdev)) { struct hci_cp_write_le_host_supported cp; @@ -6412,24 +6565,27 @@ static int powered_update_hci(struct hci_dev *hdev) * advertising data. This also applies to the case * where BR/EDR was toggled during the AUTO_OFF phase. */ - if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { update_adv_data(&req); update_scan_rsp_data(&req); } - if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) enable_advertising(&req); restart_le_actions(&req); } - link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags); + link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(link_sec), &link_sec); if (lmp_bredr_capable(hdev)) { - write_fast_connectable(&req, false); + if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) + write_fast_connectable(&req, true); + else + write_fast_connectable(&req, false); __hci_update_page_scan(&req); update_class(&req); update_name(&req); @@ -6445,7 +6601,7 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) u8 status, zero_cod[] = { 0, 0, 0 }; int err; - if (!test_bit(HCI_MGMT, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_MGMT)) return 0; if (powered) { @@ -6466,7 +6622,7 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) * been triggered, potentially causing misleading DISCONNECTED * status responses. 
*/ - if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) status = MGMT_STATUS_INVALID_INDEX; else status = MGMT_STATUS_NOT_POWERED; @@ -6474,8 +6630,8 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) - mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, - zero_cod, sizeof(zero_cod), NULL); + mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, + zero_cod, sizeof(zero_cod), NULL); new_settings: err = new_settings(hdev, match.sk); @@ -6488,10 +6644,10 @@ new_settings: void mgmt_set_powered_failed(struct hci_dev *hdev, int err) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; u8 status; - cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); + cmd = pending_find(MGMT_OP_SET_POWERED, hdev); if (!cmd) return; @@ -6500,7 +6656,7 @@ void mgmt_set_powered_failed(struct hci_dev *hdev, int err) else status = MGMT_STATUS_FAILED; - cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); mgmt_pending_remove(cmd); } @@ -6516,11 +6672,11 @@ void mgmt_discoverable_timeout(struct hci_dev *hdev) * of a timeout triggered from general discoverable, it is * safe to unconditionally clear the flag. */ - clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags); - clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hci_req_init(&req, hdev); - if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { u8 scan = SCAN_PAGE; hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); @@ -6691,17 +6847,6 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL); } -static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, - u8 data_len) -{ - eir[eir_len++] = sizeof(type) + data_len; - eir[eir_len++] = type; - memcpy(&eir[eir_len], data, data_len); - eir_len += data_len; - - return eir_len; -} - void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, u32 flags, u8 *name, u8 name_len) { @@ -6739,7 +6884,7 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, sizeof(*ev) + eir_len, NULL); } -static void disconnect_rsp(struct pending_cmd *cmd, void *data) +static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct sock **sk = data; @@ -6751,7 +6896,7 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data) mgmt_pending_remove(cmd); } -static void unpair_device_rsp(struct pending_cmd *cmd, void *data) +static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct hci_dev *hdev = data; struct mgmt_cp_unpair_device *cp = cmd->param; @@ -6764,10 +6909,10 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data) bool mgmt_powering_down(struct hci_dev *hdev) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; struct mgmt_mode *cp; - cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); + cmd = pending_find(MGMT_OP_SET_POWERED, hdev); if (!cmd) return false; @@ -6819,12 +6964,12 @@ void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, { u8 bdaddr_type = link_to_bdaddr(link_type, addr_type); struct mgmt_cp_disconnect *cp; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, 
unpair_device_rsp, hdev); - cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev); + cmd = pending_find(MGMT_OP_DISCONNECT, hdev); if (!cmd) return; @@ -6874,9 +7019,9 @@ void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure) void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; - cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); + cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); if (!cmd) return; @@ -6887,9 +7032,9 @@ void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; - cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); + cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); if (!cmd) return; @@ -6932,9 +7077,9 @@ static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status, u8 opcode) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; - cmd = mgmt_pending_find(opcode, hdev); + cmd = pending_find(opcode, hdev); if (!cmd) return -ENOENT; @@ -6993,7 +7138,7 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status) { struct mgmt_ev_auth_failed ev; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; u8 status = mgmt_status(hci_status); bacpy(&ev.addr.bdaddr, &conn->dst); @@ -7024,11 +7169,9 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) } if (test_bit(HCI_AUTH, &hdev->flags)) - changed = !test_and_set_bit(HCI_LINK_SECURITY, - &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY); else - changed = test_and_clear_bit(HCI_LINK_SECURITY, - &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY); mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp, &match); @@ -7064,9 +7207,9 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) if (status) { u8 mgmt_err = mgmt_status(status); - if (enable && test_and_clear_bit(HCI_SSP_ENABLED, - &hdev->dev_flags)) { - clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); + if (enable && hci_dev_test_and_clear_flag(hdev, + HCI_SSP_ENABLED)) { + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); new_settings(hdev, NULL); } @@ -7076,14 +7219,14 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) } if (enable) { - changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags); + changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); } else { - changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); if (!changed) - changed = test_and_clear_bit(HCI_HS_ENABLED, - &hdev->dev_flags); + changed = hci_dev_test_and_clear_flag(hdev, + HCI_HS_ENABLED); else - clear_bit(HCI_HS_ENABLED, &hdev->dev_flags); + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); @@ -7096,8 +7239,8 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) hci_req_init(&req, hdev); - if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { - if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { + if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(enable), &enable); update_eir(&req); @@ -7108,7 
+7251,7 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) hci_req_run(&req, NULL); } -static void sk_lookup(struct pending_cmd *cmd, void *data) +static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data) { struct cmd_lookup *match = data; @@ -7128,8 +7271,8 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match); if (!status) - mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3, - NULL); + mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, + dev_class, 3, NULL); if (match.sk) sock_put(match.sk); @@ -7138,7 +7281,7 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) { struct mgmt_cp_set_local_name ev; - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; if (status) return; @@ -7147,36 +7290,36 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH); - cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); + cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); if (!cmd) { memcpy(hdev->dev_name, name, sizeof(hdev->dev_name)); /* If this is a HCI command related to powering on the * HCI dev don't send any mgmt signals. */ - if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) + if (pending_find(MGMT_OP_SET_POWERED, hdev)) return; } - mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), - cmd ? cmd->sk : NULL); + mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), + cmd ? cmd->sk : NULL); } void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, u8 *rand192, u8 *hash256, u8 *rand256, u8 status) { - struct pending_cmd *cmd; + struct mgmt_pending_cmd *cmd; BT_DBG("%s status %u", hdev->name, status); - cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); + cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); if (!cmd) return; if (status) { - cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, - mgmt_status(status)); + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + mgmt_status(status)); } else { struct mgmt_rp_read_local_oob_data rp; size_t rp_size = sizeof(rp); @@ -7191,8 +7334,9 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256); } - cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, 0, - &rp, rp_size); + mgmt_cmd_complete(cmd->sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_DATA, 0, + &rp, rp_size); } mgmt_pending_remove(cmd); @@ -7268,7 +7412,7 @@ static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16]) static void restart_le_scan(struct hci_dev *hdev) { /* If controller is not scanning we are done. */ - if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return; if (time_after(jiffies + DISCOV_LE_RESTART_DELAY, @@ -7280,14 +7424,58 @@ static void restart_le_scan(struct hci_dev *hdev) DISCOV_LE_RESTART_DELAY); } +static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir, + u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) +{ + /* If a RSSI threshold has been specified, and + * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with + * a RSSI smaller than the RSSI threshold will be dropped. If the quirk + * is set, let it through for further processing, as we might need to + * restart the scan. 
+ * + * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry, + * the results are also dropped. + */ + if (hdev->discovery.rssi != HCI_RSSI_INVALID && + (rssi == HCI_RSSI_INVALID || + (rssi < hdev->discovery.rssi && + !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)))) + return false; + + if (hdev->discovery.uuid_count != 0) { + /* If a list of UUIDs is provided in filter, results with no + * matching UUID should be dropped. + */ + if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count, + hdev->discovery.uuids) && + !eir_has_uuids(scan_rsp, scan_rsp_len, + hdev->discovery.uuid_count, + hdev->discovery.uuids)) + return false; + } + + /* If duplicate filtering does not report RSSI changes, then restart + * scanning to ensure updated result with updated RSSI values. + */ + if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) { + restart_le_scan(hdev); + + /* Validate RSSI value against the RSSI threshold once more. */ + if (hdev->discovery.rssi != HCI_RSSI_INVALID && + rssi < hdev->discovery.rssi) + return false; + } + + return true; +} + void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) { char buf[512]; - struct mgmt_ev_device_found *ev = (void *) buf; + struct mgmt_ev_device_found *ev = (void *)buf; size_t ev_size; - bool match; /* Don't send events for a non-kernel initiated discovery. With * LE one exception is if we have pend_le_reports > 0 in which @@ -7300,21 +7488,12 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, return; } - /* When using service discovery with a RSSI threshold, then check - * if such a RSSI threshold is specified. If a RSSI threshold has - * been specified, and HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, - * then all results with a RSSI smaller than the RSSI threshold will be - * dropped. If the quirk is set, let it through for further processing, - * as we might need to restart the scan. - * - * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry, - * the results are also dropped. - */ - if (hdev->discovery.rssi != HCI_RSSI_INVALID && - (rssi == HCI_RSSI_INVALID || - (rssi < hdev->discovery.rssi && - !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)))) - return; + if (hdev->discovery.result_filtering) { + /* We are using service discovery */ + if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp, + scan_rsp_len)) + return; + } /* Make sure that the buffer is big enough. The 5 extra bytes * are for the potential CoD field. @@ -7341,87 +7520,17 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, ev->rssi = rssi; ev->flags = cpu_to_le32(flags); - if (eir_len > 0) { - /* When using service discovery and a list of UUID is - * provided, results with no matching UUID should be - * dropped. In case there is a match the result is - * kept and checking possible scan response data - * will be skipped. - */ - if (hdev->discovery.uuid_count > 0) { - match = eir_has_uuids(eir, eir_len, - hdev->discovery.uuid_count, - hdev->discovery.uuids); - /* If duplicate filtering does not report RSSI changes, - * then restart scanning to ensure updated result with - * updated RSSI values. 
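The new is_filter_match() above consolidates filtering that mgmt_device_found() previously re-derived in three separate branches (the surrounding deletions). The subtle part is the RSSI gate's interaction with HCI_QUIRK_STRICT_DUPLICATE_FILTER: a below-threshold result is let through so the scan can be restarted, then re-checked against the threshold. A simplified, runnable model of just that gate; UUID filtering and the restart itself are omitted, and RSSI_INVALID stands in for HCI_RSSI_INVALID (127):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RSSI_INVALID 127    /* stand-in for HCI_RSSI_INVALID */

    /* Simplified model of the RSSI gate in is_filter_match(): results below
     * the threshold are dropped unless the strict-duplicate-filter quirk is
     * set, in which case they pass here and are re-checked after the scan
     * restart.
     */
    static bool rssi_gate(int8_t threshold, int8_t rssi, bool strict_quirk)
    {
        if (threshold == RSSI_INVALID)
            return true;                    /* no threshold configured  */
        if (rssi == RSSI_INVALID)
            return false;                   /* pre-1.2 inquiry, no RSSI */
        if (rssi < threshold && !strict_quirk)
            return false;                   /* below threshold, drop    */
        return true;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               rssi_gate(-70, -80, false),          /* 0: dropped               */
               rssi_gate(-70, -80, true),           /* 1: kept for restart path */
               rssi_gate(-70, RSSI_INVALID, true)); /* 0: no RSSI at all        */
        return 0;
    }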
- */ - if (match && test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, - &hdev->quirks)) - restart_le_scan(hdev); - } else { - match = true; - } - - if (!match && !scan_rsp_len) - return; - + if (eir_len > 0) /* Copy EIR or advertising data into event */ memcpy(ev->eir, eir, eir_len); - } else { - /* When using service discovery and a list of UUID is - * provided, results with empty EIR or advertising data - * should be dropped since they do not match any UUID. - */ - if (hdev->discovery.uuid_count > 0 && !scan_rsp_len) - return; - - match = false; - } if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV)) eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, dev_class, 3); - if (scan_rsp_len > 0) { - /* When using service discovery and a list of UUID is - * provided, results with no matching UUID should be - * dropped if there is no previous match from the - * advertising data. - */ - if (hdev->discovery.uuid_count > 0) { - if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len, - hdev->discovery.uuid_count, - hdev->discovery.uuids)) - return; - - /* If duplicate filtering does not report RSSI changes, - * then restart scanning to ensure updated result with - * updated RSSI values. - */ - if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, - &hdev->quirks)) - restart_le_scan(hdev); - } - + if (scan_rsp_len > 0) /* Append scan response data to event */ memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len); - } else { - /* When using service discovery and a list of UUID is - * provided, results with empty scan response and no - * previous matched advertising data should be dropped. - */ - if (hdev->discovery.uuid_count > 0 && !match) - return; - } - - /* Validate the reported RSSI value against the RSSI threshold once more - * incase HCI_QUIRK_STRICT_DUPLICATE_FILTER forced a restart of LE - * scanning. - */ - if (hdev->discovery.rssi != HCI_RSSI_INVALID && - rssi < hdev->discovery.rssi) - return; ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); ev_size = sizeof(*ev) + eir_len + scan_rsp_len; @@ -7474,10 +7583,27 @@ void mgmt_reenable_advertising(struct hci_dev *hdev) { struct hci_request req; - if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_ADVERTISING)) return; hci_req_init(&req, hdev); enable_advertising(&req); hci_req_run(&req, adv_enable_complete); } + +static struct hci_mgmt_chan chan = { + .channel = HCI_CHANNEL_CONTROL, + .handler_count = ARRAY_SIZE(mgmt_handlers), + .handlers = mgmt_handlers, + .hdev_init = mgmt_init_hdev, +}; + +int mgmt_init(void) +{ + return hci_mgmt_chan_register(&chan); +} + +void mgmt_exit(void) +{ + hci_mgmt_chan_unregister(&chan); +} diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c new file mode 100644 index 000000000000..8c30c7eb8bef --- /dev/null +++ b/net/bluetooth/mgmt_util.c @@ -0,0 +1,210 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + + Copyright (C) 2015 Intel Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/mgmt.h> + +#include "mgmt_util.h" + +int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, + void *data, u16 data_len, int flag, struct sock *skip_sk) +{ + struct sk_buff *skb; + struct mgmt_hdr *hdr; + + skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = (void *) skb_put(skb, sizeof(*hdr)); + hdr->opcode = cpu_to_le16(event); + if (hdev) + hdr->index = cpu_to_le16(hdev->id); + else + hdr->index = cpu_to_le16(MGMT_INDEX_NONE); + hdr->len = cpu_to_le16(data_len); + + if (data) + memcpy(skb_put(skb, data_len), data, data_len); + + /* Time stamp */ + __net_timestamp(skb); + + hci_send_to_channel(channel, skb, flag, skip_sk); + kfree_skb(skb); + + return 0; +} + +int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) +{ + struct sk_buff *skb; + struct mgmt_hdr *hdr; + struct mgmt_ev_cmd_status *ev; + int err; + + BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); + + skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = (void *) skb_put(skb, sizeof(*hdr)); + + hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); + hdr->index = cpu_to_le16(index); + hdr->len = cpu_to_le16(sizeof(*ev)); + + ev = (void *) skb_put(skb, sizeof(*ev)); + ev->status = status; + ev->opcode = cpu_to_le16(cmd); + + err = sock_queue_rcv_skb(sk, skb); + if (err < 0) + kfree_skb(skb); + + return err; +} + +int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, + void *rp, size_t rp_len) +{ + struct sk_buff *skb; + struct mgmt_hdr *hdr; + struct mgmt_ev_cmd_complete *ev; + int err; + + BT_DBG("sock %p", sk); + + skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = (void *) skb_put(skb, sizeof(*hdr)); + + hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); + hdr->index = cpu_to_le16(index); + hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); + + ev = (void *) skb_put(skb, sizeof(*ev) + rp_len); + ev->opcode = cpu_to_le16(cmd); + ev->status = status; + + if (rp) + memcpy(ev->data, rp, rp_len); + + err = sock_queue_rcv_skb(sk, skb); + if (err < 0) + kfree_skb(skb); + + return err; +} + +struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, + struct hci_dev *hdev) +{ + struct mgmt_pending_cmd *cmd; + + list_for_each_entry(cmd, &hdev->mgmt_pending, list) { + if (hci_sock_get_channel(cmd->sk) != channel) + continue; + if (cmd->opcode == opcode) + return cmd; + } + + return NULL; +} + +struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, + u16 opcode, + struct hci_dev *hdev, + const void *data) +{ + struct mgmt_pending_cmd *cmd; + + list_for_each_entry(cmd, &hdev->mgmt_pending, list) { + if (cmd->user_data != data) + continue; + if (cmd->opcode == opcode) + return cmd; + } + + return NULL; +} + +void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, + void (*cb)(struct 
mgmt_pending_cmd *cmd, void *data), + void *data) +{ + struct mgmt_pending_cmd *cmd, *tmp; + + list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { + if (opcode > 0 && cmd->opcode != opcode) + continue; + + cb(cmd, data); + } +} + +struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, + struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_pending_cmd *cmd; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return NULL; + + cmd->opcode = opcode; + cmd->index = hdev->id; + + cmd->param = kmemdup(data, len, GFP_KERNEL); + if (!cmd->param) { + kfree(cmd); + return NULL; + } + + cmd->param_len = len; + + cmd->sk = sk; + sock_hold(sk); + + list_add(&cmd->list, &hdev->mgmt_pending); + + return cmd; +} + +void mgmt_pending_free(struct mgmt_pending_cmd *cmd) +{ + sock_put(cmd->sk); + kfree(cmd->param); + kfree(cmd); +} + +void mgmt_pending_remove(struct mgmt_pending_cmd *cmd) +{ + list_del(&cmd->list); + mgmt_pending_free(cmd); +} diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h new file mode 100644 index 000000000000..6559f189213c --- /dev/null +++ b/net/bluetooth/mgmt_util.h @@ -0,0 +1,53 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2015 Intel Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
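The pending-command helpers collected in mgmt_util.c follow one ownership rule: mgmt_pending_add() takes a reference on the requesting socket plus a private copy of the parameters, and mgmt_pending_free() releases both. A standalone sketch of the same rule, with the list and refcount scaffolding invented for illustration:

#include <stdlib.h>
#include <string.h>

struct pending {
	struct pending *next;
	unsigned int opcode;
	void *param;		/* private copy, owned by this entry */
	size_t param_len;
	int *sk_refcnt;		/* stands in for sock_hold()/sock_put() */
};

static struct pending *pending_add(struct pending **head, unsigned int opcode,
				   int *sk_refcnt, const void *data, size_t len)
{
	struct pending *cmd = calloc(1, sizeof(*cmd));

	if (!cmd)
		return NULL;

	cmd->param = malloc(len);	/* like kmemdup() in the patch */
	if (!cmd->param) {
		free(cmd);
		return NULL;
	}
	memcpy(cmd->param, data, len);

	cmd->opcode = opcode;
	cmd->param_len = len;
	cmd->sk_refcnt = sk_refcnt;
	(*sk_refcnt)++;			/* sock_hold() */

	cmd->next = *head;		/* list_add() */
	*head = cmd;
	return cmd;
}

static void pending_free(struct pending *cmd)
{
	(*cmd->sk_refcnt)--;		/* sock_put() */
	free(cmd->param);
	free(cmd);
}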
+*/ + +struct mgmt_pending_cmd { + struct list_head list; + u16 opcode; + int index; + void *param; + size_t param_len; + struct sock *sk; + void *user_data; + int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status); +}; + +int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, + void *data, u16 data_len, int flag, struct sock *skip_sk); +int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status); +int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, + void *rp, size_t rp_len); + +struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, + struct hci_dev *hdev); +struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, + u16 opcode, + struct hci_dev *hdev, + const void *data); +void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, + void (*cb)(struct mgmt_pending_cmd *cmd, void *data), + void *data); +struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, + struct hci_dev *hdev, + void *data, u16 len); +void mgmt_pending_free(struct mgmt_pending_cmd *cmd); +void mgmt_pending_remove(struct mgmt_pending_cmd *cmd); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 54279ac28120..4322c833e748 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -1231,7 +1231,7 @@ error: return err; } -void __exit sco_exit(void) +void sco_exit(void) { bt_procfs_cleanup(&init_net, "sco"); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index c91c19bfc0a8..1ec3f66b5a74 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -52,7 +52,7 @@ #define SMP_TIMEOUT msecs_to_jiffies(30000) -#define AUTH_REQ_MASK(dev) (test_bit(HCI_SC_ENABLED, &(dev)->dev_flags) ? \ +#define AUTH_REQ_MASK(dev) (hci_dev_test_flag(dev, HCI_SC_ENABLED) ? \ 0x1f : 0x07) #define KEY_DIST_MASK 0x07 @@ -70,7 +70,19 @@ enum { SMP_FLAG_DEBUG_KEY, SMP_FLAG_WAIT_USER, SMP_FLAG_DHKEY_PENDING, - SMP_FLAG_OOB, + SMP_FLAG_REMOTE_OOB, + SMP_FLAG_LOCAL_OOB, +}; + +struct smp_dev { + /* Secure Connections OOB data */ + u8 local_pk[64]; + u8 local_sk[32]; + u8 local_rand[16]; + bool debug_key; + + struct crypto_blkcipher *tfm_aes; + struct crypto_hash *tfm_cmac; }; struct smp_chan { @@ -84,7 +96,8 @@ struct smp_chan { u8 rrnd[16]; /* SMP Pairing Random (remote) */ u8 pcnf[16]; /* SMP Pairing Confirm */ u8 tk[16]; /* SMP Temporary Key */ - u8 rr[16]; + u8 rr[16]; /* Remote OOB ra/rb value */ + u8 lr[16]; /* Local OOB ra/rb value */ u8 enc_key_size; u8 remote_key_dist; bdaddr_t id_addr; @@ -478,18 +491,18 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], const bdaddr_t *bdaddr) { struct l2cap_chan *chan = hdev->smp_data; - struct crypto_blkcipher *tfm; + struct smp_dev *smp; u8 hash[3]; int err; if (!chan || !chan->data) return false; - tfm = chan->data; + smp = chan->data; BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk); - err = smp_ah(tfm, irk, &bdaddr->b[3], hash); + err = smp_ah(smp->tfm_aes, irk, &bdaddr->b[3], hash); if (err) return false; @@ -499,20 +512,20 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa) { struct l2cap_chan *chan = hdev->smp_data; - struct crypto_blkcipher *tfm; + struct smp_dev *smp; int err; if (!chan || !chan->data) return -EOPNOTSUPP; - tfm = chan->data; + smp = chan->data; get_random_bytes(&rpa->b[3], 3); rpa->b[5] &= 0x3f; /* Clear two most significant bits */ rpa->b[5] |= 0x40; /* Set second most significant bit */ - err = smp_ah(tfm, irk, &rpa->b[3], rpa->b); + err = 
smp_ah(smp->tfm_aes, irk, &rpa->b[3], rpa->b); if (err < 0) return err; @@ -521,6 +534,53 @@ int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa) return 0; } +int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) +{ + struct l2cap_chan *chan = hdev->smp_data; + struct smp_dev *smp; + int err; + + if (!chan || !chan->data) + return -EOPNOTSUPP; + + smp = chan->data; + + if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { + BT_DBG("Using debug keys"); + memcpy(smp->local_pk, debug_pk, 64); + memcpy(smp->local_sk, debug_sk, 32); + smp->debug_key = true; + } else { + while (true) { + /* Generate local key pair for Secure Connections */ + if (!ecc_make_key(smp->local_pk, smp->local_sk)) + return -EIO; + + /* This is unlikely, but we need to check that + * we didn't accidentally generate a debug key. + */ + if (memcmp(smp->local_sk, debug_sk, 32)) + break; + } + smp->debug_key = false; + } + + SMP_DBG("OOB Public Key X: %32phN", smp->local_pk); + SMP_DBG("OOB Public Key Y: %32phN", smp->local_pk + 32); + SMP_DBG("OOB Private Key: %32phN", smp->local_sk); + + get_random_bytes(smp->local_rand, 16); + + err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->local_pk, + smp->local_rand, 0, hash); + if (err < 0) + return err; + + memcpy(rand, smp->local_rand, 16); + + return 0; +} + static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) { struct l2cap_chan *chan = conn->smp; @@ -589,7 +649,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn, struct hci_dev *hdev = hcon->hdev; u8 local_dist = 0, remote_dist = 0, oob_flag = SMP_OOB_NOT_PRESENT; - if (test_bit(HCI_BONDABLE, &conn->hcon->hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_BONDABLE)) { local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; authreq |= SMP_AUTH_BONDING; @@ -597,18 +657,18 @@ static void build_pairing_cmd(struct l2cap_conn *conn, authreq &= ~SMP_AUTH_BONDING; } - if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING)) remote_dist |= SMP_DIST_ID_KEY; - if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) local_dist |= SMP_DIST_ID_KEY; - if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) && + if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) && (authreq & SMP_AUTH_SC)) { struct oob_data *oob_data; u8 bdaddr_type; - if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { local_dist |= SMP_DIST_LINK_KEY; remote_dist |= SMP_DIST_LINK_KEY; } @@ -621,10 +681,12 @@ static void build_pairing_cmd(struct l2cap_conn *conn, oob_data = hci_find_remote_oob_data(hdev, &hcon->dst, bdaddr_type); if (oob_data && oob_data->present) { - set_bit(SMP_FLAG_OOB, &smp->flags); + set_bit(SMP_FLAG_REMOTE_OOB, &smp->flags); oob_flag = SMP_OOB_PRESENT; memcpy(smp->rr, oob_data->rand256, 16); memcpy(smp->pcnf, oob_data->hash256, 16); + SMP_DBG("OOB Remote Confirmation: %16phN", smp->pcnf); + SMP_DBG("OOB Remote Random: %16phN", smp->rr); } } else { @@ -681,9 +743,9 @@ static void smp_chan_destroy(struct l2cap_conn *conn) complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); mgmt_smp_complete(hcon, complete); - kfree(smp->csrk); - kfree(smp->slave_csrk); - kfree(smp->link_key); + kzfree(smp->csrk); + kzfree(smp->slave_csrk); + kzfree(smp->link_key); crypto_free_blkcipher(smp->tfm_aes); crypto_free_hash(smp->tfm_cmac); @@ -692,7 +754,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn) * support hasn't been explicitly enabled. 
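The kfree() to kzfree() conversions in smp_chan_destroy() and sc_generate_link_key() wipe key material before the memory returns to the allocator. A portable sketch of the idea; secure_free() is a hypothetical name, and unlike kzfree() it must be told the length since it cannot query the allocator:

#include <stdlib.h>

static void secure_free(void *p, size_t len)
{
	volatile unsigned char *v = p;

	if (!p)
		return;
	/* the volatile qualifier keeps the compiler from eliding the
	 * wipe, which is what kzfree()/memzero_explicit() guarantee */
	while (len--)
		*v++ = 0;
	free(p);
}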
*/ if (smp->ltk && smp->ltk->type == SMP_LTK_P256_DEBUG && - !test_bit(HCI_KEEP_DEBUG_KEYS, &hcon->hdev->dev_flags)) { + !hci_dev_test_flag(hcon->hdev, HCI_KEEP_DEBUG_KEYS)) { list_del_rcu(&smp->ltk->list); kfree_rcu(smp->ltk, rcu); smp->ltk = NULL; @@ -717,7 +779,7 @@ static void smp_chan_destroy(struct l2cap_conn *conn) } chan->data = NULL; - kfree(smp); + kzfree(smp); hci_conn_drop(hcon); } @@ -818,6 +880,12 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, return 0; } + /* If this function is used for SC -> legacy fallback we + * can only recover the just-works case. + */ + if (test_bit(SMP_FLAG_SC, &smp->flags)) + return -EINVAL; + /* Not Just Works/Confirm results in MITM Authentication */ if (smp->method != JUST_CFM) { set_bit(SMP_FLAG_MITM_AUTH, &smp->flags); @@ -1052,7 +1120,7 @@ static void smp_notify_keys(struct l2cap_conn *conn) /* Don't keep debug keys around if the relevant * flag is not set. */ - if (!test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags) && + if (!hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS) && key->type == HCI_LK_DEBUG_COMBINATION) { list_del_rcu(&key->list); kfree_rcu(key, rcu); @@ -1097,13 +1165,13 @@ static void sc_generate_link_key(struct smp_chan *smp) return; if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) { - kfree(smp->link_key); + kzfree(smp->link_key); smp->link_key = NULL; return; } if (smp_h6(smp->tfm_cmac, smp->link_key, lebr, smp->link_key)) { - kfree(smp->link_key); + kzfree(smp->link_key); smp->link_key = NULL; return; } @@ -1300,7 +1368,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(smp->tfm_aes)) { BT_ERR("Unable to create ECB crypto context"); - kfree(smp); + kzfree(smp); return NULL; } @@ -1308,7 +1376,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) if (IS_ERR(smp->tfm_cmac)) { BT_ERR("Unable to create CMAC crypto context"); crypto_free_blkcipher(smp->tfm_aes); - kfree(smp); + kzfree(smp); return NULL; } @@ -1604,15 +1672,15 @@ static void build_bredr_pairing_cmd(struct smp_chan *smp, struct hci_dev *hdev = conn->hcon->hdev; u8 local_dist = 0, remote_dist = 0; - if (test_bit(HCI_BONDABLE, &hdev->dev_flags)) { + if (hci_dev_test_flag(hdev, HCI_BONDABLE)) { local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; } - if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING)) remote_dist |= SMP_DIST_ID_KEY; - if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) local_dist |= SMP_DIST_ID_KEY; if (!rsp) { @@ -1664,22 +1732,29 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) /* We didn't start the pairing, so match remote */ auth = req->auth_req & AUTH_REQ_MASK(hdev); - if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) && + if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && (auth & SMP_AUTH_BONDING)) return SMP_PAIRING_NOTSUPP; - if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC)) + if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC)) return SMP_AUTH_REQUIREMENTS; smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], req, sizeof(*req)); skb_pull(skb, sizeof(*req)); + /* If the remote side's OOB flag is set it means it has + * successfully received our local OOB data - therefore set the + * flag to indicate that local OOB is in use. 
+ */ + if (req->oob_flag == SMP_OOB_PRESENT) + set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); + /* SMP over BR/EDR requires special treatment */ if (conn->hcon->type == ACL_LINK) { /* We must have a BR/EDR SC link */ if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) && - !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags)) + !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) return SMP_CROSS_TRANSP_NOT_ALLOWED; set_bit(SMP_FLAG_SC, &smp->flags); @@ -1737,14 +1812,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) clear_bit(SMP_FLAG_INITIATOR, &smp->flags); + /* Strictly speaking we shouldn't allow Pairing Confirm for the + * SC case, however some implementations incorrectly copy RFU auth + * req bits from our security request, which may create a false + * positive SC enablement. + */ + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + if (test_bit(SMP_FLAG_SC, &smp->flags)) { SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY); /* Clear bits which are generated but not distributed */ smp->remote_key_dist &= ~SMP_SC_NO_DIST; /* Wait for Public Key from Initiating Device */ return 0; - } else { - SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); } /* Request setup of TK */ @@ -1761,7 +1841,26 @@ static u8 sc_send_public_key(struct smp_chan *smp) BT_DBG(""); - if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags)) { + if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { + struct l2cap_chan *chan = hdev->smp_data; + struct smp_dev *smp_dev; + + if (!chan || !chan->data) + return SMP_UNSPECIFIED; + + smp_dev = chan->data; + + memcpy(smp->local_pk, smp_dev->local_pk, 64); + memcpy(smp->local_sk, smp_dev->local_sk, 32); + memcpy(smp->lr, smp_dev->local_rand, 16); + + if (smp_dev->debug_key) + set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); + + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { BT_DBG("Using debug keys"); memcpy(smp->local_pk, debug_pk, 64); memcpy(smp->local_sk, debug_sk, 32); @@ -1780,8 +1879,9 @@ static u8 sc_send_public_key(struct smp_chan *smp) } } +done: SMP_DBG("Local Public Key X: %32phN", smp->local_pk); - SMP_DBG("Local Public Key Y: %32phN", &smp->local_pk[32]); + SMP_DBG("Local Public Key Y: %32phN", smp->local_pk + 32); SMP_DBG("Local Private Key: %32phN", smp->local_sk); smp_send_cmd(smp->conn, SMP_CMD_PUBLIC_KEY, 64, smp->local_pk); @@ -1816,9 +1916,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) auth = rsp->auth_req & AUTH_REQ_MASK(hdev); - if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC)) + if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC)) return SMP_AUTH_REQUIREMENTS; + /* If the remote side's OOB flag is set it means it has + * successfully received our local OOB data - therefore set the + * flag to indicate that local OOB is in use. 
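The RFU work-around described in smp_cmd_pairing_req() comes down to how auth_req is masked: AUTH_REQ_MASK() keeps the Secure Connections bit only when SC is enabled locally, so reserved bits echoed back by a buggy peer are stripped. A small standalone illustration; the demo value is made up:

#include <stdint.h>
#include <stdio.h>

#define SMP_AUTH_BONDING 0x01
#define SMP_AUTH_SC	 0x08

/* bits 0-4 are defined when SC is on, only bits 0-2 otherwise */
static uint8_t auth_req_mask(int sc_enabled, uint8_t auth_req)
{
	return auth_req & (sc_enabled ? 0x1f : 0x07);
}

int main(void)
{
	uint8_t from_peer = 0xe9;	/* bonding + SC, plus RFU bits 5-7 */

	printf("%#x\n", auth_req_mask(1, from_peer));	/* 0x9: bonding + SC */
	printf("%#x\n", auth_req_mask(0, from_peer));	/* 0x1: bonding only */
	return 0;
}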
+ */ + if (rsp->oob_flag == SMP_OOB_PRESENT) + set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); + smp->prsp[0] = SMP_CMD_PAIRING_RSP; memcpy(&smp->prsp[1], rsp, sizeof(*rsp)); @@ -1885,10 +1992,6 @@ static u8 sc_check_confirm(struct smp_chan *smp) BT_DBG(""); - /* Public Key exchange must happen before any other steps */ - if (!test_bit(SMP_FLAG_REMOTE_PK, &smp->flags)) - return SMP_UNSPECIFIED; - if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM); @@ -1901,6 +2004,47 @@ static u8 sc_check_confirm(struct smp_chan *smp) return 0; } +/* Work-around for some implementations that incorrectly copy RFU bits + * from our security request and thereby create the impression that + * we're doing SC when in fact the remote doesn't support it. + */ +static int fixup_sc_false_positive(struct smp_chan *smp) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct smp_cmd_pairing *req, *rsp; + u8 auth; + + /* The issue is only observed when we're in slave role */ + if (hcon->out) + return SMP_UNSPECIFIED; + + if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { + BT_ERR("Refusing SMP SC -> legacy fallback in SC-only mode"); + return SMP_UNSPECIFIED; + } + + BT_ERR("Trying to fall back to legacy SMP"); + + req = (void *) &smp->preq[1]; + rsp = (void *) &smp->prsp[1]; + + /* Rebuild key dist flags which may have been cleared for SC */ + smp->remote_key_dist = (req->init_key_dist & rsp->resp_key_dist); + + auth = req->auth_req & AUTH_REQ_MASK(hdev); + + if (tk_request(conn, 0, auth, rsp->io_capability, req->io_capability)) { + BT_ERR("Failed to fall back to legacy SMP"); + return SMP_UNSPECIFIED; + } + + clear_bit(SMP_FLAG_SC, &smp->flags); + + return 0; +} + static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) { struct l2cap_chan *chan = conn->smp; @@ -1914,8 +2058,19 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf)); skb_pull(skb, sizeof(smp->pcnf)); - if (test_bit(SMP_FLAG_SC, &smp->flags)) - return sc_check_confirm(smp); + if (test_bit(SMP_FLAG_SC, &smp->flags)) { + int ret; + + /* Public Key exchange must happen before any other steps */ + if (test_bit(SMP_FLAG_REMOTE_PK, &smp->flags)) + return sc_check_confirm(smp); + + BT_ERR("Unexpected SMP Pairing Confirm"); + + ret = fixup_sc_false_positive(smp); + if (ret) + return ret; + } if (conn->hcon->out) { smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), @@ -1926,8 +2081,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) return smp_confirm(smp); - else - set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); + + set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); return 0; } @@ -2086,7 +2241,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) auth = rp->auth_req & AUTH_REQ_MASK(hdev); - if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) && !(auth & SMP_AUTH_SC)) + if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC)) return SMP_AUTH_REQUIREMENTS; if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) @@ -2107,7 +2262,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) if (!smp) return SMP_UNSPECIFIED; - if (!test_bit(HCI_BONDABLE, &hcon->hdev->dev_flags) && + if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && (auth & SMP_AUTH_BONDING)) return SMP_PAIRING_NOTSUPP; @@ -2141,7 +2296,7 @@ int 
smp_conn_security(struct hci_conn *hcon, __u8 sec_level) chan = conn->smp; - if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) + if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) return 1; if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) @@ -2170,7 +2325,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) authreq = seclevel_to_authreq(sec_level); - if (test_bit(HCI_SC_ENABLED, &hcon->hdev->dev_flags)) + if (hci_dev_test_flag(hcon->hdev, HCI_SC_ENABLED)) authreq |= SMP_AUTH_SC; /* Require MITM if IO Capability allows or the security level @@ -2374,7 +2529,8 @@ static u8 sc_select_method(struct smp_chan *smp) struct smp_cmd_pairing *local, *remote; u8 local_mitm, remote_mitm, local_io, remote_io, method; - if (test_bit(SMP_FLAG_OOB, &smp->flags)) + if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags) || + test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) return REQ_OOB; /* The preq/prsp contain the raw Pairing Request/Response PDUs @@ -2428,6 +2584,16 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) memcpy(smp->remote_pk, key, 64); + if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) { + err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk, + smp->rr, 0, cfm.confirm_val); + if (err) + return SMP_UNSPECIFIED; + + if (memcmp(cfm.confirm_val, smp->pcnf, 16)) + return SMP_CONFIRM_FAILED; + } + /* Non-initiating device sends its public key after receiving * the key from the initiating device. */ @@ -2438,7 +2604,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) } SMP_DBG("Remote Public Key X: %32phN", smp->remote_pk); - SMP_DBG("Remote Public Key Y: %32phN", &smp->remote_pk[32]); + SMP_DBG("Remote Public Key Y: %32phN", smp->remote_pk + 32); if (!ecdh_shared_secret(smp->remote_pk, smp->local_sk, smp->dhkey)) return SMP_UNSPECIFIED; @@ -2476,14 +2642,6 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) } if (smp->method == REQ_OOB) { - err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk, - smp->rr, 0, cfm.confirm_val); - if (err) - return SMP_UNSPECIFIED; - - if (memcmp(cfm.confirm_val, smp->pcnf, 16)) - return SMP_CONFIRM_FAILED; - if (hcon->out) smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), smp->prnd); @@ -2556,6 +2714,8 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb) if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) put_unaligned_le32(hcon->passkey_notify, r); + else if (smp->method == REQ_OOB) + memcpy(r, smp->lr, 16); err = smp_f6(smp->tfm_cmac, smp->mackey, smp->rrnd, smp->prnd, r, io_cap, remote_addr, local_addr, e); @@ -2606,7 +2766,7 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb) if (skb->len < 1) return -EILSEQ; - if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) { + if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) { reason = SMP_PAIRING_NOTSUPP; goto done; } @@ -2744,16 +2904,16 @@ static void bredr_pairing(struct l2cap_chan *chan) return; /* Secure Connections support must be enabled */ - if (!test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_SC_ENABLED)) return; /* BR/EDR must use Secure Connections for SMP */ if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) && - !test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags)) + !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) return; /* If our LE support is not enabled don't do anything */ - if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; /* Don't bother if 
remote LE support is not enabled */ @@ -2930,27 +3090,49 @@ static const struct l2cap_ops smp_root_chan_ops = { static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) { struct l2cap_chan *chan; - struct crypto_blkcipher *tfm_aes; + struct smp_dev *smp; + struct crypto_blkcipher *tfm_aes; + struct crypto_hash *tfm_cmac; if (cid == L2CAP_CID_SMP_BREDR) { - tfm_aes = NULL; + smp = NULL; goto create_chan; } - tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, 0); + smp = kzalloc(sizeof(*smp), GFP_KERNEL); + if (!smp) + return ERR_PTR(-ENOMEM); + + tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm_aes)) { - BT_ERR("Unable to create crypto context"); + BT_ERR("Unable to create ECB crypto context"); + kzfree(smp); return ERR_CAST(tfm_aes); } + tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm_cmac)) { + BT_ERR("Unable to create CMAC crypto context"); + crypto_free_blkcipher(tfm_aes); + kzfree(smp); + return ERR_CAST(tfm_cmac); + } + + smp->tfm_aes = tfm_aes; + smp->tfm_cmac = tfm_cmac; + create_chan: chan = l2cap_chan_create(); if (!chan) { - crypto_free_blkcipher(tfm_aes); + if (smp) { + crypto_free_blkcipher(smp->tfm_aes); + crypto_free_hash(smp->tfm_cmac); + kzfree(smp); + } return ERR_PTR(-ENOMEM); } - chan->data = tfm_aes; + chan->data = smp; l2cap_add_scid(chan, cid); @@ -2983,14 +3165,18 @@ create_chan: static void smp_del_chan(struct l2cap_chan *chan) { - struct crypto_blkcipher *tfm_aes; + struct smp_dev *smp; BT_DBG("chan %p", chan); - tfm_aes = chan->data; - if (tfm_aes) { + smp = chan->data; + if (smp) { chan->data = NULL; - crypto_free_blkcipher(tfm_aes); + if (smp->tfm_aes) + crypto_free_blkcipher(smp->tfm_aes); + if (smp->tfm_cmac) + crypto_free_hash(smp->tfm_cmac); + kzfree(smp); } l2cap_chan_put(chan); @@ -3003,7 +3189,7 @@ static ssize_t force_bredr_smp_read(struct file *file, struct hci_dev *hdev = file->private_data; char buf[3]; - buf[0] = test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags) ? 'Y': 'N'; + buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 
'Y': 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); @@ -3025,7 +3211,7 @@ static ssize_t force_bredr_smp_write(struct file *file, if (strtobool(buf, &enable)) return -EINVAL; - if (enable == test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags)) + if (enable == hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) return -EALREADY; if (enable) { @@ -3044,7 +3230,7 @@ static ssize_t force_bredr_smp_write(struct file *file, smp_del_chan(chan); } - change_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags); + hci_dev_change_flag(hdev, HCI_FORCE_BREDR_SMP); return count; } diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h index 60c5b73fcb4b..6cf872563ea7 100644 --- a/net/bluetooth/smp.h +++ b/net/bluetooth/smp.h @@ -188,6 +188,7 @@ int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], const bdaddr_t *bdaddr); int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa); +int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]); int smp_register(struct hci_dev *hdev); void smp_unregister(struct hci_dev *hdev); diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index b087d278c679..1849d96b3c91 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -563,6 +563,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) */ del_nbp(p); + dev_set_mtu(br->dev, br_min_mtu(br)); + spin_lock_bh(&br->lock); changed_addr = br_stp_recalculate_bridge_id(br); spin_unlock_bh(&br->lock); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 8bc6b67457dc..e1115a224a95 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -733,6 +733,9 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = { [IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 }, [IFLA_BR_HELLO_TIME] = { .type = NLA_U32 }, [IFLA_BR_MAX_AGE] = { .type = NLA_U32 }, + [IFLA_BR_AGEING_TIME] = { .type = NLA_U32 }, + [IFLA_BR_STP_STATE] = { .type = NLA_U32 }, + [IFLA_BR_PRIORITY] = { .type = NLA_U16 }, }; static int br_changelink(struct net_device *brdev, struct nlattr *tb[], @@ -762,6 +765,24 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], return err; } + if (data[IFLA_BR_AGEING_TIME]) { + u32 ageing_time = nla_get_u32(data[IFLA_BR_AGEING_TIME]); + + br->ageing_time = clock_t_to_jiffies(ageing_time); + } + + if (data[IFLA_BR_STP_STATE]) { + u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]); + + br_stp_set_enabled(br, stp_enabled); + } + + if (data[IFLA_BR_PRIORITY]) { + u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]); + + br_stp_set_bridge_priority(br, priority); + } + return 0; } @@ -770,6 +791,9 @@ static size_t br_get_size(const struct net_device *brdev) return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */ nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */ nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_AGEING_TIME */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_STP_STATE */ + nla_total_size(sizeof(u16)) + /* IFLA_BR_PRIORITY */ 0; } @@ -779,10 +803,16 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) u32 forward_delay = jiffies_to_clock_t(br->forward_delay); u32 hello_time = jiffies_to_clock_t(br->hello_time); u32 age_time = jiffies_to_clock_t(br->max_age); + u32 ageing_time = jiffies_to_clock_t(br->ageing_time); + u32 stp_enabled = br->stp_enabled; + u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]; if 
(nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) || nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) || - nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time)) + nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) || + nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) || + nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) || + nla_put_u16(skb, IFLA_BR_PRIORITY, priority)) return -EMSGSIZE; return 0; diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index b6bf51bb187d..4ec0c803aef1 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -281,7 +281,7 @@ static int caif_seqpkt_recvmsg(struct socket *sock, struct msghdr *m, int copylen; ret = -EOPNOTSUPP; - if (m->msg_flags&MSG_OOB) + if (flags & MSG_OOB) goto read_error; skb = skb_recv_datagram(sk, flags, 0 , &ret); diff --git a/net/compat.c b/net/compat.c index 478443182bbe..c4b6b0f43d5d 100644 --- a/net/compat.c +++ b/net/compat.c @@ -49,6 +49,13 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg, __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || __get_user(kmsg->msg_flags, &umsg->msg_flags)) return -EFAULT; + + if (!uaddr) + kmsg->msg_namelen = 0; + + if (kmsg->msg_namelen < 0) + return -EINVAL; + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) kmsg->msg_namelen = sizeof(struct sockaddr_storage); kmsg->msg_control = compat_ptr(tmp3); @@ -72,6 +79,8 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg, if (nr_segs > UIO_MAXIOV) return -EMSGSIZE; + kmsg->msg_iocb = NULL; + err = compat_rw_copy_check_uvector(save_addr ? READ : WRITE, compat_ptr(uiov), nr_segs, UIO_FASTIOV, *iov, iov); diff --git a/net/core/dev.c b/net/core/dev.c index 962ee9d71964..a0408d497dae 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1385,7 +1385,7 @@ static int __dev_close(struct net_device *dev) return retval; } -static int dev_close_many(struct list_head *head) +int dev_close_many(struct list_head *head, bool unlink) { struct net_device *dev, *tmp; @@ -1399,11 +1399,13 @@ static int dev_close_many(struct list_head *head) list_for_each_entry_safe(dev, tmp, head, close_list) { rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); call_netdevice_notifiers(NETDEV_DOWN, dev); - list_del_init(&dev->close_list); + if (unlink) + list_del_init(&dev->close_list); } return 0; } +EXPORT_SYMBOL(dev_close_many); /** * dev_close - shutdown an interface. @@ -1420,7 +1422,7 @@ int dev_close(struct net_device *dev) LIST_HEAD(single); list_add(&dev->close_list, &single); - dev_close_many(&single); + dev_close_many(&single, true); list_del(&single); } return 0; @@ -1694,6 +1696,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) } skb_scrub_packet(skb, true); + skb->priority = 0; skb->protocol = eth_type_trans(skb, dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); @@ -5912,6 +5915,24 @@ int dev_get_phys_port_id(struct net_device *dev, EXPORT_SYMBOL(dev_get_phys_port_id); /** + * dev_get_phys_port_name - Get device physical port name + * @dev: device + * @name: port name + * + * Get device physical port name + */ +int dev_get_phys_port_name(struct net_device *dev, + char *name, size_t len) +{ + const struct net_device_ops *ops = dev->netdev_ops; + + if (!ops->ndo_get_phys_port_name) + return -EOPNOTSUPP; + return ops->ndo_get_phys_port_name(dev, name, len); +} +EXPORT_SYMBOL(dev_get_phys_port_name); + +/** * dev_new_index - allocate an ifindex * @net: the applicable net namespace * @@ -5968,7 +5989,7 @@ static void rollback_registered_many(struct list_head *head) /* If device is running, close it first. 
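dev_get_phys_port_name() above simply forwards to the driver's ndo_get_phys_port_name callback. A hypothetical driver implementation renders its port label into the caller's buffer and fails rather than truncates, roughly as in this sketch (struct and naming scheme invented):

#include <stdio.h>

struct fake_port { unsigned int id; };	/* driver-private port state */

static int fake_get_phys_port_name(const struct fake_port *port,
				   char *name, size_t len)
{
	int n = snprintf(name, len, "p%u", port->id);

	/* fail instead of handing userspace a truncated name */
	if (n < 0 || (size_t)n >= len)
		return -1;	/* a real callback would return -EINVAL */
	return 0;
}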
*/ list_for_each_entry(dev, head, unreg_list) list_add_tail(&dev->close_list, &close_head); - dev_close_many(&close_head); + dev_close_many(&close_head, true); list_for_each_entry(dev, head, unreg_list) { /* And unlink it from device chain. */ @@ -6841,8 +6862,6 @@ void free_netdev(struct net_device *dev) { struct napi_struct *p, *n; - release_net(dev_net(dev)); - netif_free_tx_queues(dev); #ifdef CONFIG_SYSFS kvfree(dev->_rx); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 44706e81b2e0..68ea6950cad1 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -31,7 +31,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops, r->pref = pref; r->table = table; r->flags = flags; - r->fr_net = hold_net(ops->fro_net); + r->fr_net = ops->fro_net; r->suppress_prefixlen = -1; r->suppress_ifgroup = -1; @@ -116,7 +116,6 @@ static int __fib_rules_register(struct fib_rules_ops *ops) if (ops->family == o->family) goto errout; - hold_net(net); list_add_tail_rcu(&ops->list, &net->rules_ops); err = 0; errout: @@ -160,15 +159,6 @@ static void fib_rules_cleanup_ops(struct fib_rules_ops *ops) } } -static void fib_rules_put_rcu(struct rcu_head *head) -{ - struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu); - struct net *net = ops->fro_net; - - release_net(net); - kfree(ops); -} - void fib_rules_unregister(struct fib_rules_ops *ops) { struct net *net = ops->fro_net; @@ -178,7 +168,7 @@ void fib_rules_unregister(struct fib_rules_ops *ops) fib_rules_cleanup_ops(ops); spin_unlock(&net->rules_mod_lock); - call_rcu(&ops->rcu, fib_rules_put_rcu); + kfree_rcu(ops, rcu); } EXPORT_SYMBOL_GPL(fib_rules_unregister); @@ -303,7 +293,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh) err = -ENOMEM; goto errout; } - rule->fr_net = hold_net(net); + rule->fr_net = net; if (tb[FRA_PRIORITY]) rule->pref = nla_get_u32(tb[FRA_PRIORITY]); @@ -423,7 +413,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh) return 0; errout_free: - release_net(rule->fr_net); kfree(rule); errout: rules_ops_put(ops); @@ -492,6 +481,12 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh) goto errout; } + if (ops->delete) { + err = ops->delete(rule); + if (err) + goto errout; + } + list_del_rcu(&rule->list); if (rule->action == FR_ACT_GOTO) { @@ -517,8 +512,6 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh) notify_rule_change(RTM_DELRULE, rule, ops, nlh, NETLINK_CB(skb).portid); - if (ops->delete) - ops->delete(rule); fib_rule_put(rule); flush_route_cache(ops); rules_ops_put(ops); diff --git a/net/core/filter.c b/net/core/filter.c index 7a4eb7030dba..084eacc4d1d4 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -150,10 +150,62 @@ static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) return prandom_u32(); } +static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, + struct bpf_insn *insn_buf) +{ + struct bpf_insn *insn = insn_buf; + + switch (skb_field) { + case SKF_AD_MARK: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); + + *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + offsetof(struct sk_buff, mark)); + break; + + case SKF_AD_PKTTYPE: + *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET()); + *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX); +#ifdef __BIG_ENDIAN_BITFIELD + *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5); +#endif + break; + + case SKF_AD_QUEUE: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); + + *insn++ = BPF_LDX_MEM(BPF_H, 
dst_reg, src_reg, + offsetof(struct sk_buff, queue_mapping)); + break; + + case SKF_AD_VLAN_TAG: + case SKF_AD_VLAN_TAG_PRESENT: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); + BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); + + /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */ + *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, + offsetof(struct sk_buff, vlan_tci)); + if (skb_field == SKF_AD_VLAN_TAG) { + *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, + ~VLAN_TAG_PRESENT); + } else { + /* dst_reg >>= 12 */ + *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12); + /* dst_reg &= 1 */ + *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1); + } + break; + } + + return insn - insn_buf; +} + static bool convert_bpf_extensions(struct sock_filter *fp, struct bpf_insn **insnp) { struct bpf_insn *insn = *insnp; + u32 cnt; switch (fp->k) { case SKF_AD_OFF + SKF_AD_PROTOCOL: @@ -167,13 +219,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, break; case SKF_AD_OFF + SKF_AD_PKTTYPE: - *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX, - PKT_TYPE_OFFSET()); - *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX); -#ifdef __BIG_ENDIAN_BITFIELD - insn++; - *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 5); -#endif + cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn); + insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_IFINDEX: @@ -197,10 +244,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, break; case SKF_AD_OFF + SKF_AD_MARK: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); - - *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, - offsetof(struct sk_buff, mark)); + cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn); + insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_RXHASH: @@ -211,29 +256,20 @@ static bool convert_bpf_extensions(struct sock_filter *fp, break; case SKF_AD_OFF + SKF_AD_QUEUE: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); - - *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, - offsetof(struct sk_buff, queue_mapping)); + cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn); + insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_VLAN_TAG: - case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); + cnt = convert_skb_access(SKF_AD_VLAN_TAG, + BPF_REG_A, BPF_REG_CTX, insn); + insn += cnt - 1; + break; - /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */ - *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, - offsetof(struct sk_buff, vlan_tci)); - if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) { - *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, - ~VLAN_TAG_PRESENT); - } else { - /* A >>= 12 */ - *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12); - /* A &= 1 */ - *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1); - } + case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: + cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, + BPF_REG_A, BPF_REG_CTX, insn); + insn += cnt - 1; break; case SKF_AD_OFF + SKF_AD_PAY_OFFSET: @@ -1139,6 +1175,10 @@ sk_filter_func_proto(enum bpf_func_id func_id) return &bpf_map_update_elem_proto; case BPF_FUNC_map_delete_elem: return &bpf_map_delete_elem_proto; + case BPF_FUNC_get_prandom_u32: + return &bpf_get_prandom_u32_proto; + case BPF_FUNC_get_smp_processor_id: + return &bpf_get_smp_processor_id_proto; default: return NULL; } @@ -1147,13 +1187,70 @@ sk_filter_func_proto(enum bpf_func_id func_id) static bool sk_filter_is_valid_access(int off, int size, enum bpf_access_type type) { - /* skb fields cannot be accessed 
yet */ - return false; + /* only read is allowed */ + if (type != BPF_READ) + return false; + + /* check bounds */ + if (off < 0 || off >= sizeof(struct __sk_buff)) + return false; + + /* disallow misaligned access */ + if (off % size != 0) + return false; + + /* all __sk_buff fields are __u32 */ + if (size != 4) + return false; + + return true; +} + +static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off, + struct bpf_insn *insn_buf) +{ + struct bpf_insn *insn = insn_buf; + + switch (ctx_off) { + case offsetof(struct __sk_buff, len): + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); + + *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + offsetof(struct sk_buff, len)); + break; + + case offsetof(struct __sk_buff, protocol): + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); + + *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, + offsetof(struct sk_buff, protocol)); + break; + + case offsetof(struct __sk_buff, mark): + return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn); + + case offsetof(struct __sk_buff, pkt_type): + return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn); + + case offsetof(struct __sk_buff, queue_mapping): + return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn); + + case offsetof(struct __sk_buff, vlan_present): + return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, + dst_reg, src_reg, insn); + + case offsetof(struct __sk_buff, vlan_tci): + return convert_skb_access(SKF_AD_VLAN_TAG, + dst_reg, src_reg, insn); + } + + return insn - insn_buf; } static const struct bpf_verifier_ops sk_filter_ops = { .get_func_proto = sk_filter_func_proto, .is_valid_access = sk_filter_is_valid_access, + .convert_ctx_access = sk_filter_convert_ctx_access, }; static struct bpf_prog_type_list sk_filter_type __read_mostly = { @@ -1166,10 +1263,16 @@ static struct bpf_prog_type_list sched_cls_type __read_mostly = { .type = BPF_PROG_TYPE_SCHED_CLS, }; +static struct bpf_prog_type_list sched_act_type __read_mostly = { + .ops = &sk_filter_ops, + .type = BPF_PROG_TYPE_SCHED_ACT, +}; + static int __init register_sk_filter_ops(void) { bpf_register_prog_type(&sk_filter_type); bpf_register_prog_type(&sched_cls_type); + bpf_register_prog_type(&sched_act_type); return 0; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ad07990e943d..3de654256028 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -591,7 +591,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, if (!n) goto out; - write_pnet(&n->net, hold_net(net)); + write_pnet(&n->net, net); memcpy(n->key, pkey, key_len); n->dev = dev; if (dev) @@ -600,7 +600,6 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, if (tbl->pconstructor && tbl->pconstructor(n)) { if (dev) dev_put(dev); - release_net(net); kfree(n); n = NULL; goto out; @@ -634,7 +633,6 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, tbl->pdestructor(n); if (n->dev) dev_put(n->dev); - release_net(pneigh_net(n)); kfree(n); return 0; } @@ -657,7 +655,6 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) tbl->pdestructor(n); if (n->dev) dev_put(n->dev); - release_net(pneigh_net(n)); kfree(n); continue; } @@ -820,10 +817,9 @@ out: static __inline__ int neigh_max_probes(struct neighbour *n) { struct neigh_parms *p = n->parms; - int max_probes = NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES); - if (!(n->nud_state & NUD_PROBE)) - max_probes += NEIGH_VAR(p, MCAST_PROBES); - return max_probes; + return NEIGH_VAR(p, 
UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) + + (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) : + NEIGH_VAR(p, MCAST_PROBES)); } static void neigh_invalidate(struct neighbour *neigh) @@ -1428,11 +1424,10 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); dev_hold(dev); p->dev = dev; - write_pnet(&p->net, hold_net(net)); + write_pnet(&p->net, net); p->sysctl_table = NULL; if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { - release_net(net); dev_put(dev); kfree(p); return NULL; @@ -1472,7 +1467,6 @@ EXPORT_SYMBOL(neigh_parms_release); static void neigh_parms_destroy(struct neigh_parms *parms) { - release_net(neigh_parms_net(parms)); kfree(parms); } @@ -1747,6 +1741,8 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms) NEIGH_VAR(parms, UCAST_PROBES)) || nla_put_u32(skb, NDTPA_MCAST_PROBES, NEIGH_VAR(parms, MCAST_PROBES)) || + nla_put_u32(skb, NDTPA_MCAST_REPROBES, + NEIGH_VAR(parms, MCAST_REPROBES)) || nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) || nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME, NEIGH_VAR(parms, BASE_REACHABLE_TIME)) || @@ -1906,6 +1902,7 @@ static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = { [NDTPA_APP_PROBES] = { .type = NLA_U32 }, [NDTPA_UCAST_PROBES] = { .type = NLA_U32 }, [NDTPA_MCAST_PROBES] = { .type = NLA_U32 }, + [NDTPA_MCAST_REPROBES] = { .type = NLA_U32 }, [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 }, [NDTPA_GC_STALETIME] = { .type = NLA_U64 }, [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 }, @@ -2006,6 +2003,10 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) NEIGH_VAR_SET(p, MCAST_PROBES, nla_get_u32(tbp[i])); break; + case NDTPA_MCAST_REPROBES: + NEIGH_VAR_SET(p, MCAST_REPROBES, + nla_get_u32(tbp[i])); + break; case NDTPA_BASE_REACHABLE_TIME: NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, nla_get_msecs(tbp[i])); @@ -2992,6 +2993,7 @@ static struct neigh_sysctl_table { NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"), NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"), NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"), + NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"), NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"), NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"), NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"), diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index cf30620a88e1..cc5cf689809c 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -418,6 +418,28 @@ static ssize_t phys_port_id_show(struct device *dev, } static DEVICE_ATTR_RO(phys_port_id); +static ssize_t phys_port_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_dev(dev); + ssize_t ret = -EINVAL; + + if (!rtnl_trylock()) + return restart_syscall(); + + if (dev_isalive(netdev)) { + char name[IFNAMSIZ]; + + ret = dev_get_phys_port_name(netdev, name, sizeof(name)); + if (!ret) + ret = sprintf(buf, "%s\n", name); + } + rtnl_unlock(); + + return ret; +} +static DEVICE_ATTR_RO(phys_port_name); + static ssize_t phys_switch_id_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -465,6 +487,7 @@ static struct attribute *net_class_attrs[] = { &dev_attr_tx_queue_len.attr, &dev_attr_gro_flush_timeout.attr, &dev_attr_phys_port_id.attr, + &dev_attr_phys_port_name.attr, &dev_attr_phys_switch_id.attr, NULL, }; @@ -951,6 +974,60 @@ 
static ssize_t show_trans_timeout(struct netdev_queue *queue, return sprintf(buf, "%lu", trans_timeout); } +#ifdef CONFIG_XPS +static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue) +{ + struct net_device *dev = queue->dev; + int i; + + for (i = 0; i < dev->num_tx_queues; i++) + if (queue == &dev->_tx[i]) + break; + + BUG_ON(i >= dev->num_tx_queues); + + return i; +} + +static ssize_t show_tx_maxrate(struct netdev_queue *queue, + struct netdev_queue_attribute *attribute, + char *buf) +{ + return sprintf(buf, "%lu\n", queue->tx_maxrate); +} + +static ssize_t set_tx_maxrate(struct netdev_queue *queue, + struct netdev_queue_attribute *attribute, + const char *buf, size_t len) +{ + struct net_device *dev = queue->dev; + int err, index = get_netdev_queue_index(queue); + u32 rate = 0; + + err = kstrtou32(buf, 10, &rate); + if (err < 0) + return err; + + if (!rtnl_trylock()) + return restart_syscall(); + + err = -EOPNOTSUPP; + if (dev->netdev_ops->ndo_set_tx_maxrate) + err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate); + + rtnl_unlock(); + if (!err) { + queue->tx_maxrate = rate; + return len; + } + return err; +} + +static struct netdev_queue_attribute queue_tx_maxrate = + __ATTR(tx_maxrate, S_IRUGO | S_IWUSR, + show_tx_maxrate, set_tx_maxrate); +#endif + static struct netdev_queue_attribute queue_trans_timeout = __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL); @@ -1065,18 +1142,6 @@ static struct attribute_group dql_group = { #endif /* CONFIG_BQL */ #ifdef CONFIG_XPS -static unsigned int get_netdev_queue_index(struct netdev_queue *queue) -{ - struct net_device *dev = queue->dev; - unsigned int i; - - i = queue - dev->_tx; - BUG_ON(i >= dev->num_tx_queues); - - return i; -} - - static ssize_t show_xps_map(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, char *buf) { @@ -1153,6 +1218,7 @@ static struct attribute *netdev_queue_default_attrs[] = { &queue_trans_timeout.attr, #ifdef CONFIG_XPS &xps_cpus_attribute.attr, + &queue_tx_maxrate.attr, #endif NULL }; diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index cb5290b8c428..e5e96b0f6717 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -236,10 +236,6 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) net->user_ns = user_ns; idr_init(&net->netns_ids); -#ifdef NETNS_REFCNT_DEBUG - atomic_set(&net->use_count, 0); -#endif - list_for_each_entry(ops, &pernet_list, list) { error = ops_init(ops, net); if (error < 0) @@ -294,13 +290,6 @@ out_free: static void net_free(struct net *net) { -#ifdef NETNS_REFCNT_DEBUG - if (unlikely(atomic_read(&net->use_count) != 0)) { - pr_emerg("network namespace not free! 
Usage: %d\n", - atomic_read(&net->use_count)); - return; - } -#endif kfree(rcu_access_pointer(net->gen)); kmem_cache_free(net_cachep, net); } diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 04db318e6218..87b22c0bc08c 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c @@ -58,14 +58,14 @@ int reqsk_queue_alloc(struct request_sock_queue *queue, return -ENOMEM; get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd)); - rwlock_init(&queue->syn_wait_lock); + spin_lock_init(&queue->syn_wait_lock); queue->rskq_accept_head = NULL; lopt->nr_table_entries = nr_table_entries; lopt->max_qlen_log = ilog2(nr_table_entries); - write_lock_bh(&queue->syn_wait_lock); + spin_lock_bh(&queue->syn_wait_lock); queue->listen_opt = lopt; - write_unlock_bh(&queue->syn_wait_lock); + spin_unlock_bh(&queue->syn_wait_lock); return 0; } @@ -81,10 +81,10 @@ static inline struct listen_sock *reqsk_queue_yank_listen_sk( { struct listen_sock *lopt; - write_lock_bh(&queue->syn_wait_lock); + spin_lock_bh(&queue->syn_wait_lock); lopt = queue->listen_opt; queue->listen_opt = NULL; - write_unlock_bh(&queue->syn_wait_lock); + spin_unlock_bh(&queue->syn_wait_lock); return lopt; } @@ -94,21 +94,26 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) /* make all the listen_opt local to us */ struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue); - if (lopt->qlen != 0) { + if (listen_sock_qlen(lopt) != 0) { unsigned int i; for (i = 0; i < lopt->nr_table_entries; i++) { struct request_sock *req; + spin_lock_bh(&queue->syn_wait_lock); while ((req = lopt->syn_table[i]) != NULL) { lopt->syn_table[i] = req->dl_next; - lopt->qlen--; - reqsk_free(req); + atomic_inc(&lopt->qlen_dec); + if (del_timer(&req->rsk_timer)) + reqsk_put(req); + reqsk_put(req); } + spin_unlock_bh(&queue->syn_wait_lock); } } - WARN_ON(lopt->qlen != 0); + if (WARN_ON(listen_sock_qlen(lopt) != 0)) + pr_err("qlen %u\n", listen_sock_qlen(lopt)); kvfree(lopt); } @@ -153,24 +158,22 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) * case might also exist in tcp_v4_hnd_req() that will trigger this locking * order. * - * When a TFO req is created, it needs to sock_hold its listener to prevent - * the latter data structure from going away. - * - * This function also sets "treq->listener" to NULL and unreference listener - * socket. treq->listener is used by the listener so it is protected by the + * This function also sets "treq->tfo_listener" to false. + * treq->tfo_listener is used by the listener so it is protected by the * fastopenq->lock in this function. */ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, bool reset) { - struct sock *lsk = tcp_rsk(req)->listener; - struct fastopen_queue *fastopenq = - inet_csk(lsk)->icsk_accept_queue.fastopenq; + struct sock *lsk = req->rsk_listener; + struct fastopen_queue *fastopenq; + + fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq; tcp_sk(sk)->fastopen_rsk = NULL; spin_lock_bh(&fastopenq->lock); fastopenq->qlen--; - tcp_rsk(req)->listener = NULL; + tcp_rsk(req)->tfo_listener = false; if (req->sk) /* the child socket hasn't been accepted yet */ goto out; @@ -179,8 +182,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, * special RST handling below. */ spin_unlock_bh(&fastopenq->lock); - sock_put(lsk); - reqsk_free(req); + reqsk_put(req); return; } /* Wait for 60secs before removing a req that has triggered RST. 
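reqsk_queue_destroy() above may drop two references per request: the one owned by the hash table, plus the one a still-pending rsk_timer held if del_timer() managed to cancel it. The same pattern in miniature, with the refcount scaffolding invented for illustration:

#include <stdbool.h>
#include <stdlib.h>

struct req {
	int refcnt;
	bool timer_pending;
};

static void req_put(struct req *r)
{
	if (--r->refcnt == 0)
		free(r);
}

static void destroy_req(struct req *r)
{
	if (r->timer_pending) {		/* del_timer() returned true */
		r->timer_pending = false;
		req_put(r);		/* reference the timer was holding */
	}
	req_put(r);			/* reference the table was holding */
}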
@@ -190,7 +192,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, * * For more details see CoNext'11 "TCP Fast Open" paper. */ - req->expires = jiffies + 60*HZ; + req->rsk_timer.expires = jiffies + 60*HZ; if (fastopenq->rskq_rst_head == NULL) fastopenq->rskq_rst_head = req; else @@ -201,5 +203,4 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, fastopenq->qlen++; out: spin_unlock_bh(&fastopenq->lock); - sock_put(lsk); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 25b4b5d23485..e7695104dbf0 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -982,6 +982,24 @@ static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev) return 0; } +static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev) +{ + char name[IFNAMSIZ]; + int err; + + err = dev_get_phys_port_name(dev, name, sizeof(name)); + if (err) { + if (err == -EOPNOTSUPP) + return 0; + return err; + } + + if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name)) + return -EMSGSIZE; + + return 0; +} + static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev) { int err; @@ -1072,6 +1090,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, if (rtnl_phys_port_id_fill(skb, dev)) goto nla_put_failure; + if (rtnl_phys_port_name_fill(skb, dev)) + goto nla_put_failure; + if (rtnl_phys_switch_id_fill(skb, dev)) goto nla_put_failure; @@ -2166,28 +2187,28 @@ replay: } } err = rtnl_configure_link(dev, ifm); - if (err < 0) { - if (ops->newlink) { - LIST_HEAD(list_kill); - - ops->dellink(dev, &list_kill); - unregister_netdevice_many(&list_kill); - } else { - unregister_netdevice(dev); - } - goto out; - } - + if (err < 0) + goto out_unregister; if (link_net) { err = dev_change_net_namespace(dev, dest_net, ifname); if (err < 0) - unregister_netdevice(dev); + goto out_unregister; } out: if (link_net) put_net(link_net); put_net(dest_net); return err; +out_unregister: + if (ops->newlink) { + LIST_HEAD(list_kill); + + ops->dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); + } else { + unregister_netdevice(dev); + } + goto out; } } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 47c32413d5b9..cdb939b731aa 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3689,9 +3689,13 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, struct sock *sk, int tstype) { struct sk_buff *skb; - bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; + bool tsonly; - if (!sk || !skb_may_tx_timestamp(sk, tsonly)) + if (!sk) + return; + + tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; + if (!skb_may_tx_timestamp(sk, tsonly)) return; if (tsonly) @@ -4129,7 +4133,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) skb->ignore_df = 0; skb_dst_drop(skb); skb->mark = 0; - skb->sender_cpu = 0; + skb_sender_cpu_clear(skb); skb_init_secmark(skb); secpath_reset(skb); nf_reset(skb); diff --git a/net/core/sock.c b/net/core/sock.c index 726e1f99aa8d..119ae464b44a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -928,8 +928,6 @@ set_rcvbuf: sk->sk_mark = val; break; - /* We implement the SO_SNDLOWAT etc to - not be settable (1003.1g 5.3) */ case SO_RXQ_OVFL: sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); break; @@ -1234,6 +1232,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; default: + /* We implement the SO_SNDLOWAT etc to not be settable + * (1003.1g 7). 
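The __skb_tstamp_tx() hunk above is a use-before-NULL-check fix: the old code read sk->sk_tsflags in the variable initializer, before the !sk test ever ran. A compact sketch of the broken and fixed shapes; the flag value is assumed for illustration only:

#include <stdbool.h>
#include <stddef.h>

#define SOF_TIMESTAMPING_OPT_TSONLY 0x800       /* illustrative value, not authoritative */

struct sock {
        unsigned int sk_tsflags;
};

/* old shape: sk is dereferenced in the initializer, before the NULL test */
static bool tsonly_buggy(const struct sock *sk)
{
        bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;     /* crashes if sk == NULL */

        if (!sk)
                return false;
        return tsonly;
}

/* fixed shape: bail out on !sk first, read the flags afterwards */
static bool tsonly_fixed(const struct sock *sk)
{
        bool tsonly;

        if (!sk)
                return false;
        tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
        return tsonly;
}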
+ */ return -ENOPROTOOPT; } @@ -1454,9 +1455,8 @@ void sk_release_kernel(struct sock *sk) return; sock_hold(sk); - sock_release(sk->sk_socket); - release_net(sock_net(sk)); sock_net_set(sk, get_net(&init_net)); + sock_release(sk->sk_socket); sock_put(sk); } EXPORT_SYMBOL(sk_release_kernel); @@ -1538,6 +1538,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) newsk->sk_err = 0; newsk->sk_priority = 0; newsk->sk_incoming_cpu = raw_smp_processor_id(); + atomic64_set(&newsk->sk_cookie, 0); /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) @@ -1655,25 +1656,16 @@ void sock_rfree(struct sk_buff *skb) } EXPORT_SYMBOL(sock_rfree); +/* + * Buffer destructor for skbs that are not used directly in read or write + * path, e.g. for error handler skbs. Automatically called from kfree_skb. + */ void sock_efree(struct sk_buff *skb) { sock_put(skb->sk); } EXPORT_SYMBOL(sock_efree); -#ifdef CONFIG_INET -void sock_edemux(struct sk_buff *skb) -{ - struct sock *sk = skb->sk; - - if (sk->sk_state == TCP_TIME_WAIT) - inet_twsk_put(inet_twsk(sk)); - else - sock_put(sk); -} -EXPORT_SYMBOL(sock_edemux); -#endif - kuid_t sock_i_uid(struct sock *sk) { kuid_t uid; @@ -2726,6 +2718,42 @@ static inline void release_proto_idx(struct proto *prot) } #endif +static void req_prot_cleanup(struct request_sock_ops *rsk_prot) +{ + if (!rsk_prot) + return; + kfree(rsk_prot->slab_name); + rsk_prot->slab_name = NULL; + if (rsk_prot->slab) { + kmem_cache_destroy(rsk_prot->slab); + rsk_prot->slab = NULL; + } +} + +static int req_prot_init(const struct proto *prot) +{ + struct request_sock_ops *rsk_prot = prot->rsk_prot; + + if (!rsk_prot) + return 0; + + rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", + prot->name); + if (!rsk_prot->slab_name) + return -ENOMEM; + + rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, + rsk_prot->obj_size, 0, + 0, NULL); + + if (!rsk_prot->slab) { + pr_crit("%s: Can't create request sock SLAB cache!\n", + prot->name); + return -ENOMEM; + } + return 0; +} + int proto_register(struct proto *prot, int alloc_slab) { if (alloc_slab) { @@ -2739,21 +2767,8 @@ int proto_register(struct proto *prot, int alloc_slab) goto out; } - if (prot->rsk_prot != NULL) { - prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name); - if (prot->rsk_prot->slab_name == NULL) - goto out_free_sock_slab; - - prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, - prot->rsk_prot->obj_size, 0, - SLAB_HWCACHE_ALIGN, NULL); - - if (prot->rsk_prot->slab == NULL) { - pr_crit("%s: Can't create request sock SLAB cache!\n", - prot->name); - goto out_free_request_sock_slab_name; - } - } + if (req_prot_init(prot)) + goto out_free_request_sock_slab; if (prot->twsk_prot != NULL) { prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name); @@ -2782,14 +2797,8 @@ int proto_register(struct proto *prot, int alloc_slab) out_free_timewait_sock_slab_name: kfree(prot->twsk_prot->twsk_slab_name); out_free_request_sock_slab: - if (prot->rsk_prot && prot->rsk_prot->slab) { - kmem_cache_destroy(prot->rsk_prot->slab); - prot->rsk_prot->slab = NULL; - } -out_free_request_sock_slab_name: - if (prot->rsk_prot) - kfree(prot->rsk_prot->slab_name); -out_free_sock_slab: + req_prot_cleanup(prot->rsk_prot); + kmem_cache_destroy(prot->slab); prot->slab = NULL; out: @@ -2809,11 +2818,7 @@ void proto_unregister(struct proto *prot) prot->slab = NULL; } - if (prot->rsk_prot != NULL && 
prot->rsk_prot->slab != NULL) { - kmem_cache_destroy(prot->rsk_prot->slab); - kfree(prot->rsk_prot->slab_name); - prot->rsk_prot->slab = NULL; - } + req_prot_cleanup(prot->rsk_prot); if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) { kmem_cache_destroy(prot->twsk_prot->twsk_slab); diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 96e70ee05a8d..74dddf84adcd 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -13,22 +13,39 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX]; static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); static DEFINE_MUTEX(sock_diag_table_mutex); -int sock_diag_check_cookie(void *sk, const __u32 *cookie) +static u64 sock_gen_cookie(struct sock *sk) { - if ((cookie[0] != INET_DIAG_NOCOOKIE || - cookie[1] != INET_DIAG_NOCOOKIE) && - ((u32)(unsigned long)sk != cookie[0] || - (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1])) - return -ESTALE; - else + while (1) { + u64 res = atomic64_read(&sk->sk_cookie); + + if (res) + return res; + res = atomic64_inc_return(&sock_net(sk)->cookie_gen); + atomic64_cmpxchg(&sk->sk_cookie, 0, res); + } +} + +int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie) +{ + u64 res; + + if (cookie[0] == INET_DIAG_NOCOOKIE && cookie[1] == INET_DIAG_NOCOOKIE) return 0; + + res = sock_gen_cookie(sk); + if ((u32)res != cookie[0] || (u32)(res >> 32) != cookie[1]) + return -ESTALE; + + return 0; } EXPORT_SYMBOL_GPL(sock_diag_check_cookie); -void sock_diag_save_cookie(void *sk, __u32 *cookie) +void sock_diag_save_cookie(struct sock *sk, __u32 *cookie) { - cookie[0] = (u32)(unsigned long)sk; - cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); + u64 res = sock_gen_cookie(sk); + + cookie[0] = (u32)res; + cookie[1] = (u32)(res >> 32); } EXPORT_SYMBOL_GPL(sock_diag_save_cookie); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 433424804284..95b6139d710c 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -24,7 +24,8 @@ static int zero = 0; static int one = 1; -static int ushort_max = USHRT_MAX; +static int min_sndbuf = SOCK_MIN_SNDBUF; +static int min_rcvbuf = SOCK_MIN_RCVBUF; static int net_msg_warn; /* Unused, but still a sysctl */ @@ -237,7 +238,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_sndbuf, }, { .procname = "rmem_max", @@ -245,7 +246,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_rcvbuf, }, { .procname = "wmem_default", @@ -253,7 +254,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_sndbuf, }, { .procname = "rmem_default", @@ -261,7 +262,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_rcvbuf, }, { .procname = "dev_weight", @@ -401,7 +402,6 @@ static struct ctl_table netns_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .extra1 = &zero, - .extra2 = &ushort_max, .proc_handler = proc_dointvec_minmax }, { } diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 3b1d64d6e093..bebc735f5afc 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -280,8 +280,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, 
struct dst_entry *dst); struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, - struct request_sock *req, - struct request_sock **prev); + struct request_sock *req); int dccp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); @@ -318,6 +317,7 @@ int inet_dccp_listen(struct socket *sock, int backlog); unsigned int dccp_poll(struct file *file, struct socket *sock, poll_table *wait); int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); +void dccp_req_err(struct sock *sk, u64 seq); struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb); int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index e45b968613a4..2b4f21d34df6 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -89,10 +89,9 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (inet->inet_saddr == 0) inet->inet_saddr = fl4->saddr; - inet->inet_rcv_saddr = inet->inet_saddr; - + sk_rcv_saddr_set(sk, inet->inet_saddr); inet->inet_dport = usin->sin_port; - inet->inet_daddr = daddr; + sk_daddr_set(sk, daddr); inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet_opt) @@ -196,6 +195,32 @@ static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk) dst->ops->redirect(dst, sk, skb); } +void dccp_req_err(struct sock *sk, u64 seq) + { + struct request_sock *req = inet_reqsk(sk); + struct net *net = sock_net(sk); + + /* + * ICMPs are not backlogged, hence we cannot get an established + * socket here. + */ + WARN_ON(req->sk); + + if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) { + NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); + reqsk_put(req); + } else { + /* + * Still in RESPOND, just remove it silently. + * There is no good way to pass the error to the newly + * created socket, and POSIX does not want network + * errors returned from accept(). + */ + inet_csk_reqsk_queue_drop(req->rsk_listener, req); + } +} +EXPORT_SYMBOL(dccp_req_err); + /* * This routine is called by the ICMP module when it gets some sort of error * condition. 
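dccp_req_err() above keys its decision on between48(), DCCP's closed-window test over 48-bit sequence numbers. A self-contained sketch of that circular comparison, shifting into the top 48 bits so wraparound and sign extension do the work; this is a reconstruction of the idea, not the kernel's exact macro:

#include <stdbool.h>
#include <stdint.h>

/* signed distance from a to b in 48-bit sequence space */
static int64_t delta48(uint64_t a, uint64_t b)
{
        return (int64_t)((b << 16) - (a << 16)) >> 16;
}

/* seq lies in the closed window [low, high], modulo 2^48 */
static bool between48(uint64_t seq, uint64_t low, uint64_t high)
{
        return delta48(low, seq) >= 0 && delta48(seq, high) >= 0;
}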
If err < 0 then the socket should be closed and the error @@ -228,10 +253,11 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) return; } - sk = inet_lookup(net, &dccp_hashinfo, - iph->daddr, dh->dccph_dport, - iph->saddr, dh->dccph_sport, inet_iif(skb)); - if (sk == NULL) { + sk = __inet_lookup_established(net, &dccp_hashinfo, + iph->daddr, dh->dccph_dport, + iph->saddr, ntohs(dh->dccph_sport), + inet_iif(skb)); + if (!sk) { ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); return; } @@ -240,6 +266,9 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) inet_twsk_put(inet_twsk(sk)); return; } + seq = dccp_hdr_seq(dh); + if (sk->sk_state == DCCP_NEW_SYN_RECV) + return dccp_req_err(sk, seq); bh_lock_sock(sk); /* If too many ICMPs get dropped on busy @@ -252,7 +281,6 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) goto out; dp = dccp_sk(sk); - seq = dccp_hdr_seq(dh); if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && !between48(seq, dp->dccps_awl, dp->dccps_awh)) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); @@ -289,35 +317,6 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) } switch (sk->sk_state) { - struct request_sock *req , **prev; - case DCCP_LISTEN: - if (sock_owned_by_user(sk)) - goto out; - req = inet_csk_search_req(sk, &prev, dh->dccph_dport, - iph->daddr, iph->saddr); - if (!req) - goto out; - - /* - * ICMPs are not backlogged, hence we cannot get an established - * socket here. - */ - WARN_ON(req->sk); - - if (!between48(seq, dccp_rsk(req)->dreq_iss, - dccp_rsk(req)->dreq_gss)) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); - goto out; - } - /* - * Still in RESPOND, just remove it silently. - * There is no good way to pass the error to the newly - * created socket, and POSIX does not want network - * errors returned from accept(). - */ - inet_csk_reqsk_queue_drop(sk, req, prev); - goto out; - case DCCP_REQUESTING: case DCCP_RESPOND: if (!sock_owned_by_user(sk)) { @@ -408,8 +407,8 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, newinet = inet_sk(newsk); ireq = inet_rsk(req); - newinet->inet_daddr = ireq->ir_rmt_addr; - newinet->inet_rcv_saddr = ireq->ir_loc_addr; + sk_daddr_set(newsk, ireq->ir_rmt_addr); + sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); newinet->inet_saddr = ireq->ir_loc_addr; newinet->inet_opt = ireq->opt; ireq->opt = NULL; @@ -449,14 +448,14 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) const struct dccp_hdr *dh = dccp_hdr(skb); const struct iphdr *iph = ip_hdr(skb); struct sock *nsk; - struct request_sock **prev; /* Find possible connection requests. 
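The lookup rework continued below returns a referenced request sock: inet_csk_search_req() now takes a reference under syn_wait_lock before dropping it, and callers such as dccp_v4_hnd_req() balance every hit with reqsk_put(). A user-space sketch of the take-ref-under-lock pattern, with a pthread mutex standing in for the bh spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

struct req {
        atomic_int refcnt;
        struct req *dl_next;
        int key;
};

static pthread_mutex_t syn_wait_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *bucket;      /* one hash bucket, singly linked */

static void req_put(struct req *r)
{
        if (atomic_fetch_sub(&r->refcnt, 1) == 1)
                free(r);
}

/* take a reference before the lock is dropped, as inet_csk_search_req() now does */
static struct req *search_req(int key)
{
        struct req *r;

        pthread_mutex_lock(&syn_wait_lock);
        for (r = bucket; r; r = r->dl_next) {
                if (r->key == key) {
                        atomic_fetch_add(&r->refcnt, 1);
                        break;
                }
        }
        pthread_mutex_unlock(&syn_wait_lock);
        return r;       /* caller must req_put() on every hit */
}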
*/ - struct request_sock *req = inet_csk_search_req(sk, &prev, - dh->dccph_sport, + struct request_sock *req = inet_csk_search_req(sk, dh->dccph_sport, iph->saddr, iph->daddr); - if (req != NULL) - return dccp_check_req(sk, skb, req, prev); - + if (req) { + nsk = dccp_check_req(sk, skb, req); + reqsk_put(req); + return nsk; + } nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo, iph->saddr, dh->dccph_sport, iph->daddr, dh->dccph_dport, @@ -575,7 +574,7 @@ static void dccp_v4_reqsk_destructor(struct request_sock *req) kfree(inet_rsk(req)->opt); } -void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req) +void dccp_syn_ack_timeout(const struct request_sock *req) { } EXPORT_SYMBOL(dccp_syn_ack_timeout); @@ -624,7 +623,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; - req = inet_reqsk_alloc(&dccp_request_sock_ops); + req = inet_reqsk_alloc(&dccp_request_sock_ops, sk); if (req == NULL) goto drop; @@ -639,8 +638,10 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; ireq = inet_rsk(req); - ireq->ir_loc_addr = ip_hdr(skb)->daddr; - ireq->ir_rmt_addr = ip_hdr(skb)->saddr; + sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); + sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); + ireq->ireq_family = AF_INET; + ireq->ir_iif = sk->sk_bound_dev_if; /* * Step 3: Process LISTEN state diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 6bcaa33cd804..9d0551092c6c 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -40,19 +40,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped; static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops; -static void dccp_v6_hash(struct sock *sk) -{ - if (sk->sk_state != DCCP_CLOSED) { - if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) { - inet_hash(sk); - return; - } - local_bh_disable(); - __inet6_hash(sk, NULL); - local_bh_enable(); - } -} - /* add pseudo-header to DCCP checksum stored in skb->csum */ static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, const struct in6_addr *saddr, @@ -98,11 +85,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, return; } - sk = inet6_lookup(net, &dccp_hashinfo, - &hdr->daddr, dh->dccph_dport, - &hdr->saddr, dh->dccph_sport, inet6_iif(skb)); + sk = __inet6_lookup_established(net, &dccp_hashinfo, + &hdr->daddr, dh->dccph_dport, + &hdr->saddr, ntohs(dh->dccph_sport), + inet6_iif(skb)); - if (sk == NULL) { + if (!sk) { ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return; @@ -112,6 +100,9 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, inet_twsk_put(inet_twsk(sk)); return; } + seq = dccp_hdr_seq(dh); + if (sk->sk_state == DCCP_NEW_SYN_RECV) + return dccp_req_err(sk, seq); bh_lock_sock(sk); if (sock_owned_by_user(sk)) @@ -121,7 +112,6 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, goto out; dp = dccp_sk(sk); - seq = dccp_hdr_seq(dh); if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && !between48(seq, dp->dccps_awl, dp->dccps_awh)) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); @@ -162,32 +152,6 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, /* Might be for an request_sock */ switch (sk->sk_state) { - struct request_sock *req, **prev; - case DCCP_LISTEN: - if (sock_owned_by_user(sk)) - goto out; - - req = inet6_csk_search_req(sk, &prev, dh->dccph_dport, - &hdr->daddr, &hdr->saddr, - inet6_iif(skb)); 
- if (req == NULL) - goto out; - - /* - * ICMPs are not backlogged, hence we cannot get an established - * socket here. - */ - WARN_ON(req->sk != NULL); - - if (!between48(seq, dccp_rsk(req)->dreq_iss, - dccp_rsk(req)->dreq_gss)) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); - goto out; - } - - inet_csk_reqsk_queue_drop(sk, req, prev); - goto out; - case DCCP_REQUESTING: case DCCP_RESPOND: /* Cannot happen. It can, it SYNs are crossed. --ANK */ @@ -330,17 +294,16 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) { const struct dccp_hdr *dh = dccp_hdr(skb); const struct ipv6hdr *iph = ipv6_hdr(skb); + struct request_sock *req; struct sock *nsk; - struct request_sock **prev; - /* Find possible connection requests. */ - struct request_sock *req = inet6_csk_search_req(sk, &prev, - dh->dccph_sport, - &iph->saddr, - &iph->daddr, - inet6_iif(skb)); - if (req != NULL) - return dccp_check_req(sk, skb, req, prev); + req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr, + &iph->daddr, inet6_iif(skb)); + if (req) { + nsk = dccp_check_req(sk, skb, req); + reqsk_put(req); + return nsk; + } nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo, &iph->saddr, dh->dccph_sport, &iph->daddr, ntohs(dh->dccph_dport), @@ -386,7 +349,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; - req = inet_reqsk_alloc(&dccp6_request_sock_ops); + req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk); if (req == NULL) goto drop; @@ -403,6 +366,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) ireq = inet_rsk(req); ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; + ireq->ireq_family = AF_INET6; if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || @@ -469,11 +433,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, memcpy(newnp, np, sizeof(struct ipv6_pinfo)); - ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr); - - ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); - - newsk->sk_v6_rcv_saddr = newnp->saddr; + newnp->saddr = newsk->sk_v6_rcv_saddr; inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped; newsk->sk_backlog_rcv = dccp_v4_do_rcv; @@ -591,7 +551,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, dccp_done(newsk); goto out; } - __inet6_hash(newsk, NULL); + __inet_hash(newsk, NULL); return newsk; @@ -916,9 +876,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, sk->sk_backlog_rcv = dccp_v6_do_rcv; goto failure; } - ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); - ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &sk->sk_v6_rcv_saddr); - + np->saddr = sk->sk_v6_rcv_saddr; return err; } @@ -1061,7 +1019,7 @@ static struct proto dccp_v6_prot = { .sendmsg = dccp_sendmsg, .recvmsg = dccp_recvmsg, .backlog_rcv = dccp_v6_do_rcv, - .hash = dccp_v6_hash, + .hash = inet_hash, .unhash = inet_unhash, .accept = inet_csk_accept, .get_port = inet_csk_get_port, diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index b50dc436db1f..332f7d6d9942 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -152,8 +152,7 @@ EXPORT_SYMBOL_GPL(dccp_create_openreq_child); * as an request_sock. 
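Both conn_request paths now pass the listener into inet_reqsk_alloc(), so the request owns a listener reference from birth instead of the TFO-only sock_hold() that the earlier request_sock.c hunk removed. A sketch of that ownership model, with illustrative structures and C11 atomics in place of the kernel primitives:

#include <stdatomic.h>
#include <stdlib.h>

struct sock {
        atomic_int refcnt;
};

struct req {
        struct sock *rsk_listener;
        atomic_int refcnt;
};

static void sock_put(struct sock *sk)
{
        if (atomic_fetch_sub(&sk->refcnt, 1) == 1)
                free(sk);
}

/* allocation pins the listener ... */
static struct req *reqsk_alloc(struct sock *listener)
{
        struct req *r = calloc(1, sizeof(*r));

        if (!r)
                return NULL;
        atomic_fetch_add(&listener->refcnt, 1);  /* the req owns this reference */
        r->rsk_listener = listener;
        atomic_init(&r->refcnt, 1);
        return r;
}

/* ... and the final reqsk_put() is what releases it */
static void reqsk_put(struct req *r)
{
        if (atomic_fetch_sub(&r->refcnt, 1) == 1) {
                sock_put(r->rsk_listener);
                free(r);
        }
}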
*/ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, - struct request_sock *req, - struct request_sock **prev) + struct request_sock *req) { struct sock *child = NULL; struct dccp_request_sock *dreq = dccp_rsk(req); @@ -200,7 +199,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, if (child == NULL) goto listen_overflow; - inet_csk_reqsk_queue_unlink(sk, req, prev); + inet_csk_reqsk_queue_unlink(sk, req); inet_csk_reqsk_queue_removed(sk, req); inet_csk_reqsk_queue_add(sk, req, child); out: @@ -212,7 +211,7 @@ drop: if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) req->rsk_ops->send_reset(sk, skb); - inet_csk_reqsk_queue_drop(sk, req, prev); + inet_csk_reqsk_queue_drop(sk, req); goto out; } diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 1cd46a345cb0..3ef7acef3ce8 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c @@ -161,33 +161,11 @@ out: sock_put(sk); } -/* - * Timer for listening sockets - */ -static void dccp_response_timer(struct sock *sk) -{ - inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT, - DCCP_RTO_MAX); -} - static void dccp_keepalive_timer(unsigned long data) { struct sock *sk = (struct sock *)data; - /* Only process if socket is not in use. */ - bh_lock_sock(sk); - if (sock_owned_by_user(sk)) { - /* Try again later. */ - inet_csk_reset_keepalive_timer(sk, HZ / 20); - goto out; - } - - if (sk->sk_state == DCCP_LISTEN) { - dccp_response_timer(sk); - goto out; - } -out: - bh_unlock_sock(sk); + pr_err("dccp should not use a keepalive timer !\n"); sock_put(sk); } diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index b45206e8dd3e..ff7736f7ff42 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -5,10 +5,12 @@ config HAVE_NET_DSA # Drivers must select NET_DSA and the appropriate tagging format config NET_DSA - tristate - depends on HAVE_NET_DSA + tristate "Distributed Switch Architecture" + depends on HAVE_NET_DSA && NET_SWITCHDEV select PHYLIB - select NET_SWITCHDEV + ---help--- + Say Y if you want to enable support for the hardware switches supported + by the Distributed Switch Architecture. 
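The dccp_check_req() hunk earlier above drops the **prev cursor from the unlink call: with a spinlock-protected bucket, removal can simply walk the chain. A self-contained sketch of the unlink-then-append promotion from a SYN bucket to the accept FIFO, using illustrative structures:

#include <stddef.h>

struct req {
        struct req *dl_next;
};

struct listener {
        struct req *syn_table[16];      /* SYN buckets, singly linked */
        struct req *accept_head;
        struct req **accept_tail;       /* FIFO consumed by accept() */
};

static void promote(struct listener *l, unsigned int hash, struct req *req)
{
        struct req **pp;

        /* unlink without a caller-supplied prev pointer: walk the chain */
        for (pp = &l->syn_table[hash & 15]; *pp; pp = &(*pp)->dl_next) {
                if (*pp == req) {
                        *pp = req->dl_next;
                        break;
                }
        }

        /* append to the accept queue */
        req->dl_next = NULL;
        if (!l->accept_head)
                l->accept_tail = &l->accept_head;
        *l->accept_tail = req;
        l->accept_tail = &req->dl_next;
}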
if NET_DSA diff --git a/net/dsa/slave.c b/net/dsa/slave.c index a47305c72fcc..f0af7aa331c1 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -16,6 +16,7 @@ #include <linux/of_net.h> #include <linux/of_mdio.h> #include <net/rtnetlink.h> +#include <net/switchdev.h> #include <linux/if_bridge.h> #include "dsa_priv.h" @@ -572,8 +573,11 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_set_rx_mode = dsa_slave_set_rx_mode, .ndo_set_mac_address = dsa_slave_set_mac_address, .ndo_do_ioctl = dsa_slave_ioctl, - .ndo_switch_parent_id_get = dsa_slave_parent_id_get, - .ndo_switch_port_stp_update = dsa_slave_stp_update, +}; + +static const struct swdev_ops dsa_slave_swdev_ops = { + .swdev_parent_id_get = dsa_slave_parent_id_get, + .swdev_port_stp_update = dsa_slave_stp_update, }; static void dsa_slave_adjust_link(struct net_device *dev) @@ -617,6 +621,24 @@ static int dsa_slave_fixed_link_update(struct net_device *dev, } /* slave device setup *******************************************************/ +static int dsa_slave_phy_connect(struct dsa_slave_priv *p, + struct net_device *slave_dev, + int addr) +{ + struct dsa_switch *ds = p->parent; + + p->phy = ds->slave_mii_bus->phy_map[addr]; + if (!p->phy) + return -ENODEV; + + /* Use already configured phy mode */ + p->phy_interface = p->phy->interface; + phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, + p->phy_interface); + + return 0; +} + static int dsa_slave_phy_setup(struct dsa_slave_priv *p, struct net_device *slave_dev) { @@ -650,10 +672,25 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, if (ds->drv->get_phy_flags) phy_flags = ds->drv->get_phy_flags(ds, p->port); - if (phy_dn) - p->phy = of_phy_connect(slave_dev, phy_dn, - dsa_slave_adjust_link, phy_flags, - p->phy_interface); + if (phy_dn) { + ret = of_mdio_parse_addr(&slave_dev->dev, phy_dn); + /* If this PHY address is part of phys_mii_mask, which means + * that we need to divert reads and writes to/from it, then we + * want to bind this device using the slave MII bus created by + * DSA to make that happen. 
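The DSA hunk below moves the switchdev callbacks off the ndo_* table and onto a dedicated ops structure hung from the net_device, keeping fabric operations separate from the data path. A minimal sketch of that split, reduced to the two callbacks shown in the diff; values and names outside the diff are illustrative:

#include <stdio.h>

struct net_device;

struct netdev_ops {
        int (*ndo_open)(struct net_device *dev);
};

struct swdev_ops {
        int (*swdev_parent_id_get)(struct net_device *dev, unsigned long *id);
        int (*swdev_port_stp_update)(struct net_device *dev, int state);
};

struct net_device {
        const struct netdev_ops *netdev_ops;    /* data-path / control ops */
        const struct swdev_ops *swdev_ops;      /* switch-fabric ops, now separate */
};

static int slave_parent_id_get(struct net_device *dev, unsigned long *id)
{
        *id = 42;       /* illustrative switch tree id */
        return 0;
}

static int slave_stp_update(struct net_device *dev, int state)
{
        printf("STP state -> %d\n", state);
        return 0;
}

static const struct swdev_ops slave_swdev_ops = {
        .swdev_parent_id_get = slave_parent_id_get,
        .swdev_port_stp_update = slave_stp_update,
};

int main(void)
{
        struct net_device dev = { .netdev_ops = NULL, .swdev_ops = &slave_swdev_ops };
        unsigned long id;

        dev.swdev_ops->swdev_parent_id_get(&dev, &id);
        return dev.swdev_ops->swdev_port_stp_update(&dev, 3);   /* 3: forwarding, illustrative */
}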
+ */ + if (!phy_is_fixed && ret >= 0 && + (ds->phys_mii_mask & (1 << ret))) { + ret = dsa_slave_phy_connect(p, slave_dev, ret); + if (ret) + return ret; + } else { + p->phy = of_phy_connect(slave_dev, phy_dn, + dsa_slave_adjust_link, + phy_flags, + p->phy_interface); + } + } if (p->phy && phy_is_fixed) fixed_phy_set_link_update(p->phy, dsa_slave_fixed_link_update); @@ -662,14 +699,9 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, * MDIO bus instead */ if (!p->phy) { - p->phy = ds->slave_mii_bus->phy_map[p->port]; - if (!p->phy) - return -ENODEV; - - /* Use already configured phy mode */ - p->phy_interface = p->phy->interface; - phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, - p->phy_interface); + ret = dsa_slave_phy_connect(p, slave_dev, p->port); + if (ret) + return ret; } else { netdev_info(slave_dev, "attached PHY at address %d [%s]\n", p->phy->addr, p->phy->drv->name); @@ -727,6 +759,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, eth_hw_addr_inherit(slave_dev, master); slave_dev->tx_queue_len = 0; slave_dev->netdev_ops = &dsa_slave_netdev_ops; + slave_dev->swdev_ops = &dsa_slave_swdev_ops; SET_NETDEV_DEV(slave_dev, parent); slave_dev->dev.of_node = ds->pd->port_dn[port]; diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index dfd3c6007f60..0ae5822ef944 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -113,7 +113,7 @@ static void lowpan_setup(struct net_device *dev) { dev->addr_len = IEEE802154_ADDR_LEN; memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); - dev->type = ARPHRD_IEEE802154; + dev->type = ARPHRD_6LOWPAN; /* Frame Control + Sequence Number + Address fields + Security Header */ dev->hard_header_len = 2 + 1 + 20 + 14; dev->needed_tailroom = 2; /* FCS */ diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c index 888d0991c761..2ee00e8a0308 100644 --- a/net/ieee802154/core.c +++ b/net/ieee802154/core.c @@ -25,6 +25,9 @@ #include "sysfs.h" #include "core.h" +/* name for sysfs, %d is appended */ +#define PHY_NAME "phy" + /* RCU-protected (and RTNL for writers) */ LIST_HEAD(cfg802154_rdev_list); int cfg802154_rdev_list_generation; @@ -122,7 +125,7 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size) INIT_LIST_HEAD(&rdev->wpan_dev_list); device_initialize(&rdev->wpan_phy.dev); - dev_set_name(&rdev->wpan_phy.dev, "wpan-phy%d", rdev->wpan_phy_idx); + dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx); rdev->wpan_phy.dev.class = &wpan_phy_class; rdev->wpan_phy.dev.platform_data = rdev; diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index 9105265920fe..2b4955d7aae5 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -76,7 +76,6 @@ nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } -EXPORT_SYMBOL(ieee802154_nl_start_confirm); static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct net_device *dev) diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c index dff55c2d87f3..133b4280660c 100644 --- a/net/ieee802154/sysfs.c +++ b/net/ieee802154/sysfs.c @@ -48,49 +48,6 @@ static ssize_t name_show(struct device *dev, } static DEVICE_ATTR_RO(name); -#define MASTER_SHOW_COMPLEX(name, format_string, args...) 
\ -static ssize_t name ## _show(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \ - int ret; \ - \ - mutex_lock(&phy->pib_lock); \ - ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \ - mutex_unlock(&phy->pib_lock); \ - return ret; \ -} \ -static DEVICE_ATTR_RO(name) - -#define MASTER_SHOW(field, format_string) \ - MASTER_SHOW_COMPLEX(field, format_string, phy->field) - -MASTER_SHOW(current_channel, "%d"); -MASTER_SHOW(current_page, "%d"); -MASTER_SHOW(transmit_power, "%d +- 1 dB"); -MASTER_SHOW_COMPLEX(cca_mode, "%d", phy->cca.mode); - -static ssize_t channels_supported_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); - int ret; - int i, len = 0; - - mutex_lock(&phy->pib_lock); - for (i = 0; i < 32; i++) { - ret = snprintf(buf + len, PAGE_SIZE - len, - "%#09x\n", phy->channels_supported[i]); - if (ret < 0) - break; - len += ret; - } - mutex_unlock(&phy->pib_lock); - return len; -} -static DEVICE_ATTR_RO(channels_supported); - static void wpan_phy_release(struct device *dev) { struct cfg802154_registered_device *rdev = dev_to_rdev(dev); @@ -101,12 +58,6 @@ static void wpan_phy_release(struct device *dev) static struct attribute *pmib_attrs[] = { &dev_attr_index.attr, &dev_attr_name.attr, - /* below will be removed soon */ - &dev_attr_current_channel.attr, - &dev_attr_current_page.attr, - &dev_attr_channels_supported.attr, - &dev_attr_transmit_power.attr, - &dev_attr_cca_mode.attr, NULL, }; ATTRIBUTE_GROUPS(pmib); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 5105759e4e00..975ee5e30c64 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -107,7 +107,7 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = { static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE]; -static u32 inet_addr_hash(struct net *net, __be32 addr) +static u32 inet_addr_hash(const struct net *net, __be32 addr) { u32 val = (__force u32) addr ^ net_hash_mix(net); @@ -560,9 +560,9 @@ static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa) lock_sock(sk); if (join) - ret = __ip_mc_join_group(sk, &mreq); + ret = ip_mc_join_group(sk, &mreq); else - ret = __ip_mc_leave_group(sk, &mreq); + ret = ip_mc_leave_group(sk, &mreq); release_sock(sk); return ret; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index e067770235bf..e5b6b0534c5f 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -52,12 +52,12 @@ static int __net_init fib4_rules_init(struct net *net) { struct fib_table *local_table, *main_table; - local_table = fib_trie_table(RT_TABLE_LOCAL); - if (local_table == NULL) + main_table = fib_trie_table(RT_TABLE_MAIN, NULL); + if (main_table == NULL) return -ENOMEM; - main_table = fib_trie_table(RT_TABLE_MAIN); - if (main_table == NULL) + local_table = fib_trie_table(RT_TABLE_LOCAL, main_table); + if (local_table == NULL) goto fail; hlist_add_head_rcu(&local_table->tb_hlist, @@ -67,14 +67,14 @@ static int __net_init fib4_rules_init(struct net *net) return 0; fail: - fib_free_table(local_table); + fib_free_table(main_table); return -ENOMEM; } #else struct fib_table *fib_new_table(struct net *net, u32 id) { - struct fib_table *tb; + struct fib_table *tb, *alias = NULL; unsigned int h; if (id == 0) @@ -83,7 +83,10 @@ struct fib_table *fib_new_table(struct net *net, u32 id) if (tb) return tb; - tb = fib_trie_table(id); + if (id == RT_TABLE_LOCAL) + alias = 
fib_new_table(net, RT_TABLE_MAIN); + + tb = fib_trie_table(id, alias); if (!tb) return NULL; @@ -126,6 +129,51 @@ struct fib_table *fib_get_table(struct net *net, u32 id) } #endif /* CONFIG_IP_MULTIPLE_TABLES */ +static void fib_replace_table(struct net *net, struct fib_table *old, + struct fib_table *new) +{ +#ifdef CONFIG_IP_MULTIPLE_TABLES + switch (new->tb_id) { + case RT_TABLE_LOCAL: + rcu_assign_pointer(net->ipv4.fib_local, new); + break; + case RT_TABLE_MAIN: + rcu_assign_pointer(net->ipv4.fib_main, new); + break; + case RT_TABLE_DEFAULT: + rcu_assign_pointer(net->ipv4.fib_default, new); + break; + default: + break; + } + +#endif + /* replace the old table in the hlist */ + hlist_replace_rcu(&old->tb_hlist, &new->tb_hlist); +} + +int fib_unmerge(struct net *net) +{ + struct fib_table *old, *new; + + /* attempt to fetch local table if it has been allocated */ + old = fib_get_table(net, RT_TABLE_LOCAL); + if (!old) + return 0; + + new = fib_trie_unmerge(old); + if (!new) + return -ENOMEM; + + /* replace merged table with clean table */ + if (new != old) { + fib_replace_table(net, old, new); + fib_free_table(old); + } + + return 0; +} + static void fib_flush(struct net *net) { int flushed = 0; @@ -1126,11 +1174,12 @@ static void ip_fib_net_exit(struct net *net) { unsigned int i; + rtnl_lock(); + #ifdef CONFIG_IP_MULTIPLE_TABLES fib4_rules_exit(net); #endif - rtnl_lock(); for (i = 0; i < FIB_TABLE_HASHSZ; i++) { struct hlist_head *head = &net->ipv4.fib_table_hash[i]; struct hlist_node *tmp; diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h index ae2e6eede46e..c6211ed60b03 100644 --- a/net/ipv4/fib_lookup.h +++ b/net/ipv4/fib_lookup.h @@ -12,6 +12,7 @@ struct fib_alias { u8 fa_type; u8 fa_state; u8 fa_slen; + u32 tb_id; struct rcu_head rcu; }; diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 190d0d00d744..e9bc5e42cf43 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -174,6 +174,11 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, if (frh->tos & ~IPTOS_TOS_MASK) goto errout; + /* split local/main if they are not already split */ + err = fib_unmerge(net); + if (err) + goto errout; + if (rule->table == RT_TABLE_UNSPEC) { if (rule->action == FR_ACT_TO_TBL) { struct fib_table *table; @@ -216,17 +221,24 @@ errout: return err; } -static void fib4_rule_delete(struct fib_rule *rule) +static int fib4_rule_delete(struct fib_rule *rule) { struct net *net = rule->fr_net; -#ifdef CONFIG_IP_ROUTE_CLASSID - struct fib4_rule *rule4 = (struct fib4_rule *) rule; + int err; - if (rule4->tclassid) + /* split local/main if they are not already split */ + err = fib_unmerge(net); + if (err) + goto errout; + +#ifdef CONFIG_IP_ROUTE_CLASSID + if (((struct fib4_rule *)rule)->tclassid) net->ipv4.fib_num_tclassid_users--; #endif net->ipv4.fib_has_custom_rules = true; fib_flush_external(rule->fr_net); +errout: + return err; } static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index c6d267442dac..66c1e4fbf884 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -213,7 +213,6 @@ static void free_fib_info_rcu(struct rcu_head *head) rt_fibinfo_free(&nexthop_nh->nh_rth_input); } endfor_nexthops(fi); - release_net(fi->fib_net); if (fi->fib_metrics != (u32 *) dst_default_metrics) kfree(fi->fib_metrics); kfree(fi); @@ -814,7 +813,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) } else fi->fib_metrics = (u32 *) dst_default_metrics; - 
fi->fib_net = hold_net(net); + fi->fib_net = net; fi->fib_protocol = cfg->fc_protocol; fi->fib_scope = cfg->fc_scope; fi->fib_flags = cfg->fc_flags; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 83290beaf7cf..2c7c299ee2b9 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -830,7 +830,7 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn) /* Double as long as the resulting node has a number of * nonempty nodes that are above the threshold. */ - while (should_inflate(tp, tn) && max_work--) { + while (should_inflate(tp, tn) && max_work) { tp = inflate(t, tn); if (!tp) { #ifdef CONFIG_IP_FIB_TRIE_STATS @@ -839,17 +839,21 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn) break; } + max_work--; tn = get_child(tp, cindex); } + /* update parent in case inflate failed */ + tp = node_parent(tn); + /* Return if at least one inflate is run */ if (max_work != MAX_WORK) - return node_parent(tn); + return tp; /* Halve as long as the number of empty children in this * node is above threshold. */ - while (should_halve(tp, tn) && max_work--) { + while (should_halve(tp, tn) && max_work) { tp = halve(t, tn); if (!tp) { #ifdef CONFIG_IP_FIB_TRIE_STATS @@ -858,6 +862,7 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn) break; } + max_work--; tn = get_child(tp, cindex); } @@ -865,7 +870,7 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn) if (should_collapse(tn)) return collapse(t, tn); - /* update parent in case inflate or halve failed */ + /* update parent in case halve failed */ tp = node_parent(tn); /* Return if at least one deflate was run */ @@ -950,7 +955,7 @@ static struct key_vector *fib_find_node(struct trie *t, * priority less than or equal to PRIO. */ static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen, - u8 tos, u32 prio) + u8 tos, u32 prio, u32 tb_id) { struct fib_alias *fa; @@ -962,6 +967,10 @@ static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen, continue; if (fa->fa_slen != slen) break; + if (fa->tb_id > tb_id) + continue; + if (fa->tb_id != tb_id) + break; if (fa->fa_tos > tos) continue; if (fa->fa_info->fib_priority >= prio || fa->fa_tos < tos) @@ -1041,6 +1050,9 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp, hlist_for_each_entry(last, &l->leaf, fa_list) { if (new->fa_slen < last->fa_slen) break; + if ((new->fa_slen == last->fa_slen) && + (new->tb_id > last->tb_id)) + break; fa = last; } @@ -1089,7 +1101,8 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) } l = fib_find_node(t, &tp, key); - fa = l ? fib_find_alias(&l->leaf, slen, tos, fi->fib_priority) : NULL; + fa = l ? fib_find_alias(&l->leaf, slen, tos, fi->fib_priority, + tb->tb_id) : NULL; /* Now fa, if non-NULL, points to the first fib alias * with the same keys [prefix,tos,priority], if such key already @@ -1116,7 +1129,9 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) fa_match = NULL; fa_first = fa; hlist_for_each_entry_from(fa, fa_list) { - if ((fa->fa_slen != slen) || (fa->fa_tos != tos)) + if ((fa->fa_slen != slen) || + (fa->tb_id != tb->tb_id) || + (fa->fa_tos != tos)) break; if (fa->fa_info->fib_priority != fi->fib_priority) break; @@ -1197,6 +1212,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) new_fa->fa_type = cfg->fc_type; new_fa->fa_state = 0; new_fa->fa_slen = slen; + new_fa->tb_id = tb->tb_id; /* (Optionally) offload fib entry to switch hardware. 
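The fib_trie hunks above tag each alias with its tb_id so local and main can share one trie: fib_find_alias() and fib_insert_alias() keep a leaf's list ordered by fa_slen and, within equal fa_slen, by descending tb_id, which places RT_TABLE_LOCAL (255) ahead of RT_TABLE_MAIN (254). A sketch of that sort key, pulled out as a standalone predicate:

#include <stdbool.h>
#include <stdint.h>

#define RT_TABLE_MAIN   254
#define RT_TABLE_LOCAL  255

struct fib_alias {
        uint8_t fa_slen;
        uint32_t tb_id;
};

/* true if a sorts before b in a shared leaf: ascending fa_slen,
 * then descending tb_id so the local table is visited first */
static bool fa_before(const struct fib_alias *a, const struct fib_alias *b)
{
        if (a->fa_slen != b->fa_slen)
                return a->fa_slen < b->fa_slen;
        return a->tb_id > b->tb_id;
}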
*/ err = netdev_switch_fib_ipv4_add(key, plen, fi, tos, @@ -1217,7 +1233,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) tb->tb_num_default++; rt_cache_flush(cfg->fc_nlinfo.nl_net); - rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, + rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id, &cfg->fc_nlinfo, 0); succeeded: return 0; @@ -1243,7 +1259,7 @@ static inline t_key prefix_mismatch(t_key key, struct key_vector *n) int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, struct fib_result *res, int fib_flags) { - struct trie *t = (struct trie *)tb->tb_data; + struct trie *t = (struct trie *) tb->tb_data; #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats __percpu *stats = t->stats; #endif @@ -1470,7 +1486,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) if (!l) return -ESRCH; - fa = fib_find_alias(&l->leaf, slen, tos, 0); + fa = fib_find_alias(&l->leaf, slen, tos, 0, tb->tb_id); if (!fa) return -ESRCH; @@ -1480,7 +1496,9 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) hlist_for_each_entry_from(fa, fa_list) { struct fib_info *fi = fa->fa_info; - if ((fa->fa_slen != slen) || (fa->fa_tos != tos)) + if ((fa->fa_slen != slen) || + (fa->tb_id != tb->tb_id) || + (fa->fa_tos != tos)) break; if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) && @@ -1576,6 +1594,120 @@ found: return n; } +static void fib_trie_free(struct fib_table *tb) +{ + struct trie *t = (struct trie *)tb->tb_data; + struct key_vector *pn = t->kv; + unsigned long cindex = 1; + struct hlist_node *tmp; + struct fib_alias *fa; + + /* walk trie in reverse order and free everything */ + for (;;) { + struct key_vector *n; + + if (!(cindex--)) { + t_key pkey = pn->key; + + if (IS_TRIE(pn)) + break; + + n = pn; + pn = node_parent(pn); + + /* drop emptied tnode */ + put_child_root(pn, n->key, NULL); + node_free(n); + + cindex = get_index(pkey, pn); + + continue; + } + + /* grab the next available node */ + n = get_child(pn, cindex); + if (!n) + continue; + + if (IS_TNODE(n)) { + /* record pn and cindex for leaf walking */ + pn = n; + cindex = 1ul << n->bits; + + continue; + } + + hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { + hlist_del_rcu(&fa->fa_list); + alias_free_mem_rcu(fa); + } + + put_child_root(pn, n->key, NULL); + node_free(n); + } + +#ifdef CONFIG_IP_FIB_TRIE_STATS + free_percpu(t->stats); +#endif + kfree(tb); +} + +struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) +{ + struct trie *ot = (struct trie *)oldtb->tb_data; + struct key_vector *l, *tp = ot->kv; + struct fib_table *local_tb; + struct fib_alias *fa; + struct trie *lt; + t_key key = 0; + + if (oldtb->tb_data == oldtb->__data) + return oldtb; + + local_tb = fib_trie_table(RT_TABLE_LOCAL, NULL); + if (!local_tb) + return NULL; + + lt = (struct trie *)local_tb->tb_data; + + while ((l = leaf_walk_rcu(&tp, key)) != NULL) { + struct key_vector *local_l = NULL, *local_tp; + + hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { + struct fib_alias *new_fa; + + if (local_tb->tb_id != fa->tb_id) + continue; + + /* clone fa for new local table */ + new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); + if (!new_fa) + goto out; + + memcpy(new_fa, fa, sizeof(*fa)); + + /* insert clone into table */ + if (!local_l) + local_l = fib_find_node(lt, &local_tp, l->key); + + if (fib_insert_alias(lt, local_tp, local_l, new_fa, + NULL, l->key)) + goto out; + } + + /* stop loop if key wrapped back to 0 */ + key = l->key + 1; + if (key < l->key) + break; + } + + return 
local_tb; +out: + fib_trie_free(local_tb); + + return NULL; +} + /* Caller must hold RTNL */ void fib_table_flush_external(struct fib_table *tb) { @@ -1587,6 +1719,7 @@ void fib_table_flush_external(struct fib_table *tb) /* walk trie in reverse order */ for (;;) { + unsigned char slen = 0; struct key_vector *n; if (!(cindex--)) { @@ -1596,8 +1729,8 @@ void fib_table_flush_external(struct fib_table *tb) if (IS_TRIE(pn)) break; - /* no need to resize like in flush below */ - pn = node_parent(pn); + /* resize completed node */ + pn = resize(t, pn); cindex = get_index(pkey, pn); continue; @@ -1619,6 +1752,18 @@ void fib_table_flush_external(struct fib_table *tb) hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { struct fib_info *fi = fa->fa_info; + /* if alias was cloned to local then we just + * need to remove the local copy from main + */ + if (tb->tb_id != fa->tb_id) { + hlist_del_rcu(&fa->fa_list); + alias_free_mem_rcu(fa); + continue; + } + + /* record local slen */ + slen = fa->fa_slen; + if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL)) continue; @@ -1627,6 +1772,16 @@ void fib_table_flush_external(struct fib_table *tb) fi, fa->fa_tos, fa->fa_type, tb->tb_id); } + + /* update leaf slen */ + n->slen = slen; + + if (hlist_empty(&n->leaf)) { + put_child_root(pn, n->key, NULL); + node_free(n); + } else { + leaf_pull_suffix(pn, n); + } } } @@ -1711,7 +1866,8 @@ static void __trie_free_rcu(struct rcu_head *head) #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie *t = (struct trie *)tb->tb_data; - free_percpu(t->stats); + if (tb->tb_data == tb->__data) + free_percpu(t->stats); #endif /* CONFIG_IP_FIB_TRIE_STATS */ kfree(tb); } @@ -1738,6 +1894,11 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, continue; } + if (tb->tb_id != fa->tb_id) { + i++; + continue; + } + if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWROUTE, @@ -1804,18 +1965,26 @@ void __init fib_trie_init(void) 0, SLAB_PANIC, NULL); } -struct fib_table *fib_trie_table(u32 id) +struct fib_table *fib_trie_table(u32 id, struct fib_table *alias) { struct fib_table *tb; struct trie *t; + size_t sz = sizeof(*tb); + + if (!alias) + sz += sizeof(struct trie); - tb = kzalloc(sizeof(*tb) + sizeof(struct trie), GFP_KERNEL); + tb = kzalloc(sz, GFP_KERNEL); if (tb == NULL) return NULL; tb->tb_id = id; tb->tb_default = -1; tb->tb_num_default = 0; + tb->tb_data = (alias ? 
alias->__data : tb->__data); + + if (alias) + return tb; t = (struct trie *) tb->tb_data; t->kv[0].pos = KEYLENGTH; @@ -2381,6 +2550,8 @@ static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info */ static int fib_route_seq_show(struct seq_file *seq, void *v) { + struct fib_route_iter *iter = seq->private; + struct fib_table *tb = iter->main_tb; struct fib_alias *fa; struct key_vector *l = v; __be32 prefix; @@ -2403,6 +2574,9 @@ static int fib_route_seq_show(struct seq_file *seq, void *v) (fa->fa_type == RTN_MULTICAST)) continue; + if (fa->tb_id != tb->tb_id) + continue; + seq_setwidth(seq, 127); if (fi) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 5cb1ef4ce292..ad3f866085de 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1850,7 +1850,10 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc) pmc->sfcount[MCAST_EXCLUDE] = 1; } -int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) +/* Join a multicast group + */ + +int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) { __be32 addr = imr->imr_multiaddr.s_addr; struct ip_mc_socklist *iml, *i; @@ -1898,20 +1901,6 @@ int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) done: return err; } -EXPORT_SYMBOL(__ip_mc_join_group); - -/* Join a multicast group - */ -int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr) -{ - int ret; - - rtnl_lock(); - ret = __ip_mc_join_group(sk, imr); - rtnl_unlock(); - - return ret; -} EXPORT_SYMBOL(ip_mc_join_group); static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, @@ -1934,7 +1923,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, return err; } -int __ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) +int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) { struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *iml; @@ -1979,18 +1968,6 @@ int __ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) out: return ret; } -EXPORT_SYMBOL(__ip_mc_leave_group); - -int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) -{ - int ret; - - rtnl_lock(); - ret = __ip_mc_leave_group(sk, imr); - rtnl_unlock(); - - return ret; -} EXPORT_SYMBOL(ip_mc_leave_group); int ip_mc_source(int add, int omode, struct sock *sk, struct @@ -2010,7 +1987,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct if (!ipv4_is_multicast(addr)) return -EINVAL; - rtnl_lock(); + ASSERT_RTNL(); imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr; imr.imr_address.s_addr = mreqs->imr_interface; @@ -2124,9 +2101,8 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1, &mreqs->imr_sourceaddr, 1); done: - rtnl_unlock(); if (leavegroup) - return ip_mc_leave_group(sk, &imr); + err = ip_mc_leave_group(sk, &imr); return err; } @@ -2148,7 +2124,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) msf->imsf_fmode != MCAST_EXCLUDE) return -EINVAL; - rtnl_lock(); + ASSERT_RTNL(); imr.imr_multiaddr.s_addr = msf->imsf_multiaddr; imr.imr_address.s_addr = msf->imsf_interface; @@ -2210,7 +2186,6 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) pmc->sfmode = msf->imsf_fmode; err = 0; done: - rtnl_unlock(); if (leavegroup) err = ip_mc_leave_group(sk, &imr); return err; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 14d02ea905b6..79c0c9439fdc 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -23,6 +23,7 @@ #include <net/route.h> #include 
<net/tcp_states.h> #include <net/xfrm.h> +#include <net/tcp.h> #ifdef INET_CSK_DEBUG const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n"; @@ -268,6 +269,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo) release_sock(sk); if (reqsk_queue_empty(&icsk->icsk_accept_queue)) timeo = schedule_timeout(timeo); + sched_annotate_sleep(); lock_sock(sk); err = 0; if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) @@ -293,8 +295,8 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) { struct inet_connection_sock *icsk = inet_csk(sk); struct request_sock_queue *queue = &icsk->icsk_accept_queue; - struct sock *newsk; struct request_sock *req; + struct sock *newsk; int error; lock_sock(sk); @@ -323,9 +325,11 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) newsk = req->sk; sk_acceptq_removed(sk); - if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) { + if (sk->sk_protocol == IPPROTO_TCP && + tcp_rsk(req)->tfo_listener && + queue->fastopenq) { spin_lock_bh(&queue->fastopenq->lock); - if (tcp_rsk(req)->listener) { + if (tcp_rsk(req)->tfo_listener) { /* We are still waiting for the final ACK from 3WHS * so can't free req now. Instead, we set req->sk to * NULL to signify that the child socket is taken @@ -340,7 +344,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) out: release_sock(sk); if (req) - __reqsk_free(req); + reqsk_put(req); return newsk; out_err: newsk = NULL; @@ -399,18 +403,17 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4, const struct request_sock *req) { - struct rtable *rt; const struct inet_request_sock *ireq = inet_rsk(req); - struct ip_options_rcu *opt = inet_rsk(req)->opt; - struct net *net = sock_net(sk); - int flags = inet_sk_flowi_flags(sk); + struct net *net = read_pnet(&ireq->ireq_net); + struct ip_options_rcu *opt = ireq->opt; + struct rtable *rt; - flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark, + flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, - sk->sk_protocol, - flags, + sk->sk_protocol, inet_sk_flowi_flags(sk), (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, - ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport); + ireq->ir_loc_addr, ireq->ir_rmt_port, + htons(ireq->ir_num)); security_req_classify_flow(req, flowi4_to_flowi(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) @@ -432,9 +435,9 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk, const struct request_sock *req) { const struct inet_request_sock *ireq = inet_rsk(req); + struct net *net = read_pnet(&ireq->ireq_net); struct inet_sock *newinet = inet_sk(newsk); struct ip_options_rcu *opt; - struct net *net = sock_net(sk); struct flowi4 *fl4; struct rtable *rt; @@ -442,11 +445,12 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk, rcu_read_lock(); opt = rcu_dereference(newinet->inet_opt); - flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark, + flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), (opt && opt->opt.srr) ? 
opt->opt.faddr : ireq->ir_rmt_addr, - ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport); + ireq->ir_loc_addr, ireq->ir_rmt_port, + htons(ireq->ir_num)); security_req_classify_flow(req, flowi4_to_flowi(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) @@ -474,33 +478,37 @@ static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport, #if IS_ENABLED(CONFIG_IPV6) #define AF_INET_FAMILY(fam) ((fam) == AF_INET) #else -#define AF_INET_FAMILY(fam) 1 +#define AF_INET_FAMILY(fam) true #endif -struct request_sock *inet_csk_search_req(const struct sock *sk, - struct request_sock ***prevp, - const __be16 rport, const __be32 raddr, +/* Note: this is temporary : + * req sock will no longer be in listener hash table +*/ +struct request_sock *inet_csk_search_req(struct sock *sk, + const __be16 rport, + const __be32 raddr, const __be32 laddr) { - const struct inet_connection_sock *icsk = inet_csk(sk); + struct inet_connection_sock *icsk = inet_csk(sk); struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; - struct request_sock *req, **prev; + struct request_sock *req; + u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd, + lopt->nr_table_entries); - for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd, - lopt->nr_table_entries)]; - (req = *prev) != NULL; - prev = &req->dl_next) { + spin_lock(&icsk->icsk_accept_queue.syn_wait_lock); + for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) { const struct inet_request_sock *ireq = inet_rsk(req); if (ireq->ir_rmt_port == rport && ireq->ir_rmt_addr == raddr && ireq->ir_loc_addr == laddr && AF_INET_FAMILY(req->rsk_ops->family)) { + atomic_inc(&req->rsk_refcnt); WARN_ON(req->sk); - *prevp = prev; break; } } + spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock); return req; } @@ -556,23 +564,24 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req) } EXPORT_SYMBOL(inet_rtx_syn_ack); -void inet_csk_reqsk_queue_prune(struct sock *parent, - const unsigned long interval, - const unsigned long timeout, - const unsigned long max_rto) +static void reqsk_timer_handler(unsigned long data) { - struct inet_connection_sock *icsk = inet_csk(parent); + struct request_sock *req = (struct request_sock *)data; + struct sock *sk_listener = req->rsk_listener; + struct inet_connection_sock *icsk = inet_csk(sk_listener); struct request_sock_queue *queue = &icsk->icsk_accept_queue; struct listen_sock *lopt = queue->listen_opt; - int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; - int thresh = max_retries; - unsigned long now = jiffies; - struct request_sock **reqp, *req; - int i, budget; + int qlen, expire = 0, resend = 0; + int max_retries, thresh; + u8 defer_accept; - if (lopt == NULL || lopt->qlen == 0) + if (sk_listener->sk_state != TCP_LISTEN || !lopt) { + reqsk_put(req); return; + } + max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; + thresh = max_retries; /* Normally all the openreqs are young and become mature * (i.e. converted to established socket) for first timeout. * If synack was not acknowledged for 1 second, it means @@ -590,67 +599,65 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, * embrions; and abort old ones without pity, if old * ones are about to clog our table. 
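The per-request timer handler body below keeps the old pruning heuristic: once the SYN queue is past half of its capacity, the retry threshold shrinks for as long as the doubled count of young entries stays under qlen. Pulled out as a standalone helper for illustration; it returns max_retries unchanged when the queue is under half full:

/* shrink the retry threshold under SYN-queue pressure; mirrors the
 * qlen/young loop in reqsk_timer_handler() */
static int synack_thresh(int qlen, int young, int max_qlen_log, int max_retries)
{
        int thresh = max_retries;

        if (qlen >> (max_qlen_log - 1)) {       /* more than half full */
                young <<= 1;
                while (thresh > 2) {
                        if (qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }
        return thresh;
}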
*/ - if (lopt->qlen>>(lopt->max_qlen_log-1)) { - int young = (lopt->qlen_young<<1); + qlen = listen_sock_qlen(lopt); + if (qlen >> (lopt->max_qlen_log - 1)) { + int young = listen_sock_young(lopt) << 1; while (thresh > 2) { - if (lopt->qlen < young) + if (qlen < young) break; thresh--; young <<= 1; } } + defer_accept = READ_ONCE(queue->rskq_defer_accept); + if (defer_accept) + max_retries = defer_accept; + syn_ack_recalc(req, thresh, max_retries, defer_accept, + &expire, &resend); + req->rsk_ops->syn_ack_timeout(req); + if (!expire && + (!resend || + !inet_rtx_syn_ack(sk_listener, req) || + inet_rsk(req)->acked)) { + unsigned long timeo; + + if (req->num_timeout++ == 0) + atomic_inc(&lopt->young_dec); + timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX); + mod_timer_pinned(&req->rsk_timer, jiffies + timeo); + return; + } + inet_csk_reqsk_queue_drop(sk_listener, req); + reqsk_put(req); +} - if (queue->rskq_defer_accept) - max_retries = queue->rskq_defer_accept; - - budget = 2 * (lopt->nr_table_entries / (timeout / interval)); - i = lopt->clock_hand; - - do { - reqp=&lopt->syn_table[i]; - while ((req = *reqp) != NULL) { - if (time_after_eq(now, req->expires)) { - int expire = 0, resend = 0; - - syn_ack_recalc(req, thresh, max_retries, - queue->rskq_defer_accept, - &expire, &resend); - req->rsk_ops->syn_ack_timeout(parent, req); - if (!expire && - (!resend || - !inet_rtx_syn_ack(parent, req) || - inet_rsk(req)->acked)) { - unsigned long timeo; - - if (req->num_timeout++ == 0) - lopt->qlen_young--; - timeo = min(timeout << req->num_timeout, - max_rto); - req->expires = now + timeo; - reqp = &req->dl_next; - continue; - } - - /* Drop this request */ - inet_csk_reqsk_queue_unlink(parent, req, reqp); - reqsk_queue_removed(queue, req); - reqsk_free(req); - continue; - } - reqp = &req->dl_next; - } +void reqsk_queue_hash_req(struct request_sock_queue *queue, + u32 hash, struct request_sock *req, + unsigned long timeout) +{ + struct listen_sock *lopt = queue->listen_opt; - i = (i + 1) & (lopt->nr_table_entries - 1); + req->num_retrans = 0; + req->num_timeout = 0; + req->sk = NULL; - } while (--budget > 0); + /* before letting lookups find us, make sure all req fields + * are committed to memory and refcnt initialized. 
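reqsk_queue_hash_req() below initializes the request fully, issues smp_wmb(), and only then sets the refcount that lockless lookups test, so a reader that observes a nonzero refcnt also observes the initialized fields. In C11 terms the barrier plus plain store collapses into one release store; an illustrative analogue:

#include <stdatomic.h>
#include <stddef.h>

struct req {
        int num_retrans;
        int num_timeout;
        void *sk;
        atomic_int refcnt;      /* lookups treat nonzero as "fully constructed" */
};

static void publish_req(struct req *r)
{
        r->num_retrans = 0;
        r->num_timeout = 0;
        r->sk = NULL;

        /* release ordering stands in for smp_wmb() + atomic_set() */
        atomic_store_explicit(&r->refcnt, 2, memory_order_release); /* hash + timer refs */

        /* ...then link into the bucket under syn_wait_lock and arm the timer */
}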
+ */ + smp_wmb(); + atomic_set(&req->rsk_refcnt, 2); + setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req); + req->rsk_hash = hash; - lopt->clock_hand = i; + spin_lock(&queue->syn_wait_lock); + req->dl_next = lopt->syn_table[hash]; + lopt->syn_table[hash] = req; + spin_unlock(&queue->syn_wait_lock); - if (lopt->qlen) - inet_csk_reset_keepalive_timer(parent, interval); + mod_timer_pinned(&req->rsk_timer, jiffies + timeout); } -EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); +EXPORT_SYMBOL(reqsk_queue_hash_req); /** * inet_csk_clone_lock - clone an inet socket, and lock its clone @@ -678,6 +685,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, newsk->sk_write_space = sk_stream_write_space; newsk->sk_mark = inet_rsk(req)->ir_mark; + atomic64_set(&newsk->sk_cookie, + atomic64_read(&inet_rsk(req)->ir_cookie)); newicsk->icsk_retransmits = 0; newicsk->icsk_backoff = 0; @@ -784,8 +793,6 @@ void inet_csk_listen_stop(struct sock *sk) struct request_sock *acc_req; struct request_sock *req; - inet_csk_delete_keepalive_timer(sk); - /* make all the listen_opt local to us */ acc_req = reqsk_queue_yank_acceptq(queue); @@ -815,9 +822,9 @@ void inet_csk_listen_stop(struct sock *sk) percpu_counter_inc(sk->sk_prot->orphan_count); - if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) { + if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) { BUG_ON(tcp_sk(child)->fastopen_rsk != req); - BUG_ON(sk != tcp_rsk(req)->listener); + BUG_ON(sk != req->rsk_listener); /* Paranoid, to prevent race condition if * an inbound pkt destined for child is @@ -826,7 +833,6 @@ void inet_csk_listen_stop(struct sock *sk) * tcp_v4_destroy_sock(). */ tcp_sk(child)->fastopen_rsk = NULL; - sock_put(sk); } inet_csk_destroy_sock(child); @@ -835,7 +841,7 @@ void inet_csk_listen_stop(struct sock *sk) sock_put(child); sk_acceptq_removed(sk); - __reqsk_free(req); + reqsk_put(req); } if (queue->fastopenq != NULL) { /* Free all the reqs queued in rskq_rst_head. 
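 * (i.e. TFO requests that were reset by the peer;
 * tcp_fastopen_queue_check() consults this list to throttle
 * new TFO attempts)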
*/ @@ -845,7 +851,7 @@ void inet_csk_listen_stop(struct sock *sk) spin_unlock_bh(&queue->fastopenq->lock); while ((req = acc_req) != NULL) { acc_req = req->dl_next; - __reqsk_free(req); + reqsk_put(req); } } WARN_ON(sk->sk_ack_backlog); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index ac3bfb458afd..76322c9867d5 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -44,10 +44,6 @@ struct inet_diag_entry { u16 dport; u16 family; u16 userlocks; -#if IS_ENABLED(CONFIG_IPV6) - struct in6_addr saddr_storage; /* for IPv4-mapped-IPv6 addresses */ - struct in6_addr daddr_storage; /* for IPv4-mapped-IPv6 addresses */ -#endif }; static DEFINE_MUTEX(inet_diag_table_mutex); @@ -70,6 +66,44 @@ static void inet_diag_unlock_handler(const struct inet_diag_handler *handler) mutex_unlock(&inet_diag_table_mutex); } +static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk) +{ + r->idiag_family = sk->sk_family; + + r->id.idiag_sport = htons(sk->sk_num); + r->id.idiag_dport = sk->sk_dport; + r->id.idiag_if = sk->sk_bound_dev_if; + sock_diag_save_cookie(sk, r->id.idiag_cookie); + +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) { + *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr; + *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr; + } else +#endif + { + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); + + r->id.idiag_src[0] = sk->sk_rcv_saddr; + r->id.idiag_dst[0] = sk->sk_daddr; + } +} + +static size_t inet_sk_attr_size(void) +{ + return nla_total_size(sizeof(struct tcp_info)) + + nla_total_size(1) /* INET_DIAG_SHUTDOWN */ + + nla_total_size(1) /* INET_DIAG_TOS */ + + nla_total_size(1) /* INET_DIAG_TCLASS */ + + nla_total_size(sizeof(struct inet_diag_meminfo)) + + nla_total_size(sizeof(struct inet_diag_msg)) + + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) + + nla_total_size(TCP_CA_NAME_MAX) + + nla_total_size(sizeof(struct tcpvegas_info)) + + 64; +} + int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, struct sk_buff *skb, const struct inet_diag_req_v2 *req, struct user_namespace *user_ns, @@ -93,25 +127,13 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, return -EMSGSIZE; r = nlmsg_data(nlh); - BUG_ON(sk->sk_state == TCP_TIME_WAIT); + BUG_ON(!sk_fullsock(sk)); - r->idiag_family = sk->sk_family; + inet_diag_msg_common_fill(r, sk); r->idiag_state = sk->sk_state; r->idiag_timer = 0; r->idiag_retrans = 0; - r->id.idiag_if = sk->sk_bound_dev_if; - sock_diag_save_cookie(sk, r->id.idiag_cookie); - - r->id.idiag_sport = inet->inet_sport; - r->id.idiag_dport = inet->inet_dport; - - memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); - memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); - - r->id.idiag_src[0] = inet->inet_rcv_saddr; - r->id.idiag_dst[0] = inet->inet_daddr; - if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown)) goto errout; @@ -124,9 +146,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, #if IS_ENABLED(CONFIG_IPV6) if (r->idiag_family == AF_INET6) { - *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr; - *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr; - if (ext & (1 << (INET_DIAG_TCLASS - 1))) if (nla_put_u8(skb, INET_DIAG_TCLASS, inet6_sk(sk)->tclass) < 0) @@ -221,12 +240,12 @@ static int inet_csk_diag_fill(struct sock *sk, user_ns, portid, seq, nlmsg_flags, unlh); } -static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, +static int inet_twsk_diag_fill(struct 
sock *sk, struct sk_buff *skb, - const struct inet_diag_req_v2 *req, u32 portid, u32 seq, u16 nlmsg_flags, const struct nlmsghdr *unlh) { + struct inet_timewait_sock *tw = inet_twsk(sk); struct inet_diag_msg *r; struct nlmsghdr *nlh; s32 tmo; @@ -243,21 +262,9 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, if (tmo < 0) tmo = 0; - r->idiag_family = tw->tw_family; + inet_diag_msg_common_fill(r, sk); r->idiag_retrans = 0; - r->id.idiag_if = tw->tw_bound_dev_if; - sock_diag_save_cookie(tw, r->id.idiag_cookie); - - r->id.idiag_sport = tw->tw_sport; - r->id.idiag_dport = tw->tw_dport; - - memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); - memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); - - r->id.idiag_src[0] = tw->tw_rcv_saddr; - r->id.idiag_dst[0] = tw->tw_daddr; - r->idiag_state = tw->tw_substate; r->idiag_timer = 3; r->idiag_expires = jiffies_to_msecs(tmo); @@ -265,12 +272,39 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, r->idiag_wqueue = 0; r->idiag_uid = 0; r->idiag_inode = 0; -#if IS_ENABLED(CONFIG_IPV6) - if (tw->tw_family == AF_INET6) { - *(struct in6_addr *)r->id.idiag_src = tw->tw_v6_rcv_saddr; - *(struct in6_addr *)r->id.idiag_dst = tw->tw_v6_daddr; - } -#endif + + nlmsg_end(skb, nlh); + return 0; +} + +static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb, + u32 portid, u32 seq, u16 nlmsg_flags, + const struct nlmsghdr *unlh) +{ + struct inet_diag_msg *r; + struct nlmsghdr *nlh; + long tmo; + + nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), + nlmsg_flags); + if (!nlh) + return -EMSGSIZE; + + r = nlmsg_data(nlh); + inet_diag_msg_common_fill(r, sk); + r->idiag_state = TCP_SYN_RECV; + r->idiag_timer = 1; + r->idiag_retrans = inet_reqsk(sk)->num_retrans; + + BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) != + offsetof(struct sock, sk_cookie)); + + tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies; + r->idiag_expires = (tmo >= 0) ? 
jiffies_to_msecs(tmo) : 0; + r->idiag_rqueue = 0; + r->idiag_wqueue = 0; + r->idiag_uid = 0; + r->idiag_inode = 0; nlmsg_end(skb, nlh); return 0; @@ -283,9 +317,13 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, const struct nlmsghdr *unlh) { if (sk->sk_state == TCP_TIME_WAIT) - return inet_twsk_diag_fill(inet_twsk(sk), skb, r, portid, seq, + return inet_twsk_diag_fill(sk, skb, portid, seq, nlmsg_flags, unlh); + if (sk->sk_state == TCP_NEW_SYN_RECV) + return inet_req_diag_fill(sk, skb, portid, seq, + nlmsg_flags, unlh); + return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, nlmsg_flags, unlh); } @@ -325,9 +363,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, if (err) goto out; - rep = nlmsg_new(sizeof(struct inet_diag_msg) + - sizeof(struct inet_diag_meminfo) + - sizeof(struct tcp_info) + 64, GFP_KERNEL); + rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL); if (!rep) { err = -ENOMEM; goto out; @@ -484,6 +520,23 @@ static int inet_diag_bc_run(const struct nlattr *_bc, return len == 0; } +/* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV) + */ +static void entry_fill_addrs(struct inet_diag_entry *entry, + const struct sock *sk) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) { + entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32; + entry->daddr = sk->sk_v6_daddr.s6_addr32; + } else +#endif + { + entry->saddr = &sk->sk_rcv_saddr; + entry->daddr = &sk->sk_daddr; + } +} + int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk) { struct inet_sock *inet = inet_sk(sk); @@ -493,19 +546,10 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk) return 1; entry.family = sk->sk_family; -#if IS_ENABLED(CONFIG_IPV6) - if (entry.family == AF_INET6) { - entry.saddr = sk->sk_v6_rcv_saddr.s6_addr32; - entry.daddr = sk->sk_v6_daddr.s6_addr32; - } else -#endif - { - entry.saddr = &inet->inet_rcv_saddr; - entry.daddr = &inet->inet_daddr; - } + entry_fill_addrs(&entry, sk); entry.sport = inet->inet_num; entry.dport = ntohs(inet->inet_dport); - entry.userlocks = (sk->sk_state != TCP_TIME_WAIT) ? sk->sk_userlocks : 0; + entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0; return inet_diag_bc_run(bc, &entry); } @@ -664,110 +708,6 @@ static void twsk_build_assert(void) #endif } -static int inet_twsk_diag_dump(struct sock *sk, - struct sk_buff *skb, - struct netlink_callback *cb, - const struct inet_diag_req_v2 *r, - const struct nlattr *bc) -{ - twsk_build_assert(); - - if (!inet_diag_bc_sk(bc, sk)) - return 0; - - return inet_twsk_diag_fill(inet_twsk(sk), skb, r, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); -} - -/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses - * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6. 
- */ -static void inet_diag_req_addrs(const struct sock *sk, - const struct request_sock *req, - struct inet_diag_entry *entry) -{ - const struct inet_request_sock *ireq = inet_rsk(req); - -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) { - if (req->rsk_ops->family == AF_INET6) { - entry->saddr = ireq->ir_v6_loc_addr.s6_addr32; - entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32; - } else if (req->rsk_ops->family == AF_INET) { - ipv6_addr_set_v4mapped(ireq->ir_loc_addr, - &entry->saddr_storage); - ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, - &entry->daddr_storage); - entry->saddr = entry->saddr_storage.s6_addr32; - entry->daddr = entry->daddr_storage.s6_addr32; - } - } else -#endif - { - entry->saddr = &ireq->ir_loc_addr; - entry->daddr = &ireq->ir_rmt_addr; - } -} - -static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, - struct request_sock *req, - struct user_namespace *user_ns, - u32 portid, u32 seq, - const struct nlmsghdr *unlh) -{ - const struct inet_request_sock *ireq = inet_rsk(req); - struct inet_sock *inet = inet_sk(sk); - struct inet_diag_msg *r; - struct nlmsghdr *nlh; - long tmo; - - nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r), - NLM_F_MULTI); - if (!nlh) - return -EMSGSIZE; - - r = nlmsg_data(nlh); - r->idiag_family = sk->sk_family; - r->idiag_state = TCP_SYN_RECV; - r->idiag_timer = 1; - r->idiag_retrans = req->num_retrans; - - r->id.idiag_if = sk->sk_bound_dev_if; - sock_diag_save_cookie(req, r->id.idiag_cookie); - - tmo = req->expires - jiffies; - if (tmo < 0) - tmo = 0; - - r->id.idiag_sport = inet->inet_sport; - r->id.idiag_dport = ireq->ir_rmt_port; - - memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); - memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); - - r->id.idiag_src[0] = ireq->ir_loc_addr; - r->id.idiag_dst[0] = ireq->ir_rmt_addr; - - r->idiag_expires = jiffies_to_msecs(tmo); - r->idiag_rqueue = 0; - r->idiag_wqueue = 0; - r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk)); - r->idiag_inode = 0; -#if IS_ENABLED(CONFIG_IPV6) - if (r->idiag_family == AF_INET6) { - struct inet_diag_entry entry; - - inet_diag_req_addrs(sk, req, &entry); - memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr)); - memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr)); - } -#endif - - nlmsg_end(skb, nlh); - return 0; -} - static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, struct netlink_callback *cb, const struct inet_diag_req_v2 *r, @@ -788,10 +728,10 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, entry.family = sk->sk_family; - read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); + spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); lopt = icsk->icsk_accept_queue.listen_opt; - if (!lopt || !lopt->qlen) + if (!lopt || !listen_sock_qlen(lopt)) goto out; if (bc) { @@ -813,17 +753,18 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, continue; if (bc) { - inet_diag_req_addrs(sk, req, &entry); + /* Note: entry.sport and entry.userlocks are already set */ + entry_fill_addrs(&entry, req_to_sk(req)); entry.dport = ntohs(ireq->ir_rmt_port); if (!inet_diag_bc_run(bc, &entry)) continue; } - err = inet_diag_fill_req(skb, sk, req, - sk_user_ns(NETLINK_CB(cb->skb).sk), + err = inet_req_diag_fill(req_to_sk(req), skb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, cb->nlh); + cb->nlh->nlmsg_seq, + NLM_F_MULTI, cb->nlh); if (err < 0) { cb->args[3] = j + 1; cb->args[4] = reqnum; @@ -835,7 +776,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock 
*sk, } out: - read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); + spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); return err; } @@ -954,10 +895,16 @@ skip_listen_ht: if (r->id.idiag_dport != sk->sk_dport && r->id.idiag_dport) goto next_normal; - if (sk->sk_state == TCP_TIME_WAIT) - res = inet_twsk_diag_dump(sk, skb, cb, r, bc); - else - res = inet_csk_diag_dump(sk, skb, cb, r, bc); + twsk_build_assert(); + + if (!inet_diag_bc_sk(bc, sk)) + goto next_normal; + + res = sk_diag_fill(sk, skb, r, + sk_user_ns(NETLINK_CB(cb->skb).sk), + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + cb->nlh); if (res < 0) { spin_unlock_bh(lock); goto done; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 9111a4e22155..0fb841b9d834 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -24,9 +24,9 @@ #include <net/secure_seq.h> #include <net/ip.h> -static unsigned int inet_ehashfn(struct net *net, const __be32 laddr, - const __u16 lport, const __be32 faddr, - const __be16 fport) +static u32 inet_ehashfn(const struct net *net, const __be32 laddr, + const __u16 lport, const __be32 faddr, + const __be16 fport) { static u32 inet_ehash_secret __read_mostly; @@ -36,17 +36,21 @@ static unsigned int inet_ehashfn(struct net *net, const __be32 laddr, inet_ehash_secret + net_hash_mix(net)); } - -static unsigned int inet_sk_ehashfn(const struct sock *sk) +/* This function handles inet_sock, but also timewait and request sockets + * for IPv4/IPv6. + */ +u32 sk_ehashfn(const struct sock *sk) { - const struct inet_sock *inet = inet_sk(sk); - const __be32 laddr = inet->inet_rcv_saddr; - const __u16 lport = inet->inet_num; - const __be32 faddr = inet->inet_daddr; - const __be16 fport = inet->inet_dport; - struct net *net = sock_net(sk); - - return inet_ehashfn(net, laddr, lport, faddr, fport); +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6 && + !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) + return inet6_ehashfn(sock_net(sk), + &sk->sk_v6_rcv_saddr, sk->sk_num, + &sk->sk_v6_daddr, sk->sk_dport); +#endif + return inet_ehashfn(sock_net(sk), + sk->sk_rcv_saddr, sk->sk_num, + sk->sk_daddr, sk->sk_dport); } /* @@ -61,7 +65,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); if (tb != NULL) { - write_pnet(&tb->ib_net, hold_net(net)); + write_pnet(&tb->ib_net, net); tb->port = snum; tb->fastreuse = 0; tb->fastreuseport = 0; @@ -79,7 +83,6 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket { if (hlist_empty(&tb->owners)) { __hlist_del(&tb->node); - release_net(ib_net(tb)); kmem_cache_free(cachep, tb); } } @@ -263,11 +266,19 @@ void sock_gen_put(struct sock *sk) if (sk->sk_state == TCP_TIME_WAIT) inet_twsk_free(inet_twsk(sk)); + else if (sk->sk_state == TCP_NEW_SYN_RECV) + reqsk_free(inet_reqsk(sk)); else sk_free(sk); } EXPORT_SYMBOL_GPL(sock_gen_put); +void sock_edemux(struct sk_buff *skb) +{ + sock_gen_put(skb->sk); +} +EXPORT_SYMBOL(sock_edemux); + struct sock *__inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, const __be32 saddr, const __be16 sport, @@ -400,13 +411,13 @@ int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct hlist_nulls_head *list; - spinlock_t *lock; struct inet_ehash_bucket *head; + spinlock_t *lock; int twrefcnt = 0; WARN_ON(!sk_unhashed(sk)); - sk->sk_hash = inet_sk_ehashfn(sk); + sk->sk_hash = 
sk_ehashfn(sk); head = inet_ehash_bucket(hashinfo, sk->sk_hash); list = &head->chain; lock = inet_ehash_lockp(hashinfo, sk->sk_hash); @@ -423,15 +434,13 @@ int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw) } EXPORT_SYMBOL_GPL(__inet_hash_nolisten); -static void __inet_hash(struct sock *sk) +int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct inet_listen_hashbucket *ilb; - if (sk->sk_state != TCP_LISTEN) { - __inet_hash_nolisten(sk, NULL); - return; - } + if (sk->sk_state != TCP_LISTEN) + return __inet_hash_nolisten(sk, tw); WARN_ON(!sk_unhashed(sk)); ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; @@ -440,13 +449,15 @@ static void __inet_hash(struct sock *sk) __sk_nulls_add_node_rcu(sk, &ilb->head); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); spin_unlock(&ilb->lock); + return 0; } +EXPORT_SYMBOL(__inet_hash); void inet_hash(struct sock *sk) { if (sk->sk_state != TCP_CLOSE) { local_bh_disable(); - __inet_hash(sk); + __inet_hash(sk, NULL); local_bh_enable(); } } @@ -477,8 +488,7 @@ EXPORT_SYMBOL_GPL(inet_unhash); int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk, u32 port_offset, int (*check_established)(struct inet_timewait_death_row *, - struct sock *, __u16, struct inet_timewait_sock **), - int (*hash)(struct sock *sk, struct inet_timewait_sock *twp)) + struct sock *, __u16, struct inet_timewait_sock **)) { struct inet_hashinfo *hinfo = death_row->hashinfo; const unsigned short snum = inet_sk(sk)->inet_num; @@ -548,7 +558,7 @@ ok: inet_bind_hash(sk, tb, port); if (sk_unhashed(sk)) { inet_sk(sk)->inet_sport = htons(port); - twrefcnt += hash(sk, tw); + twrefcnt += __inet_hash_nolisten(sk, tw); } if (tw) twrefcnt += inet_twsk_bind_unhash(tw, hinfo); @@ -570,7 +580,7 @@ ok: tb = inet_csk(sk)->icsk_bind_hash; spin_lock_bh(&head->lock); if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { - hash(sk, NULL); + __inet_hash_nolisten(sk, NULL); spin_unlock_bh(&head->lock); return 0; } else { @@ -590,7 +600,7 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk), - __inet_check_established, __inet_hash_nolisten); + __inet_check_established); } EXPORT_SYMBOL_GPL(inet_hash_connect); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 6d592f8555fb..f38e387448fb 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -98,7 +98,6 @@ void inet_twsk_free(struct inet_timewait_sock *tw) #ifdef SOCK_REFCNT_DEBUG pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw); #endif - release_net(twsk_net(tw)); kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw); module_put(owner); } @@ -195,7 +194,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat tw->tw_ipv6only = 0; tw->tw_transparent = inet->transparent; tw->tw_prot = sk->sk_prot_creator; - twsk_net_set(tw, hold_net(sock_net(sk))); + atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie)); + twsk_net_set(tw, sock_net(sk)); /* * Because we use RCU lookups, we should not set tw_refcnt * to a non null value before everything is setup for this @@ -487,6 +487,7 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, for (slot = 0; slot <= hashinfo->ehash_mask; slot++) { struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; restart_rcu: + cond_resched(); rcu_read_lock(); restart: sk_nulls_for_each_rcu(sk, 
node, &head->chain) { diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 787b3c294ce6..d9bc28ac5d1b 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -67,6 +67,7 @@ static int ip_forward_finish(struct sk_buff *skb) if (unlikely(opt->optlen)) ip_forward_options(skb); + skb_sender_cpu_clear(skb); return dst_output(skb); } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 5cd99271d3a6..f6a0d54b308a 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -536,12 +536,34 @@ out: * Socket option code for IP. This is the end of the line after any * TCP,UDP etc options on an IP socket. */ +static bool setsockopt_needs_rtnl(int optname) +{ + switch (optname) { + case IP_ADD_MEMBERSHIP: + case IP_ADD_SOURCE_MEMBERSHIP: + case IP_BLOCK_SOURCE: + case IP_DROP_MEMBERSHIP: + case IP_DROP_SOURCE_MEMBERSHIP: + case IP_MSFILTER: + case IP_UNBLOCK_SOURCE: + case MCAST_BLOCK_SOURCE: + case MCAST_MSFILTER: + case MCAST_JOIN_GROUP: + case MCAST_JOIN_SOURCE_GROUP: + case MCAST_LEAVE_GROUP: + case MCAST_LEAVE_SOURCE_GROUP: + case MCAST_UNBLOCK_SOURCE: + return true; + } + return false; +} static int do_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct inet_sock *inet = inet_sk(sk); int val = 0, err; + bool needs_rtnl = setsockopt_needs_rtnl(optname); switch (optname) { case IP_PKTINFO: @@ -584,6 +606,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, return ip_mroute_setsockopt(sk, optname, optval, optlen); err = 0; + if (needs_rtnl) + rtnl_lock(); lock_sock(sk); switch (optname) { @@ -1118,10 +1142,14 @@ mc_msf_out: break; } release_sock(sk); + if (needs_rtnl) + rtnl_unlock(); return err; e_inval: release_sock(sk); + if (needs_rtnl) + rtnl_unlock(); return -EINVAL; } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9d78427652d2..5b188832800f 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -73,9 +73,7 @@ struct mr_table { struct list_head list; -#ifdef CONFIG_NET_NS - struct net *net; -#endif + possible_net_t net; u32 id; struct sock __rcu *mroute_sk; struct timer_list ipmr_expire_timer; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index fd88f868776f..344e7cdfb8d4 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -64,11 +64,11 @@ EXPORT_SYMBOL_GPL(pingv6_ops); static u16 ping_port_rover; -static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask) +static inline u32 ping_hashfn(const struct net *net, u32 num, u32 mask) { - int res = (num + net_hash_mix(net)) & mask; + u32 res = (num + net_hash_mix(net)) & mask; - pr_debug("hash(%d) = %d\n", num, res); + pr_debug("hash(%u) = %u\n", num, res); return res; } EXPORT_SYMBOL_GPL(ping_hash); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 45fe60c5238e..805dc444741d 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -219,19 +219,20 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, } EXPORT_SYMBOL_GPL(__cookie_v4_check); -static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, - struct request_sock *req, - struct dst_entry *dst) +static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst) { struct inet_connection_sock *icsk = inet_csk(sk); struct sock *child; child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst); - if (child) + if (child) { + atomic_set(&req->rsk_refcnt, 1); inet_csk_reqsk_queue_add(sk, req, child); - else + } else { reqsk_free(req); - + } 
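+		/* A cookie-built req was never hashed and owns no timer,
+		 * so one reference is enough: the accept queue is its only
+		 * owner until accept() picks the child up (contrast the
+		 * count of 2 set in reqsk_queue_hash_req()).
+		 */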
return child; } @@ -325,7 +326,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) goto out; ret = NULL; - req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ + req = inet_reqsk_alloc(&tcp_request_sock_ops, sk); /* for safety */ if (!req) goto out; @@ -336,8 +337,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) req->mss = mss; ireq->ir_num = ntohs(th->dest); ireq->ir_rmt_port = th->source; - ireq->ir_loc_addr = ip_hdr(skb)->daddr; - ireq->ir_rmt_addr = ip_hdr(skb)->saddr; + sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); + sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); ireq->ir_mark = inet_request_mark(sk, skb); ireq->snd_wscale = tcp_opt.snd_wscale; ireq->sack_ok = tcp_opt.sack_ok; @@ -345,7 +346,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) ireq->tstamp_ok = tcp_opt.saw_tstamp; req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0; - treq->listener = NULL; + treq->tfo_listener = false; + ireq->ireq_family = AF_INET; + + ireq->ir_iif = sk->sk_bound_dev_if; /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) @@ -357,7 +361,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) goto out; } - req->expires = 0UL; req->num_retrans = 0; /* diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index d4c3a5e66380..7a5ae50c80c8 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -378,6 +378,12 @@ EXPORT_SYMBOL_GPL(tcp_slow_start); */ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) { + /* If credits accumulated at a higher w, apply them gently now. */ + if (tp->snd_cwnd_cnt >= w) { + tp->snd_cwnd_cnt = 0; + tp->snd_cwnd++; + } + tp->snd_cwnd_cnt += acked; if (tp->snd_cwnd_cnt >= w) { u32 delta = tp->snd_cwnd_cnt / w; diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 4b276d1ed980..06d3d665a9fd 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -306,8 +306,10 @@ tcp_friendliness: } } - if (ca->cnt == 0) /* cannot be zero */ - ca->cnt = 1; + /* The maximum rate of cwnd increase CUBIC allows is 1 packet per + * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT. + */ + ca->cnt = max(ca->cnt, 2U); } static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index fe77417fc137..2eb887ec0ce3 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -155,12 +155,7 @@ static bool tcp_fastopen_create_child(struct sock *sk, tp = tcp_sk(child); tp->fastopen_rsk = req; - /* Do a hold on the listner sk so that if the listener is being - * closed, the child that has been accepted can live on and still - * access listen_lock. - */ - sock_hold(sk); - tcp_rsk(req)->listener = sk; + tcp_rsk(req)->tfo_listener = true; /* RFC1323: The window in SYN & SYN/ACK segments is never * scaled. So correct it appropriately. 
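The sock_hold()/tcp_rsk(req)->listener pair removed above became redundant once every request sock pins its listener: reqsk_alloc() now takes the listener as an argument (see inet_reqsk_alloc() later in this diff), so the fast open path only has to remember that a request is a TFO one, hence the tfo_listener bool. The body of reqsk_alloc() itself is not part of this diff; the following is a minimal sketch of what its allocation side presumably does, with the _sketch suffix marking it as illustrative:

	static inline struct request_sock *
	reqsk_alloc_sketch(const struct request_sock_ops *ops,
			   struct sock *sk_listener)
	{
		struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);

		if (!req)
			return NULL;
		req->rsk_ops = ops;
		req->rsk_listener = sk_listener;
		sock_hold(sk_listener);	/* dropped when the req is freed */
		return req;
	}

With the listener pinned for the lifetime of the req, the explicit sock_put(sk) in the TFO branch of inet_csk_listen_stop() (removed earlier in this diff) is no longer needed either.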
@@ -174,6 +169,7 @@ static bool tcp_fastopen_create_child(struct sock *sk, inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS, TCP_TIMEOUT_INIT, TCP_RTO_MAX); + atomic_set(&req->rsk_refcnt, 1); /* Add the child socket directly into the accept queue */ inet_csk_reqsk_queue_add(sk, req, child); @@ -244,7 +240,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk) struct request_sock *req1; spin_lock(&fastopenq->lock); req1 = fastopenq->rskq_rst_head; - if ((req1 == NULL) || time_after(req1->expires, jiffies)) { + if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) { spin_unlock(&fastopenq->lock); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENLISTENOVERFLOW); @@ -253,7 +249,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk) fastopenq->rskq_rst_head = req1->dl_next; fastopenq->qlen--; spin_unlock(&fastopenq->lock); - reqsk_free(req1); + reqsk_put(req1); } return true; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index fb4cf8b8e121..95caea707f54 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3321,6 +3321,36 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 return flag; } +/* Return true if we're currently rate-limiting out-of-window ACKs and + * thus shouldn't send a dupack right now. We rate-limit dupacks in + * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS + * attacks that send repeated SYNs or ACKs for the same connection. To + * do this, we do not send a duplicate SYNACK or ACK if the remote + * endpoint is sending out-of-window SYNs or pure ACKs at a high rate. + */ +bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, + int mib_idx, u32 *last_oow_ack_time) +{ + /* Data packets without SYNs are not likely part of an ACK loop. */ + if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && + !tcp_hdr(skb)->syn) + goto not_rate_limited; + + if (*last_oow_ack_time) { + s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); + + if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { + NET_INC_STATS_BH(net, mib_idx); + return true; /* rate-limited: don't send yet! */ + } + } + + *last_oow_ack_time = tcp_time_stamp; + +not_rate_limited: + return false; /* not rate-limited: go ahead, send dupack now! */ +} + /* RFC 5961 7 [ACK Throttling] */ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) { @@ -5664,7 +5694,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && sk->sk_state != TCP_FIN_WAIT1); - if (tcp_check_req(sk, skb, req, NULL, true) == NULL) + if (tcp_check_req(sk, skb, req, true) == NULL) goto discard; } @@ -5912,6 +5942,51 @@ static void tcp_ecn_create_request(struct request_sock *req, inet_rsk(req)->ecn_ok = 1; } +static void tcp_openreq_init(struct request_sock *req, + const struct tcp_options_received *rx_opt, + struct sk_buff *skb, const struct sock *sk) +{ + struct inet_request_sock *ireq = inet_rsk(req); + + req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */ + req->cookie_ts = 0; + tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; + tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; + tcp_rsk(req)->snt_synack = tcp_time_stamp; + tcp_rsk(req)->last_oow_ack_time = 0; + req->mss = rx_opt->mss_clamp; + req->ts_recent = rx_opt->saw_tstamp ? 
rx_opt->rcv_tsval : 0; + ireq->tstamp_ok = rx_opt->tstamp_ok; + ireq->sack_ok = rx_opt->sack_ok; + ireq->snd_wscale = rx_opt->snd_wscale; + ireq->wscale_ok = rx_opt->wscale_ok; + ireq->acked = 0; + ireq->ecn_ok = 0; + ireq->ir_rmt_port = tcp_hdr(skb)->source; + ireq->ir_num = ntohs(tcp_hdr(skb)->dest); + ireq->ir_mark = inet_request_mark(sk, skb); +} + +struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk_listener) +{ + struct request_sock *req = reqsk_alloc(ops, sk_listener); + + if (req) { + struct inet_request_sock *ireq = inet_rsk(req); + + kmemcheck_annotate_bitfield(ireq, flags); + ireq->opt = NULL; + atomic64_set(&ireq->ir_cookie, 0); + ireq->ireq_state = TCP_NEW_SYN_RECV; + write_pnet(&ireq->ireq_net, sock_net(sk_listener)); + + } + + return req; +} +EXPORT_SYMBOL(inet_reqsk_alloc); + int tcp_conn_request(struct request_sock_ops *rsk_ops, const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct sk_buff *skb) @@ -5949,7 +6024,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, goto drop; } - req = inet_reqsk_alloc(rsk_ops); + req = inet_reqsk_alloc(rsk_ops, sk); if (!req) goto drop; @@ -5966,6 +6041,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; tcp_openreq_init(req, &tmp_opt, skb, sk); + /* Note: tcp_v6_init_req() might override ir_iif for link locals */ + inet_rsk(req)->ir_iif = sk->sk_bound_dev_if; + af_ops->init_req(req, sk, skb); if (security_inet_conn_request(sk, skb, req)) @@ -6038,7 +6116,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (err || want_cookie) goto drop_and_free; - tcp_rsk(req)->listener = NULL; + tcp_rsk(req)->tfo_listener = false; af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT); } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f0c6fc32bfa8..4e90217003e8 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -189,7 +189,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (!inet->inet_saddr) inet->inet_saddr = fl4->saddr; - inet->inet_rcv_saddr = inet->inet_saddr; + sk_rcv_saddr_set(sk, inet->inet_saddr); if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { /* Reset inherited state */ @@ -204,7 +204,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) tcp_fetch_timewait_stamp(sk, &rt->dst); inet->inet_dport = usin->sin_port; - inet->inet_daddr = daddr; + sk_daddr_set(sk, daddr); inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet_opt) @@ -310,6 +310,34 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk) dst->ops->redirect(dst, sk, skb); } + +/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */ +void tcp_req_err(struct sock *sk, u32 seq) +{ + struct request_sock *req = inet_reqsk(sk); + struct net *net = sock_net(sk); + + /* ICMPs are not backlogged, hence we cannot get + * an established socket here. + */ + WARN_ON(req->sk); + + if (seq != tcp_rsk(req)->snt_isn) { + NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); + reqsk_put(req); + } else { + /* + * Still in SYN_RECV, just remove it silently. + * There is no good way to pass the error to the newly + * created socket, and POSIX does not want network + * errors returned from accept(). + */ + NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS); + inet_csk_reqsk_queue_drop(req->rsk_listener, req); + } +} +EXPORT_SYMBOL(tcp_req_err); + /* * This routine is called by the ICMP module when it gets some * sort of error condition. 
If err < 0 then the socket should @@ -343,8 +371,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) int err; struct net *net = dev_net(icmp_skb->dev); - sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest, - iph->saddr, th->source, inet_iif(icmp_skb)); + sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr, + th->dest, iph->saddr, ntohs(th->source), + inet_iif(icmp_skb)); if (!sk) { ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); return; @@ -353,6 +382,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) inet_twsk_put(inet_twsk(sk)); return; } + seq = ntohl(th->seq); + if (sk->sk_state == TCP_NEW_SYN_RECV) + return tcp_req_err(sk, seq); bh_lock_sock(sk); /* If too many ICMPs get dropped on busy @@ -374,7 +406,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) icsk = inet_csk(sk); tp = tcp_sk(sk); - seq = ntohl(th->seq); /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ fastopen = tp->fastopen_rsk; snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; @@ -458,36 +489,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) } switch (sk->sk_state) { - struct request_sock *req, **prev; - case TCP_LISTEN: - if (sock_owned_by_user(sk)) - goto out; - - req = inet_csk_search_req(sk, &prev, th->dest, - iph->daddr, iph->saddr); - if (!req) - goto out; - - /* ICMPs are not backlogged, hence we cannot get - an established socket here. - */ - WARN_ON(req->sk); - - if (seq != tcp_rsk(req)->snt_isn) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); - goto out; - } - - /* - * Still in SYN_RECV, just remove it silently. - * There is no good way to pass the error to the newly - * created socket, and POSIX does not want network - * errors returned from accept(). - */ - inet_csk_reqsk_queue_drop(sk, req, prev); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); - goto out; - case TCP_SYN_SENT: case TCP_SYN_RECV: /* Only in fast or simultaneous open. 
If a fast open socket is @@ -1219,15 +1220,16 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) #endif -static void tcp_v4_init_req(struct request_sock *req, struct sock *sk, +static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener, struct sk_buff *skb) { struct inet_request_sock *ireq = inet_rsk(req); - ireq->ir_loc_addr = ip_hdr(skb)->daddr; - ireq->ir_rmt_addr = ip_hdr(skb)->saddr; - ireq->no_srccheck = inet_sk(sk)->transparent; + sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); + sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); + ireq->no_srccheck = inet_sk(sk_listener)->transparent; ireq->opt = tcp_v4_save_options(skb); + ireq->ireq_family = AF_INET; } static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl, @@ -1318,8 +1320,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newtp = tcp_sk(newsk); newinet = inet_sk(newsk); ireq = inet_rsk(req); - newinet->inet_daddr = ireq->ir_rmt_addr; - newinet->inet_rcv_saddr = ireq->ir_loc_addr; + sk_daddr_set(newsk, ireq->ir_rmt_addr); + sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); newinet->inet_saddr = ireq->ir_loc_addr; inet_opt = ireq->opt; rcu_assign_pointer(newinet->inet_opt, inet_opt); @@ -1391,15 +1393,17 @@ EXPORT_SYMBOL(tcp_v4_syn_recv_sock); static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) { - struct tcphdr *th = tcp_hdr(skb); + const struct tcphdr *th = tcp_hdr(skb); const struct iphdr *iph = ip_hdr(skb); + struct request_sock *req; struct sock *nsk; - struct request_sock **prev; - /* Find possible connection requests. */ - struct request_sock *req = inet_csk_search_req(sk, &prev, th->source, - iph->saddr, iph->daddr); - if (req) - return tcp_check_req(sk, skb, req, prev, false); + + req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); + if (req) { + nsk = tcp_check_req(sk, skb, req, false); + reqsk_put(req); + return nsk; + } nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr, th->source, iph->daddr, th->dest, inet_iif(skb)); @@ -1517,7 +1521,7 @@ void tcp_v4_early_demux(struct sk_buff *skb) if (sk) { skb->sk = sk; skb->destructor = sock_edemux; - if (sk->sk_state != TCP_TIME_WAIT) { + if (sk_fullsock(sk)) { struct dst_entry *dst = sk->sk_rx_dst; if (dst) @@ -1904,13 +1908,13 @@ get_req: } sk = sk_nulls_next(st->syn_wait_sk); st->state = TCP_SEQ_STATE_LISTENING; - read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); + spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); } else { icsk = inet_csk(sk); - read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); + spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); if (reqsk_queue_len(&icsk->icsk_accept_queue)) goto start_req; - read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); + spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); sk = sk_nulls_next(sk); } get_sk: @@ -1922,7 +1926,7 @@ get_sk: goto out; } icsk = inet_csk(sk); - read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); + spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); if (reqsk_queue_len(&icsk->icsk_accept_queue)) { start_req: st->uid = sock_i_uid(sk); @@ -1931,7 +1935,7 @@ start_req: st->sbucket = 0; goto get_req; } - read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); + spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); } spin_unlock_bh(&ilb->lock); st->offset = 0; @@ -2150,7 +2154,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) case TCP_SEQ_STATE_OPENREQ: if (v) { struct inet_connection_sock *icsk = 
inet_csk(st->syn_wait_sk); - read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); + spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); } case TCP_SEQ_STATE_LISTENING: if (v != SEQ_START_TOKEN) @@ -2204,17 +2208,17 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo) } EXPORT_SYMBOL(tcp_proc_unregister); -static void get_openreq4(const struct sock *sk, const struct request_sock *req, +static void get_openreq4(const struct request_sock *req, struct seq_file *f, int i, kuid_t uid) { const struct inet_request_sock *ireq = inet_rsk(req); - long delta = req->expires - jiffies; + long delta = req->rsk_timer.expires - jiffies; seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK", i, ireq->ir_loc_addr, - ntohs(inet_sk(sk)->inet_sport), + ireq->ir_num, ireq->ir_rmt_addr, ntohs(ireq->ir_rmt_port), TCP_SYN_RECV, @@ -2225,7 +2229,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req, from_kuid_munged(seq_user_ns(f), uid), 0, /* non standard timer */ 0, /* open_requests have no inode */ - atomic_read(&sk->sk_refcnt), + 0, req); } @@ -2332,7 +2336,7 @@ static int tcp4_seq_show(struct seq_file *seq, void *v) get_tcp4_sock(v, seq, st->num); break; case TCP_SEQ_STATE_OPENREQ: - get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid); + get_openreq4(v, seq, st->num, st->uid); break; } out: diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index e5f41bd5ec1b..5bef3513af77 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -40,6 +40,7 @@ struct tcp_fastopen_metrics { struct tcp_metrics_block { struct tcp_metrics_block __rcu *tcpm_next; + possible_net_t tcpm_net; struct inetpeer_addr tcpm_saddr; struct inetpeer_addr tcpm_daddr; unsigned long tcpm_stamp; @@ -52,6 +53,11 @@ struct tcp_metrics_block { struct rcu_head rcu_head; }; +static inline struct net *tm_net(struct tcp_metrics_block *tm) +{ + return read_pnet(&tm->tcpm_net); +} + static bool tcp_metric_locked(struct tcp_metrics_block *tm, enum tcp_metric_index idx) { @@ -91,6 +97,9 @@ struct tcpm_hash_bucket { struct tcp_metrics_block __rcu *chain; }; +static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly; +static unsigned int tcp_metrics_hash_log __read_mostly; + static DEFINE_SPINLOCK(tcp_metrics_lock); static void tcpm_suck_dst(struct tcp_metrics_block *tm, @@ -143,6 +152,9 @@ static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst #define TCP_METRICS_RECLAIM_DEPTH 5 #define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL +#define deref_locked(p) \ + rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock)) + static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, struct inetpeer_addr *saddr, struct inetpeer_addr *daddr, @@ -171,9 +183,9 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, if (unlikely(reclaim)) { struct tcp_metrics_block *oldest; - oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); - for (tm = rcu_dereference(oldest->tcpm_next); tm; - tm = rcu_dereference(tm->tcpm_next)) { + oldest = deref_locked(tcp_metrics_hash[hash].chain); + for (tm = deref_locked(oldest->tcpm_next); tm; + tm = deref_locked(tm->tcpm_next)) { if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp)) oldest = tm; } @@ -183,14 +195,15 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, if (!tm) goto out_unlock; } + write_pnet(&tm->tcpm_net, net); tm->tcpm_saddr = *saddr; tm->tcpm_daddr = *daddr; tcpm_suck_dst(tm, dst, true); if 
(likely(!reclaim)) { - tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain; - rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm); + tm->tcpm_next = tcp_metrics_hash[hash].chain; + rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm); } out_unlock: @@ -214,10 +227,11 @@ static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *s struct tcp_metrics_block *tm; int depth = 0; - for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm; + for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; tm = rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_saddr, saddr) && - addr_same(&tm->tcpm_daddr, daddr)) + addr_same(&tm->tcpm_daddr, daddr) && + net_eq(tm_net(tm), net)) break; depth++; } @@ -252,12 +266,14 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req, } net = dev_net(dst->dev); - hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); + hash ^= net_hash_mix(net); + hash = hash_32(hash, tcp_metrics_hash_log); - for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm; + for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; tm = rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_saddr, &saddr) && - addr_same(&tm->tcpm_daddr, &daddr)) + addr_same(&tm->tcpm_daddr, &daddr) && + net_eq(tm_net(tm), net)) break; } tcpm_check_stamp(tm, dst); @@ -299,12 +315,14 @@ static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock return NULL; net = twsk_net(tw); - hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); + hash ^= net_hash_mix(net); + hash = hash_32(hash, tcp_metrics_hash_log); - for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm; + for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; tm = rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_saddr, &saddr) && - addr_same(&tm->tcpm_daddr, &daddr)) + addr_same(&tm->tcpm_daddr, &daddr) && + net_eq(tm_net(tm), net)) break; } return tm; @@ -347,7 +365,8 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk, return NULL; net = dev_net(dst->dev); - hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); + hash ^= net_hash_mix(net); + hash = hash_32(hash, tcp_metrics_hash_log); tm = __tcp_get_metrics(&saddr, &daddr, net, hash); if (tm == TCP_METRICS_RECLAIM_PTR) @@ -898,17 +917,19 @@ static int tcp_metrics_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); - unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log; + unsigned int max_rows = 1U << tcp_metrics_hash_log; unsigned int row, s_row = cb->args[0]; int s_col = cb->args[1], col = s_col; for (row = s_row; row < max_rows; row++, s_col = 0) { struct tcp_metrics_block *tm; - struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row; + struct tcpm_hash_bucket *hb = tcp_metrics_hash + row; rcu_read_lock(); for (col = 0, tm = rcu_dereference(hb->chain); tm; tm = rcu_dereference(tm->tcpm_next), col++) { + if (!net_eq(tm_net(tm), net)) + continue; if (col < s_col) continue; if (tcp_metrics_dump_info(skb, cb, tm) < 0) { @@ -994,13 +1015,15 @@ static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info) if (!reply) goto nla_put_failure; - hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); + hash ^= net_hash_mix(net); + hash = hash_32(hash, tcp_metrics_hash_log); ret = -ESRCH; rcu_read_lock(); - for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm; + for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; tm = 
rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_daddr, &daddr) && - (!src || addr_same(&tm->tcpm_saddr, &saddr))) { + (!src || addr_same(&tm->tcpm_saddr, &saddr)) && + net_eq(tm_net(tm), net)) { ret = tcp_metrics_fill_info(msg, tm); break; } @@ -1020,34 +1043,27 @@ out_free: return ret; } -#define deref_locked_genl(p) \ - rcu_dereference_protected(p, lockdep_genl_is_held() && \ - lockdep_is_held(&tcp_metrics_lock)) - -#define deref_genl(p) rcu_dereference_protected(p, lockdep_genl_is_held()) - -static int tcp_metrics_flush_all(struct net *net) +static void tcp_metrics_flush_all(struct net *net) { - unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log; - struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash; + unsigned int max_rows = 1U << tcp_metrics_hash_log; + struct tcpm_hash_bucket *hb = tcp_metrics_hash; struct tcp_metrics_block *tm; unsigned int row; for (row = 0; row < max_rows; row++, hb++) { + struct tcp_metrics_block __rcu **pp; spin_lock_bh(&tcp_metrics_lock); - tm = deref_locked_genl(hb->chain); - if (tm) - hb->chain = NULL; - spin_unlock_bh(&tcp_metrics_lock); - while (tm) { - struct tcp_metrics_block *next; - - next = deref_genl(tm->tcpm_next); - kfree_rcu(tm, rcu_head); - tm = next; + pp = &hb->chain; + for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) { + if (net_eq(tm_net(tm), net)) { + *pp = tm->tcpm_next; + kfree_rcu(tm, rcu_head); + } else { + pp = &tm->tcpm_next; + } } + spin_unlock_bh(&tcp_metrics_lock); } - return 0; } static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info) @@ -1064,19 +1080,23 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info) ret = parse_nl_addr(info, &daddr, &hash, 1); if (ret < 0) return ret; - if (ret > 0) - return tcp_metrics_flush_all(net); + if (ret > 0) { + tcp_metrics_flush_all(net); + return 0; + } ret = parse_nl_saddr(info, &saddr); if (ret < 0) src = false; - hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); - hb = net->ipv4.tcp_metrics_hash + hash; + hash ^= net_hash_mix(net); + hash = hash_32(hash, tcp_metrics_hash_log); + hb = tcp_metrics_hash + hash; pp = &hb->chain; spin_lock_bh(&tcp_metrics_lock); - for (tm = deref_locked_genl(*pp); tm; tm = deref_locked_genl(*pp)) { + for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) { if (addr_same(&tm->tcpm_daddr, &daddr) && - (!src || addr_same(&tm->tcpm_saddr, &saddr))) { + (!src || addr_same(&tm->tcpm_saddr, &saddr)) && + net_eq(tm_net(tm), net)) { *pp = tm->tcpm_next; kfree_rcu(tm, rcu_head); found = true; @@ -1126,6 +1146,9 @@ static int __net_init tcp_net_metrics_init(struct net *net) size_t size; unsigned int slots; + if (!net_eq(net, &init_net)) + return 0; + slots = tcpmhash_entries; if (!slots) { if (totalram_pages >= 128 * 1024) @@ -1134,14 +1157,14 @@ static int __net_init tcp_net_metrics_init(struct net *net) slots = 8 * 1024; } - net->ipv4.tcp_metrics_hash_log = order_base_2(slots); - size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log; + tcp_metrics_hash_log = order_base_2(slots); + size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log; - net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); - if (!net->ipv4.tcp_metrics_hash) - net->ipv4.tcp_metrics_hash = vzalloc(size); + tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!tcp_metrics_hash) + tcp_metrics_hash = vzalloc(size); - if (!net->ipv4.tcp_metrics_hash) + if (!tcp_metrics_hash) return -ENOMEM; return 0; @@ -1149,19 +1172,7 @@ static int __net_init 
tcp_net_metrics_init(struct net *net) static void __net_exit tcp_net_metrics_exit(struct net *net) { - unsigned int i; - - for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) { - struct tcp_metrics_block *tm, *next; - - tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1); - while (tm) { - next = rcu_dereference_protected(tm->tcpm_next, 1); - kfree(tm); - tm = next; - } - } - kvfree(net->ipv4.tcp_metrics_hash); + tcp_metrics_flush_all(net); } static __net_initdata struct pernet_operations tcp_net_metrics_ops = { @@ -1175,16 +1186,10 @@ void __init tcp_metrics_init(void) ret = register_pernet_subsys(&tcp_net_metrics_ops); if (ret < 0) - goto cleanup; + panic("Could not allocate the tcp_metrics hash table\n"); + ret = genl_register_family_with_ops(&tcp_metrics_nl_family, tcp_metrics_nl_ops); if (ret < 0) - goto cleanup_subsys; - return; - -cleanup_subsys: - unregister_pernet_subsys(&tcp_net_metrics_ops); - -cleanup: - return; + panic("Could not register tcp_metrics generic netlink\n"); } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index dd11ac7798c6..274e96fb369b 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -572,7 +572,6 @@ EXPORT_SYMBOL(tcp_create_openreq_child); struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct request_sock **prev, bool fastopen) { struct tcp_options_received tmp_opt; @@ -630,8 +629,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, &tcp_rsk(req)->last_oow_ack_time) && !inet_rtx_syn_ack(sk, req)) - req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout, - TCP_RTO_MAX) + jiffies; + mod_timer_pending(&req->rsk_timer, jiffies + + min(TCP_TIMEOUT_INIT << req->num_timeout, + TCP_RTO_MAX)); return NULL; } @@ -766,7 +766,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, if (child == NULL) goto listen_overflow; - inet_csk_reqsk_queue_unlink(sk, req, prev); + inet_csk_reqsk_queue_unlink(sk, req); inet_csk_reqsk_queue_removed(sk, req); inet_csk_reqsk_queue_add(sk, req, child); @@ -791,7 +791,7 @@ embryonic_reset: tcp_reset(sk); } if (!fastopen) { - inet_csk_reqsk_queue_drop(sk, req, prev); + inet_csk_reqsk_queue_drop(sk, req); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); } return NULL; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5a73ad5afaf7..18474088c3d0 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2820,15 +2820,11 @@ void tcp_send_fin(struct sock *sk) } else { /* Socket is locked, keep trying until memory is available. */ for (;;) { - skb = alloc_skb_fclone(MAX_TCP_HEADER, - sk->sk_allocation); + skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); if (skb) break; yield(); } - - /* Reserve space for headers and prepare control bits. */ - skb_reserve(skb, MAX_TCP_HEADER); /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). 
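 * (The explicit skb_reserve() above could go because
 * sk_stream_alloc_skb() reserves the header room itself.)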
*/ tcp_init_nondata_skb(skb, tp->write_seq, TCPHDR_ACK | TCPHDR_FIN); @@ -2930,7 +2926,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, skb_reserve(skb, MAX_TCP_HEADER); skb_dst_set(skb, dst); - security_skb_owned_by(skb, sk); mss = dst_metric_advmss(dst); if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 15505936511d..2568fd282873 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -327,7 +327,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk) struct request_sock *req; req = tcp_sk(sk)->fastopen_rsk; - req->rsk_ops->syn_ack_timeout(sk, req); + req->rsk_ops->syn_ack_timeout(req); if (req->num_timeout >= max_retries) { tcp_write_err(sk); @@ -539,19 +539,11 @@ static void tcp_write_timer(unsigned long data) sock_put(sk); } -/* - * Timer for listening sockets - */ - -static void tcp_synack_timer(struct sock *sk) +void tcp_syn_ack_timeout(const struct request_sock *req) { - inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, - TCP_TIMEOUT_INIT, TCP_RTO_MAX); -} + struct net *net = read_pnet(&inet_rsk(req)->ireq_net); -void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req) -{ - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS); + NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS); } EXPORT_SYMBOL(tcp_syn_ack_timeout); @@ -583,7 +575,7 @@ static void tcp_keepalive_timer (unsigned long data) } if (sk->sk_state == TCP_LISTEN) { - tcp_synack_timer(sk); + pr_err("Hmm... keepalive on a LISTEN ???\n"); goto out; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f27556e2158b..294af16633af 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -318,8 +318,8 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)); } -static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, - unsigned int port) +static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr, + unsigned int port) { return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; } @@ -421,9 +421,9 @@ static inline int compute_score2(struct sock *sk, struct net *net, return score; } -static unsigned int udp_ehashfn(struct net *net, const __be32 laddr, - const __u16 lport, const __be32 faddr, - const __be16 fport) +static u32 udp_ehashfn(const struct net *net, const __be32 laddr, + const __u16 lport, const __be32 faddr, + const __be16 fport) { static u32 udp_ehash_secret __read_mostly; diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index d5f6bd9a210a..dab73813cb92 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -63,6 +63,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) return err; IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; + skb->protocol = htons(ETH_P_IP); return x->outer_mode->output2(x, skb); } @@ -71,7 +72,6 @@ EXPORT_SYMBOL(xfrm4_prepare_output); int xfrm4_output_finish(struct sk_buff *skb) { memset(IPCB(skb), 0, sizeof(*IPCB(skb))); - skb->protocol = htons(ETH_P_IP); #ifdef CONFIG_NETFILTER IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 88d2cf0cae52..158378e73f0a 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2473,9 +2473,9 @@ static int ipv6_mc_config(struct sock *sk, bool join, lock_sock(sk); if (join) - ret = __ipv6_sock_mc_join(sk, ifindex, addr); + ret = ipv6_sock_mc_join(sk, ifindex, addr); else - ret = __ipv6_sock_mc_drop(sk, ifindex, addr); + ret = ipv6_sock_mc_drop(sk, ifindex, 
addr); release_sock(sk); return ret; diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index e43e79d0a612..3cc50e2d3bf5 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -29,9 +29,7 @@ * Policy Table */ struct ip6addrlbl_entry { -#ifdef CONFIG_NET_NS - struct net *lbl_net; -#endif + possible_net_t lbl_net; struct in6_addr prefix; int prefixlen; int ifindex; @@ -129,9 +127,6 @@ static const __net_initconst struct ip6addrlbl_init_table /* Object management */ static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p) { -#ifdef CONFIG_NET_NS - release_net(p->lbl_net); -#endif kfree(p); } @@ -240,9 +235,7 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net, newp->addrtype = addrtype; newp->label = label; INIT_HLIST_NODE(&newp->list); -#ifdef CONFIG_NET_NS - newp->lbl_net = hold_net(net); -#endif + write_pnet(&newp->lbl_net, net); atomic_set(&newp->refcnt, 1); return newp; } diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index baf2742d1ec4..9e6b0ee563f0 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -60,6 +60,8 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) int ishost = !net->ipv6.devconf_all->forwarding; int err = 0; + ASSERT_RTNL(); + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; if (ipv6_addr_is_multicast(addr)) @@ -73,7 +75,6 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) pac->acl_next = NULL; pac->acl_addr = *addr; - rtnl_lock(); if (ifindex == 0) { struct rt6_info *rt; @@ -130,7 +131,6 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) } error: - rtnl_unlock(); if (pac) sock_kfree_s(sk, pac, sizeof(*pac)); return err; @@ -146,7 +146,8 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) struct ipv6_ac_socklist *pac, *prev_pac; struct net *net = sock_net(sk); - rtnl_lock(); + ASSERT_RTNL(); + prev_pac = NULL; for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) { if ((ifindex == 0 || pac->acl_ifindex == ifindex) && @@ -154,10 +155,8 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) break; prev_pac = pac; } - if (!pac) { - rtnl_unlock(); + if (!pac) return -ENOENT; - } if (prev_pac) prev_pac->acl_next = pac->acl_next; else @@ -166,7 +165,6 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) dev = __dev_get_by_index(net, pac->acl_ifindex); if (dev) ipv6_dev_ac_dec(dev, &pac->acl_addr); - rtnl_unlock(); sock_kfree_s(sk, pac, sizeof(*pac)); return 0; diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index b4d5e1d97c1b..27ca79682efb 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -104,6 +104,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, goto again; flp6->saddr = saddr; } + err = rt->dst.error; goto out; } again: diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 29b32206e494..6927f3fb5597 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -112,22 +112,20 @@ static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport, return c & (synq_hsize - 1); } -struct request_sock *inet6_csk_search_req(const struct sock *sk, - struct request_sock ***prevp, +struct request_sock *inet6_csk_search_req(struct sock *sk, const __be16 rport, const struct in6_addr *raddr, const struct in6_addr *laddr, const int iif) { - const struct inet_connection_sock *icsk = inet_csk(sk); + struct 
inet_connection_sock *icsk = inet_csk(sk); struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; - struct request_sock *req, **prev; + struct request_sock *req; + u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd, + lopt->nr_table_entries); - for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport, - lopt->hash_rnd, - lopt->nr_table_entries)]; - (req = *prev) != NULL; - prev = &req->dl_next) { + spin_lock(&icsk->icsk_accept_queue.syn_wait_lock); + for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) { const struct inet_request_sock *ireq = inet_rsk(req); if (ireq->ir_rmt_port == rport && @@ -135,13 +133,14 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk, ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) && ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) && (!ireq->ir_iif || ireq->ir_iif == iif)) { + atomic_inc(&req->rsk_refcnt); WARN_ON(req->sk != NULL); - *prevp = prev; - return req; + break; } } + spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock); - return NULL; + return req; } EXPORT_SYMBOL_GPL(inet6_csk_search_req); diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 051dffb49c90..033f17816ef4 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -23,11 +23,9 @@ #include <net/secure_seq.h> #include <net/ip.h> -static unsigned int inet6_ehashfn(struct net *net, - const struct in6_addr *laddr, - const u16 lport, - const struct in6_addr *faddr, - const __be16 fport) +u32 inet6_ehashfn(const struct net *net, + const struct in6_addr *laddr, const u16 lport, + const struct in6_addr *faddr, const __be16 fport) { static u32 inet6_ehash_secret __read_mostly; static u32 ipv6_hash_secret __read_mostly; @@ -44,54 +42,6 @@ static unsigned int inet6_ehashfn(struct net *net, inet6_ehash_secret + net_hash_mix(net)); } -static int inet6_sk_ehashfn(const struct sock *sk) -{ - const struct inet_sock *inet = inet_sk(sk); - const struct in6_addr *laddr = &sk->sk_v6_rcv_saddr; - const struct in6_addr *faddr = &sk->sk_v6_daddr; - const __u16 lport = inet->inet_num; - const __be16 fport = inet->inet_dport; - struct net *net = sock_net(sk); - - return inet6_ehashfn(net, laddr, lport, faddr, fport); -} - -int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw) -{ - struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; - int twrefcnt = 0; - - WARN_ON(!sk_unhashed(sk)); - - if (sk->sk_state == TCP_LISTEN) { - struct inet_listen_hashbucket *ilb; - - ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; - spin_lock(&ilb->lock); - __sk_nulls_add_node_rcu(sk, &ilb->head); - spin_unlock(&ilb->lock); - } else { - unsigned int hash; - struct hlist_nulls_head *list; - spinlock_t *lock; - - sk->sk_hash = hash = inet6_sk_ehashfn(sk); - list = &inet_ehash_bucket(hashinfo, hash)->chain; - lock = inet_ehash_lockp(hashinfo, hash); - spin_lock(lock); - __sk_nulls_add_node_rcu(sk, list); - if (tw) { - WARN_ON(sk->sk_hash != tw->tw_hash); - twrefcnt = inet_twsk_unhash(tw); - } - spin_unlock(lock); - } - - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); - return twrefcnt; -} -EXPORT_SYMBOL(__inet6_hash); - /* * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so * we need not check it for TCP lookups anymore, thanks Alexey. 
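The inet6_csk_search_req() rewrite above drops the caller-supplied **prev cursor and instead walks the SYN table under syn_wait_lock, taking a reference on the request before the lock is dropped so the entry cannot be freed under the caller (the matching reqsk_put() shows up in the tcp_ipv6.c hunks further down). A minimal userspace sketch of that lookup-and-hold pattern, with pthread spinlocks and C11 atomics standing in for the kernel primitives and all names local to the example:

#include <pthread.h>
#include <stdatomic.h>

struct req {
    struct req *next;
    atomic_int refcnt;
    int key;
};

struct req_table {
    pthread_spinlock_t lock;
    struct req *head;
};

/* Find a request and pin it: the reference is taken while the table
 * lock is held, so the entry cannot vanish between lookup and use. */
static struct req *req_lookup_hold(struct req_table *t, int key)
{
    struct req *r;

    pthread_spin_lock(&t->lock);
    for (r = t->head; r; r = r->next) {
        if (r->key == key) {
            atomic_fetch_add(&r->refcnt, 1);
            break;
        }
    }
    pthread_spin_unlock(&t->lock);
    return r; /* caller drops the reference when done (cf. reqsk_put) */
}

int main(void)
{
    struct req r = { .refcnt = 1, .key = 42 };
    struct req_table t = { .head = &r };

    pthread_spin_init(&t.lock, PTHREAD_PROCESS_PRIVATE);
    return req_lookup_hold(&t, 42) ? 0 : 1;
}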
-DaveM @@ -320,6 +270,6 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk), - __inet6_check_established, __inet6_hash); + __inet6_check_established); } EXPORT_SYMBOL_GPL(inet6_hash_connect); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index f45d6db50a45..457303886fd4 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -100,7 +100,6 @@ static void fl_free(struct ip6_flowlabel *fl) if (fl) { if (fl->share == IPV6_FL_S_PROCESS) put_pid(fl->owner.pid); - release_net(fl->fl_net); kfree(fl->opt); kfree_rcu(fl, rcu); } @@ -403,7 +402,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, } } - fl->fl_net = hold_net(net); + fl->fl_net = net; fl->expires = jiffies; err = fl6_renew(fl, freq->flr_linger, freq->flr_expires); if (err) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 0a04a37305d5..7e80b61b51ff 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -318,6 +318,7 @@ static int ip6_forward_proxy_check(struct sk_buff *skb) static inline int ip6_forward_finish(struct sk_buff *skb) { + skb_sender_cpu_clear(skb); return dst_output(skb); } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 88300d42fc95..41f84f76ad9d 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -308,7 +308,7 @@ out: * Create tunnel matching given parameters. * * Return: - * created tunnel or NULL + * created tunnel or error pointer **/ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) @@ -316,7 +316,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) struct net_device *dev; struct ip6_tnl *t; char name[IFNAMSIZ]; - int err; + int err = -ENOMEM; if (p->name[0]) strlcpy(name, p->name, IFNAMSIZ); @@ -342,7 +342,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) failed_free: ip6_dev_free(dev); failed: - return NULL; + return ERR_PTR(err); } /** @@ -356,7 +356,7 @@ failed: * tunnel device is created and registered for use. * * Return: - * matching tunnel or NULL + * matching tunnel or error pointer **/ static struct ip6_tnl *ip6_tnl_locate(struct net *net, @@ -374,13 +374,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net, if (ipv6_addr_equal(local, &t->parms.laddr) && ipv6_addr_equal(remote, &t->parms.raddr)) { if (create) - return NULL; + return ERR_PTR(-EEXIST); return t; } } if (!create) - return NULL; + return ERR_PTR(-ENODEV); return ip6_tnl_create(net, p); } @@ -1414,7 +1414,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, 0); - if (t == NULL) + if (IS_ERR(t)) t = netdev_priv(dev); } else { memset(&p, 0, sizeof(p)); @@ -1439,7 +1439,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); if (cmd == SIOCCHGTUNNEL) { - if (t != NULL) { + if (!IS_ERR(t)) { if (t->dev != dev) { err = -EEXIST; break; @@ -1451,14 +1451,15 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) else err = ip6_tnl_update(t, &p1); } - if (t) { + if (!IS_ERR(t)) { err = 0; ip6_tnl_parm_to_user(&p, &t->parms); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) err = -EFAULT; - } else - err = (cmd == SIOCADDTUNNEL ? 
-ENOBUFS : -ENOENT); + } else { + err = PTR_ERR(t); + } break; case SIOCDELTUNNEL: err = -EPERM; @@ -1472,7 +1473,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) err = -ENOENT; ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, 0); - if (t == NULL) + if (IS_ERR(t)) break; err = -EPERM; if (t->dev == ip6n->fb_tnl_dev) @@ -1666,12 +1667,13 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net *net = dev_net(dev); - struct ip6_tnl *nt; + struct ip6_tnl *nt, *t; nt = netdev_priv(dev); ip6_tnl_netlink_parms(data, &nt->parms); - if (ip6_tnl_locate(net, &nt->parms, 0)) + t = ip6_tnl_locate(net, &nt->parms, 0); + if (!IS_ERR(t)) return -EEXIST; return ip6_tnl_create2(dev); @@ -1691,8 +1693,7 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[], ip6_tnl_netlink_parms(data, &p); t = ip6_tnl_locate(net, &p, 0); - - if (t) { + if (!IS_ERR(t)) { if (t->dev != dev) return -EEXIST; } else diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 34b682617f50..4b9315aa273e 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -56,9 +56,7 @@ struct mr6_table { struct list_head list; -#ifdef CONFIG_NET_NS - struct net *net; -#endif + possible_net_t net; u32 id; struct sock *mroute6_sk; struct timer_list ipmr_expire_timer; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 8d766d9100cb..9b2cb1444230 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -117,6 +117,25 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, return opt; } +static bool setsockopt_needs_rtnl(int optname) +{ + switch (optname) { + case IPV6_ADD_MEMBERSHIP: + case IPV6_DROP_MEMBERSHIP: + case IPV6_JOIN_ANYCAST: + case IPV6_LEAVE_ANYCAST: + case MCAST_JOIN_GROUP: + case MCAST_LEAVE_GROUP: + case MCAST_JOIN_SOURCE_GROUP: + case MCAST_LEAVE_SOURCE_GROUP: + case MCAST_BLOCK_SOURCE: + case MCAST_UNBLOCK_SOURCE: + case MCAST_MSFILTER: + return true; + } + return false; +} + static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { @@ -124,6 +143,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, struct net *net = sock_net(sk); int val, valbool; int retv = -ENOPROTOOPT; + bool needs_rtnl = setsockopt_needs_rtnl(optname); if (optval == NULL) val = 0; @@ -140,6 +160,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, if (ip6_mroute_opt(optname)) return ip6_mroute_setsockopt(sk, optname, optval, optlen); + if (needs_rtnl) + rtnl_lock(); lock_sock(sk); switch (optname) { @@ -624,10 +646,10 @@ done: psin6 = (struct sockaddr_in6 *)&greq.gr_group; if (optname == MCAST_JOIN_GROUP) retv = ipv6_sock_mc_join(sk, greq.gr_interface, - &psin6->sin6_addr); + &psin6->sin6_addr); else retv = ipv6_sock_mc_drop(sk, greq.gr_interface, - &psin6->sin6_addr); + &psin6->sin6_addr); break; } case MCAST_JOIN_SOURCE_GROUP: @@ -660,7 +682,7 @@ done: psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; retv = ipv6_sock_mc_join(sk, greqs.gsr_interface, - &psin6->sin6_addr); + &psin6->sin6_addr); /* prior join w/ different source is ok */ if (retv && retv != -EADDRINUSE) break; @@ -837,11 +859,15 @@ pref_skip_coa: } release_sock(sk); + if (needs_rtnl) + rtnl_unlock(); return retv; e_inval: release_sock(sk); + if (needs_rtnl) + rtnl_unlock(); return -EINVAL; } diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 1dd1fedff9f4..cbb66fd3da6d 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c 
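The hunks above convert ip6_tnl_create()/ip6_tnl_locate() from returning NULL to returning distinct error pointers (-ENOMEM, -EEXIST, -ENODEV), so the ioctl and netlink paths can report the precise cause via PTR_ERR() instead of guessing an errno. A compact userspace sketch of the convention, with the three helpers re-derived locally for illustration (the kernel's real ones live in <linux/err.h>):

#include <errno.h>
#include <stdio.h>

/* Local stand-ins for the kernel's ERR_PTR helpers, illustration only. */
#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct tnl { int id; };

static struct tnl *tnl_locate(int id, int create)
{
    static struct tnl existing = { .id = 1 };

    if (id == existing.id)
        return create ? ERR_PTR(-EEXIST) : &existing;
    /* a real implementation would allocate here; pretend it failed */
    return create ? ERR_PTR(-ENOMEM) : ERR_PTR(-ENODEV);
}

int main(void)
{
    struct tnl *t = tnl_locate(2, 0);

    if (IS_ERR(t))
        printf("locate failed: %ld\n", PTR_ERR(t)); /* -ENODEV, -19 on Linux */
    return 0;
}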
@@ -132,7 +132,7 @@ static int unsolicited_report_interval(struct inet6_dev *idev) return iv > 0 ? iv : 1; } -int __ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) +int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) { struct net_device *dev = NULL; struct ipv6_mc_socklist *mc_lst; @@ -199,24 +199,12 @@ int __ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *add return 0; } -EXPORT_SYMBOL(__ipv6_sock_mc_join); - -int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) -{ - int ret; - - rtnl_lock(); - ret = __ipv6_sock_mc_join(sk, ifindex, addr); - rtnl_unlock(); - - return ret; -} EXPORT_SYMBOL(ipv6_sock_mc_join); /* * socket leave on multicast group */ -int __ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) +int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_mc_socklist *mc_lst; @@ -255,18 +243,6 @@ int __ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *add return -EADDRNOTAVAIL; } -EXPORT_SYMBOL(__ipv6_sock_mc_drop); - -int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) -{ - int ret; - - rtnl_lock(); - ret = __ipv6_sock_mc_drop(sk, ifindex, addr); - rtnl_unlock(); - - return ret; -} EXPORT_SYMBOL(ipv6_sock_mc_drop); /* called with rcu_read_lock() */ @@ -460,7 +436,7 @@ done: read_unlock_bh(&idev->lock); rcu_read_unlock(); if (leavegroup) - return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group); + err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group); return err; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 06fa819c43c9..58c0e6a4d15d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2398,6 +2398,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { [RTA_PRIORITY] = { .type = NLA_U32 }, [RTA_METRICS] = { .type = NLA_NESTED }, [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, + [RTA_PREF] = { .type = NLA_U8 }, }; static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, @@ -2405,6 +2406,7 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, { struct rtmsg *rtm; struct nlattr *tb[RTA_MAX+1]; + unsigned int pref; int err; err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); @@ -2480,6 +2482,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); } + if (tb[RTA_PREF]) { + pref = nla_get_u8(tb[RTA_PREF]); + if (pref != ICMPV6_ROUTER_PREF_LOW && + pref != ICMPV6_ROUTER_PREF_HIGH) + pref = ICMPV6_ROUTER_PREF_MEDIUM; + cfg->fc_flags |= RTF_PREF(pref); + } + err = 0; errout: return err; @@ -2583,7 +2593,8 @@ static inline size_t rt6_nlmsg_size(void) + nla_total_size(4) /* RTA_PRIORITY */ + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */ + nla_total_size(sizeof(struct rta_cacheinfo)) - + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */ + + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ + + nla_total_size(1); /* RTA_PREF */ } static int rt6_fill_node(struct net *net, @@ -2724,6 +2735,9 @@ static int rt6_fill_node(struct net *net, if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0) goto nla_put_failure; + if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) + goto nla_put_failure; + nlmsg_end(skb, nlh); return 0; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 7337fc7947e2..2819137fc87d 100644 --- a/net/ipv6/syncookies.c +++ 
b/net/ipv6/syncookies.c @@ -49,11 +49,12 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct sock *child; child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst); - if (child) + if (child) { + atomic_set(&req->rsk_refcnt, 1); inet_csk_reqsk_queue_add(sk, req, child); - else + } else { reqsk_free(req); - + } return child; } @@ -189,13 +190,14 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) goto out; ret = NULL; - req = inet_reqsk_alloc(&tcp6_request_sock_ops); + req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk); if (!req) goto out; ireq = inet_rsk(req); treq = tcp_rsk(req); - treq->listener = NULL; + treq->tfo_listener = false; + ireq->ireq_family = AF_INET6; if (security_inet_conn_request(sk, skb, req)) goto out_free; @@ -220,7 +222,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ireq->ir_mark = inet_request_mark(sk, skb); - req->expires = 0UL; req->num_retrans = 0; ireq->snd_wscale = tcp_opt.snd_wscale; ireq->sack_ok = tcp_opt.sack_ok; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5d46832c6f72..4a4e6d30c448 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -104,19 +104,6 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) } } -static void tcp_v6_hash(struct sock *sk) -{ - if (sk->sk_state != TCP_CLOSE) { - if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) { - tcp_prot.hash(sk); - return; - } - local_bh_disable(); - __inet6_hash(sk, NULL); - local_bh_enable(); - } -} - static __u32 tcp_v6_init_sequence(const struct sk_buff *skb) { return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, @@ -233,11 +220,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, tp->af_specific = &tcp_sock_ipv6_specific; #endif goto failure; - } else { - ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); - ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, - &sk->sk_v6_rcv_saddr); } + np->saddr = sk->sk_v6_rcv_saddr; return err; } @@ -340,18 +324,20 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, { const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); + struct net *net = dev_net(skb->dev); + struct request_sock *fastopen; struct ipv6_pinfo *np; - struct sock *sk; - int err; struct tcp_sock *tp; - struct request_sock *fastopen; __u32 seq, snd_una; - struct net *net = dev_net(skb->dev); + struct sock *sk; + int err; - sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr, - th->dest, &hdr->saddr, th->source, skb->dev->ifindex); + sk = __inet6_lookup_established(net, &tcp_hashinfo, + &hdr->daddr, th->dest, + &hdr->saddr, ntohs(th->source), + skb->dev->ifindex); - if (sk == NULL) { + if (!sk) { ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return; @@ -361,6 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, inet_twsk_put(inet_twsk(sk)); return; } + seq = ntohl(th->seq); + if (sk->sk_state == TCP_NEW_SYN_RECV) + return tcp_req_err(sk, seq); bh_lock_sock(sk); if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) @@ -375,7 +364,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } tp = tcp_sk(sk); - seq = ntohl(th->seq); /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ fastopen = tp->fastopen_rsk; snd_una = fastopen ? 
tcp_rsk(fastopen)->snt_isn : tp->snd_una; @@ -419,31 +407,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, /* Might be for an request_sock */ switch (sk->sk_state) { - struct request_sock *req, **prev; - case TCP_LISTEN: - if (sock_owned_by_user(sk)) - goto out; - - /* Note : We use inet6_iif() here, not tcp_v6_iif() */ - req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr, - &hdr->saddr, inet6_iif(skb)); - if (!req) - goto out; - - /* ICMPs are not backlogged, hence we cannot get - * an established socket here. - */ - WARN_ON(req->sk != NULL); - - if (seq != tcp_rsk(req)->snt_isn) { - NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); - goto out; - } - - inet_csk_reqsk_queue_drop(sk, req, prev); - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); - goto out; - case TCP_SYN_SENT: case TCP_SYN_RECV: /* Only in fast or simultaneous open. If a fast open socket is @@ -734,8 +697,6 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk, ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; - ireq->ir_iif = sk->sk_bound_dev_if; - /* So that link locals have meaning */ if (!sk->sk_bound_dev_if && ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) @@ -749,6 +710,7 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk, atomic_inc(&skb->users); ireq->pktopts = skb; } + ireq->ireq_family = AF_INET6; } static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl, @@ -997,17 +959,19 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) { - struct request_sock *req, **prev; const struct tcphdr *th = tcp_hdr(skb); + struct request_sock *req; struct sock *nsk; /* Find possible connection requests. */ - req = inet6_csk_search_req(sk, &prev, th->source, + req = inet6_csk_search_req(sk, th->source, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); - if (req) - return tcp_check_req(sk, skb, req, prev, false); - + if (req) { + nsk = tcp_check_req(sk, skb, req, false); + reqsk_put(req); + return nsk; + } nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, &ipv6_hdr(skb)->saddr, th->source, &ipv6_hdr(skb)->daddr, ntohs(th->dest), @@ -1079,11 +1043,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, memcpy(newnp, np, sizeof(struct ipv6_pinfo)); - ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr); - - ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); - - newsk->sk_v6_rcv_saddr = newnp->saddr; + newnp->saddr = newsk->sk_v6_rcv_saddr; inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; newsk->sk_backlog_rcv = tcp_v4_do_rcv; @@ -1232,7 +1192,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, tcp_done(newsk); goto out; } - __inet6_hash(newsk, NULL); + __inet_hash(newsk, NULL); return newsk; @@ -1584,7 +1544,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb) if (sk) { skb->sk = sk; skb->destructor = sock_edemux; - if (sk->sk_state != TCP_TIME_WAIT) { + if (sk_fullsock(sk)) { struct dst_entry *dst = sk->sk_rx_dst; if (dst) @@ -1689,9 +1649,9 @@ static void tcp_v6_destroy_sock(struct sock *sk) #ifdef CONFIG_PROC_FS /* Proc filesystem TCPv6 sock list dumping. 
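The early-demux hunk just above is one of many in this series that replace open-coded sk->sk_state != TCP_TIME_WAIT tests with sk_fullsock(), which additionally excludes the new TCP_NEW_SYN_RECV mini-socket state; the same conversion recurs in the netfilter and ipvs hunks later on. A sketch of the predicate with the state constants stubbed in locally (values as of this series, illustration only):

#include <stdbool.h>
#include <stdio.h>

/* Kernel state numbers; the TCPF_* masks are the 1 << state bit forms. */
enum { TCP_ESTABLISHED = 1, TCP_TIME_WAIT = 6, TCP_NEW_SYN_RECV = 12 };
#define TCPF_TIME_WAIT    (1 << TCP_TIME_WAIT)
#define TCPF_NEW_SYN_RECV (1 << TCP_NEW_SYN_RECV)

/* Full sockets carry a complete struct sock; timewait and request
 * mini-sockets only share the common header, so fields such as
 * sk_rx_dst or the socket credentials must not be touched on them. */
static bool sk_fullsock_sketch(int sk_state)
{
    return (1 << sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
}

int main(void)
{
    printf("%d %d %d\n",
           sk_fullsock_sketch(TCP_ESTABLISHED),   /* 1 */
           sk_fullsock_sketch(TCP_TIME_WAIT),     /* 0 */
           sk_fullsock_sketch(TCP_NEW_SYN_RECV)); /* 0 */
    return 0;
}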
*/ static void get_openreq6(struct seq_file *seq, - const struct sock *sk, struct request_sock *req, int i, kuid_t uid) + struct request_sock *req, int i, kuid_t uid) { - int ttd = req->expires - jiffies; + long ttd = req->rsk_timer.expires - jiffies; const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr; const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr; @@ -1827,7 +1787,7 @@ static int tcp6_seq_show(struct seq_file *seq, void *v) get_tcp6_sock(seq, v, st->num); break; case TCP_SEQ_STATE_OPENREQ: - get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid); + get_openreq6(seq, v, st->num, st->uid); break; } out: @@ -1891,7 +1851,7 @@ struct proto tcpv6_prot = { .sendpage = tcp_sendpage, .backlog_rcv = tcp_v6_do_rcv, .release_cb = tcp_release_cb, - .hash = tcp_v6_hash, + .hash = inet_hash, .unhash = inet_unhash, .get_port = inet_csk_get_port, .enter_memory_pressure = tcp_enter_memory_pressure, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 70568a4548e4..7fe0329c0d37 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -53,11 +53,11 @@ #include <trace/events/skb.h> #include "udp_impl.h" -static unsigned int udp6_ehashfn(struct net *net, - const struct in6_addr *laddr, - const u16 lport, - const struct in6_addr *faddr, - const __be16 fport) +static u32 udp6_ehashfn(const struct net *net, + const struct in6_addr *laddr, + const u16 lport, + const struct in6_addr *faddr, + const __be16 fport) { static u32 udp6_ehash_secret __read_mostly; static u32 udp_ipv6_hash_secret __read_mostly; @@ -104,9 +104,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) return 0; } -static unsigned int udp6_portaddr_hash(struct net *net, - const struct in6_addr *addr6, - unsigned int port) +static u32 udp6_portaddr_hash(const struct net *net, + const struct in6_addr *addr6, + unsigned int port) { unsigned int hash, mix = net_hash_mix(net); diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index ab889bb16b3c..be2c0ba82c85 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -112,11 +112,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->nexthdr = nexthdr; fptr->reserved = 0; - if (skb_shinfo(skb)->ip6_frag_id) - fptr->identification = skb_shinfo(skb)->ip6_frag_id; - else - ipv6_select_ident(fptr, - (struct rt6_info *)skb_dst(skb)); + if (!skb_shinfo(skb)->ip6_frag_id) + ipv6_proxy_select_ident(skb); + fptr->identification = skb_shinfo(skb)->ip6_frag_id; /* Fragment the skb. 
ipv6 header and the remaining fields of the * fragment header are updated in ipv6_gso_segment() diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index ca3f29b98ae5..010f8bd2d577 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -114,6 +114,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) return err; skb->ignore_df = 1; + skb->protocol = htons(ETH_P_IPV6); return x->outer_mode->output2(x, skb); } @@ -122,7 +123,6 @@ EXPORT_SYMBOL(xfrm6_prepare_output); int xfrm6_output_finish(struct sk_buff *skb) { memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); - skb->protocol = htons(ETH_P_IPV6); #ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 8ddf2b545151..11dbcc1790d2 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -200,6 +200,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPPROTO_MH: + offset += ipv6_optlen(exthdr); if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { struct ip6_mh *mh; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 3afe36824703..8d53d65bd2ab 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -58,13 +58,24 @@ struct ieee80211_local; #define IEEE80211_UNSET_POWER_LEVEL INT_MIN /* - * Some APs experience problems when working with U-APSD. Decrease the - * probability of that happening by using legacy mode for all ACs but VO. - * The AP that caused us trouble was a Cisco 4410N. It ignores our - * setting, and always treats non-VO ACs as legacy. + * Some APs experience problems when working with U-APSD. Decreasing the + * probability of that happening by using legacy mode for all ACs but VO isn't + * enough. + * + * Cisco 4410N originally forced us to enable VO by default only because it + * treated non-VO ACs as legacy. + * + * However, some APs (notably Netgear R7000) silently reclassify packets to + * different ACs. Since u-APSD ACs require trigger frames for frame retrieval, + * clients would never see some frames (e.g. ARP responses) or would fetch them + * accidentally after a long time. + * + * It makes little sense to enable u-APSD queues by default because it needs + * userspace applications to be aware of it to actually take advantage of the + * possible additional power savings. Implicitly depending on driver autotrigger + * frame support doesn't make much sense. 
*/ -#define IEEE80211_DEFAULT_UAPSD_QUEUES \ - IEEE80211_WMM_IE_STA_QOSINFO_AC_VO +#define IEEE80211_DEFAULT_UAPSD_QUEUES 0 #define IEEE80211_DEFAULT_MAX_SP_LEN \ IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL @@ -453,6 +464,7 @@ struct ieee80211_if_managed { unsigned int flags; bool csa_waiting_bcn; + bool csa_ignored_same_chan; bool beacon_crc_valid; u32 beacon_crc; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9f6f3562396a..a4b1dd332e0f 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1150,6 +1150,17 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, return; } + if (cfg80211_chandef_identical(&csa_ie.chandef, + &sdata->vif.bss_conf.chandef)) { + if (ifmgd->csa_ignored_same_chan) + return; + sdata_info(sdata, + "AP %pM tries to chanswitch to same channel, ignore\n", + ifmgd->associated->bssid); + ifmgd->csa_ignored_same_chan = true; + return; + } + mutex_lock(&local->mtx); mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, @@ -1210,6 +1221,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, sdata->vif.csa_active = true; sdata->csa_chandef = csa_ie.chandef; sdata->csa_block_tx = csa_ie.mode; + ifmgd->csa_ignored_same_chan = false; if (sdata->csa_block_tx) ieee80211_stop_vif_queues(local, sdata, @@ -2090,6 +2102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, sdata->vif.csa_active = false; ifmgd->csa_waiting_bcn = false; + ifmgd->csa_ignored_same_chan = false; if (sdata->csa_block_tx) { ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_CSA); @@ -3204,7 +3217,8 @@ static const u64 care_about_ies = (1ULL << WLAN_EID_CHANNEL_SWITCH) | (1ULL << WLAN_EID_PWR_CONSTRAINT) | (1ULL << WLAN_EID_HT_CAPABILITY) | - (1ULL << WLAN_EID_HT_OPERATION); + (1ULL << WLAN_EID_HT_OPERATION) | + (1ULL << WLAN_EID_EXT_CHANSWITCH_ANN); static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 1101563357ea..944bdc04e913 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2214,6 +2214,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) hdr = (struct ieee80211_hdr *) skb->data; mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); + if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) + return RX_DROP_MONITOR; + /* frame is in RMC, don't forward */ if (ieee80211_is_data(hdr->frame_control) && is_multicast_ether_addr(hdr->addr1) && diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 8428f4a95479..747bdcf72e92 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3178,7 +3178,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, wdev_iter = &sdata_iter->wdev; if (sdata_iter == sdata || - rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL || + !ieee80211_sdata_running(sdata_iter) || local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype)) continue; diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h index 98180a9fff4a..a0533357b9ea 100644 --- a/net/mac802154/driver-ops.h +++ b/net/mac802154/driver-ops.h @@ -1,4 +1,4 @@ -#ifndef __MAC802154_DRVIER_OPS +#ifndef __MAC802154_DRIVER_OPS #define __MAC802154_DRIVER_OPS #include <linux/types.h> @@ -220,4 +220,4 @@ drv_set_promiscuous_mode(struct ieee802154_local *local, bool on) return local->ops->set_promiscuous_mode(&local->hw, on); } -#endif /* __MAC802154_DRVIER_OPS */ +#endif /* __MAC802154_DRIVER_OPS */ diff --git 
a/net/mac802154/util.c b/net/mac802154/util.c index 5fc979027919..150bf807e572 100644 --- a/net/mac802154/util.c +++ b/net/mac802154/util.c @@ -65,8 +65,19 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, { if (ifs_handling) { struct ieee802154_local *local = hw_to_local(hw); + u8 max_sifs_size; - if (skb->len > 18) + /* If the transceiver sets the CRC on its own, the FCS is not + * part of skb->len, so the LIFS threshold is a frame length + * above 16 rather than 18. + */ + if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM) + max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE - + IEEE802154_FCS_LEN; + else + max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE; + + if (skb->len > max_sifs_size) hrtimer_start(&local->ifs_timer, ktime_set(0, hw->phy->lifs_period * NSEC_PER_USEC), HRTIMER_MODE_REL); diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig index dfca485863e9..17bde799c854 100644 --- a/net/mpls/Kconfig +++ b/net/mpls/Kconfig @@ -3,7 +3,7 @@ # menuconfig MPLS - tristate "MultiProtocol Label Switching" + bool "MultiProtocol Label Switching" default n ---help--- MultiProtocol Label Switching routes packets through logical @@ -16,14 +16,14 @@ menuconfig MPLS if MPLS config NET_MPLS_GSO - bool "MPLS: GSO support" + tristate "MPLS: GSO support" help This is a helper module to allow segmentation of non-MPLS GSO packets that have had MPLS stack entries pushed onto them and thus become MPLS GSO packets. config MPLS_ROUTING - bool "MPLS: routing support" + tristate "MPLS: routing support" help Add support for forwarding of MPLS packets. diff --git a/net/mpls/Makefile b/net/mpls/Makefile index 60af15f1960e..65bbe68c72e6 100644 --- a/net/mpls/Makefile +++ b/net/mpls/Makefile @@ -2,4 +2,6 @@ # Makefile for MPLS. # obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o -obj-$(CONFIG_MPLS_ROUTING) += af_mpls.o +obj-$(CONFIG_MPLS_ROUTING) += mpls_router.o + +mpls_router-y := af_mpls.o diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 0ad8f7141be2..db8a2ea6d4de 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -92,9 +92,24 @@ static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb, * The strange cases if we choose to support them will require * manual configuration. */ - struct iphdr *hdr4 = ip_hdr(skb); + struct iphdr *hdr4; bool success = true; + /* The IPv4 code below accesses through the IPv4 header + * checksum, which is 12 bytes into the packet. + * The IPv6 code below accesses through the IPv6 hop limit, + * which is 8 bytes into the packet. + * + * For all supported cases there should always be at least 12 + * bytes of packet data present. The IPv4 header is 20 bytes + * without options and the IPv6 header is always 40 bytes + * long. 
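The 12- and 8-byte bounds spelled out in the mpls_egress() comment above can be checked mechanically: the IPv4 checksum occupies bytes 10-11 of the header and the IPv6 hop limit is byte 7, so pulling 12 bytes covers both accesses. A small sketch that verifies the arithmetic with packed local copies of the two header layouts (illustration only, not the kernel's struct definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct iphdr_x {
    uint8_t  ver_ihl, tos;
    uint16_t tot_len, id, frag_off;
    uint8_t  ttl, protocol;
    uint16_t check;                 /* bytes 10-11 */
    uint32_t saddr, daddr;
} __attribute__((packed));

struct ipv6hdr_x {
    uint32_t ver_class_flow;
    uint16_t payload_len;
    uint8_t  nexthdr;
    uint8_t  hop_limit;             /* byte 7 */
    uint8_t  saddr[16], daddr[16];
} __attribute__((packed));

int main(void)
{
    /* Both accesses fall inside the 12 bytes pulled by pskb_may_pull(). */
    printf("ipv4 check ends at byte %zu\n",
           offsetof(struct iphdr_x, check) + sizeof(uint16_t));      /* 12 */
    printf("ipv6 hop_limit ends at byte %zu\n",
           offsetof(struct ipv6hdr_x, hop_limit) + sizeof(uint8_t)); /* 8 */
    return 0;
}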
+ */ + if (!pskb_may_pull(skb, 12)) + return false; + + /* Use ip_hdr to find the ip protocol version */ + hdr4 = ip_hdr(skb); if (hdr4->version == 4) { skb->protocol = htons(ETH_P_IP); csum_replace2(&hdr4->check, diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 08d95559b6f7..19b9cce6c210 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1405,9 +1405,11 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) mreq.imr_ifindex = dev->ifindex; + rtnl_lock(); lock_sock(sk); ret = ip_mc_join_group(sk, &mreq); release_sock(sk); + rtnl_unlock(); return ret; } diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 3aedbda7658a..f35c15b0de6b 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -209,7 +209,7 @@ static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu) struct sock *sk = skb->sk; struct rtable *ort = skb_rtable(skb); - if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT) + if (!skb->dev && sk && sk_fullsock(sk)) ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu); } diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c index a2233e77cf39..2631876ac55b 100644 --- a/net/netfilter/nf_log_common.c +++ b/net/netfilter/nf_log_common.c @@ -133,7 +133,7 @@ EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header); void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk) { - if (!sk || sk->sk_state == TCP_TIME_WAIT) + if (!sk || !sk_fullsock(sk)) return; read_lock_bh(&sk->sk_callback_lock); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index db5e3a80fc6d..957b83a0223b 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -539,7 +539,7 @@ __build_packet_message(struct nfnl_log_net *log, /* UID */ sk = skb->sk; - if (sk && sk->sk_state != TCP_TIME_WAIT) { + if (sk && sk_fullsock(sk)) { read_lock_bh(&sk->sk_callback_lock); if (sk->sk_socket && sk->sk_socket->file) { struct file *file = sk->sk_socket->file; diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 0db8515e76da..86ee8b05adae 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -257,7 +257,7 @@ static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk) { const struct cred *cred; - if (sk->sk_state == TCP_TIME_WAIT) + if (!sk_fullsock(sk)) return 0; read_lock_bh(&sk->sk_callback_lock); diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index c82df0a48fcd..4585c5724391 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -29,6 +29,8 @@ struct nft_hash_elem { struct nft_data data[]; }; +static const struct rhashtable_params nft_hash_params; + static bool nft_hash_lookup(const struct nft_set *set, const struct nft_data *key, struct nft_data *data) @@ -36,7 +38,7 @@ static bool nft_hash_lookup(const struct nft_set *set, struct rhashtable *priv = nft_set_priv(set); const struct nft_hash_elem *he; - he = rhashtable_lookup(priv, key); + he = rhashtable_lookup_fast(priv, key, nft_hash_params); if (he && set->flags & NFT_SET_MAP) nft_data_copy(data, he->data); @@ -49,6 +51,7 @@ static int nft_hash_insert(const struct nft_set *set, struct rhashtable *priv = nft_set_priv(set); struct nft_hash_elem *he; unsigned int size; + int err; if (elem->flags != 0) return -EINVAL; @@ -65,9 +68,11 @@ static int nft_hash_insert(const struct nft_set *set, if (set->flags & NFT_SET_MAP) 
nft_data_copy(he->data, &elem->data); - rhashtable_insert(priv, &he->node); + err = rhashtable_insert_fast(priv, &he->node, nft_hash_params); + if (err) + kfree(he); - return 0; + return err; } static void nft_hash_elem_destroy(const struct nft_set *set, @@ -84,46 +89,26 @@ static void nft_hash_remove(const struct nft_set *set, { struct rhashtable *priv = nft_set_priv(set); - rhashtable_remove(priv, elem->cookie); + rhashtable_remove_fast(priv, elem->cookie, nft_hash_params); synchronize_rcu(); kfree(elem->cookie); } -struct nft_compare_arg { - const struct nft_set *set; - struct nft_set_elem *elem; -}; - -static bool nft_hash_compare(void *ptr, void *arg) -{ - struct nft_hash_elem *he = ptr; - struct nft_compare_arg *x = arg; - - if (!nft_data_cmp(&he->key, &x->elem->key, x->set->klen)) { - x->elem->cookie = he; - x->elem->flags = 0; - if (x->set->flags & NFT_SET_MAP) - nft_data_copy(&x->elem->data, he->data); - - return true; - } - - return false; -} - static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem) { struct rhashtable *priv = nft_set_priv(set); - struct nft_compare_arg arg = { - .set = set, - .elem = elem, - }; + struct nft_hash_elem *he; + + he = rhashtable_lookup_fast(priv, &elem->key, nft_hash_params); + if (!he) + return -ENOENT; - if (rhashtable_lookup_compare(priv, &elem->key, - &nft_hash_compare, &arg)) - return 0; + elem->cookie = he; + elem->flags = 0; + if (set->flags & NFT_SET_MAP) + nft_data_copy(&elem->data, he->data); - return -ENOENT; + return 0; } static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, @@ -181,18 +166,21 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[]) return sizeof(struct rhashtable); } +static const struct rhashtable_params nft_hash_params = { + .head_offset = offsetof(struct nft_hash_elem, node), + .key_offset = offsetof(struct nft_hash_elem, key), + .hashfn = jhash, +}; + static int nft_hash_init(const struct nft_set *set, const struct nft_set_desc *desc, const struct nlattr * const tb[]) { struct rhashtable *priv = nft_set_priv(set); - struct rhashtable_params params = { - .nelem_hint = desc->size ? 
: NFT_HASH_ELEMENT_HINT, - .head_offset = offsetof(struct nft_hash_elem, node), - .key_offset = offsetof(struct nft_hash_elem, key), - .key_len = set->klen, - .hashfn = jhash, - }; + struct rhashtable_params params = nft_hash_params; + + params.nelem_hint = desc->size ?: NFT_HASH_ELEMENT_HINT; + params.key_len = set->klen; return rhashtable_init(priv, ¶ms); } diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index e99911eda915..abe68119a76c 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -83,7 +83,7 @@ void nft_meta_get_eval(const struct nft_expr *expr, *(u16 *)dest->data = out->type; break; case NFT_META_SKUID: - if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT) + if (skb->sk == NULL || !sk_fullsock(skb->sk)) goto err; read_lock_bh(&skb->sk->sk_callback_lock); @@ -99,7 +99,7 @@ void nft_meta_get_eval(const struct nft_expr *expr, read_unlock_bh(&skb->sk->sk_callback_lock); break; case NFT_META_SKGID: - if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT) + if (skb->sk == NULL || !sk_fullsock(skb->sk)) goto err; read_lock_bh(&skb->sk->sk_callback_lock); diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index ef8a926752a9..165b77ce9aa9 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -42,15 +42,21 @@ enum nf_tproxy_lookup_t { static bool tproxy_sk_is_transparent(struct sock *sk) { - if (sk->sk_state != TCP_TIME_WAIT) { - if (inet_sk(sk)->transparent) - return true; - sock_put(sk); - } else { + switch (sk->sk_state) { + case TCP_TIME_WAIT: if (inet_twsk(sk)->tw_transparent) return true; - inet_twsk_put(inet_twsk(sk)); + break; + case TCP_NEW_SYN_RECV: + if (inet_rsk(inet_reqsk(sk))->no_srccheck) + return true; + break; + default: + if (inet_sk(sk)->transparent) + return true; } + + sock_gen_put(sk); return false; } diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 13332dbf291d..895534e87a47 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -129,6 +129,20 @@ xt_socket_get_sock_v4(struct net *net, const u8 protocol, return NULL; } +static bool xt_socket_sk_is_transparent(struct sock *sk) +{ + switch (sk->sk_state) { + case TCP_TIME_WAIT: + return inet_twsk(sk)->tw_transparent; + + case TCP_NEW_SYN_RECV: + return inet_rsk(inet_reqsk(sk))->no_srccheck; + + default: + return inet_sk(sk)->transparent; + } +} + static bool socket_match(const struct sk_buff *skb, struct xt_action_param *par, const struct xt_socket_mtinfo1 *info) @@ -195,16 +209,14 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, * unless XT_SOCKET_NOWILDCARD is set */ wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) && - sk->sk_state != TCP_TIME_WAIT && + sk_fullsock(sk) && inet_sk(sk)->inet_rcv_saddr == 0); /* Ignore non-transparent sockets, - if XT_SOCKET_TRANSPARENT is used */ + * if XT_SOCKET_TRANSPARENT is used + */ if (info->flags & XT_SOCKET_TRANSPARENT) - transparent = ((sk->sk_state != TCP_TIME_WAIT && - inet_sk(sk)->transparent) || - (sk->sk_state == TCP_TIME_WAIT && - inet_twsk(sk)->tw_transparent)); + transparent = xt_socket_sk_is_transparent(sk); if (sk != skb->sk) sock_gen_put(sk); @@ -363,16 +375,14 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) * unless XT_SOCKET_NOWILDCARD is set */ wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) && - sk->sk_state != TCP_TIME_WAIT && + sk_fullsock(sk) && ipv6_addr_any(&sk->sk_v6_rcv_saddr)); /* Ignore non-transparent sockets, - if XT_SOCKET_TRANSPARENT is used */ + * if 
XT_SOCKET_TRANSPARENT is used + */ if (info->flags & XT_SOCKET_TRANSPARENT) - transparent = ((sk->sk_state != TCP_TIME_WAIT && - inet_sk(sk)->transparent) || - (sk->sk_state == TCP_TIME_WAIT && - inet_twsk(sk)->tw_transparent)); + transparent = xt_socket_sk_is_transparent(sk); if (sk != skb->sk) sock_gen_put(sk); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 6b0f21950e09..651792141f07 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -116,6 +116,8 @@ static ATOMIC_NOTIFIER_HEAD(netlink_chain); static DEFINE_SPINLOCK(netlink_tap_lock); static struct list_head netlink_tap_all __read_mostly; +static const struct rhashtable_params netlink_rhashtable_params; + static inline u32 netlink_group_mask(u32 group) { return group ? 1 << (group - 1) : 0; @@ -970,41 +972,50 @@ netlink_unlock_table(void) struct netlink_compare_arg { - struct net *net; + possible_net_t pnet; u32 portid; }; -static bool netlink_compare(void *ptr, void *arg) +/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */ +#define netlink_compare_arg_len \ + (offsetof(struct netlink_compare_arg, portid) + sizeof(u32)) + +static inline int netlink_compare(struct rhashtable_compare_arg *arg, + const void *ptr) { - struct netlink_compare_arg *x = arg; - struct sock *sk = ptr; + const struct netlink_compare_arg *x = arg->key; + const struct netlink_sock *nlk = ptr; - return nlk_sk(sk)->portid == x->portid && - net_eq(sock_net(sk), x->net); + return nlk->portid != x->portid || + !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet)); +} + +static void netlink_compare_arg_init(struct netlink_compare_arg *arg, + struct net *net, u32 portid) +{ + memset(arg, 0, sizeof(*arg)); + write_pnet(&arg->pnet, net); + arg->portid = portid; } static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid, struct net *net) { - struct netlink_compare_arg arg = { - .net = net, - .portid = portid, - }; + struct netlink_compare_arg arg; - return rhashtable_lookup_compare(&table->hash, &portid, - &netlink_compare, &arg); + netlink_compare_arg_init(&arg, net, portid); + return rhashtable_lookup_fast(&table->hash, &arg, + netlink_rhashtable_params); } -static bool __netlink_insert(struct netlink_table *table, struct sock *sk) +static int __netlink_insert(struct netlink_table *table, struct sock *sk) { - struct netlink_compare_arg arg = { - .net = sock_net(sk), - .portid = nlk_sk(sk)->portid, - }; + struct netlink_compare_arg arg; - return rhashtable_lookup_compare_insert(&table->hash, - &nlk_sk(sk)->node, - &netlink_compare, &arg); + netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid); + return rhashtable_lookup_insert_key(&table->hash, &arg, + &nlk_sk(sk)->node, + netlink_rhashtable_params); } static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) @@ -1066,9 +1077,10 @@ static int netlink_insert(struct sock *sk, u32 portid) nlk_sk(sk)->portid = portid; sock_hold(sk); - err = 0; - if (!__netlink_insert(table, sk)) { - err = -EADDRINUSE; + err = __netlink_insert(table, sk); + if (err) { + if (err == -EEXIST) + err = -EADDRINUSE; sock_put(sk); } @@ -1082,7 +1094,8 @@ static void netlink_remove(struct sock *sk) struct netlink_table *table; table = &nl_table[sk->sk_protocol]; - if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) { + if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node, + netlink_rhashtable_params)) { WARN_ON(atomic_read(&sk->sk_refcnt) == 1); __sock_put(sk); } @@ -3114,17 +3127,28 @@ static struct pernet_operations __net_initdata 
netlink_net_ops = { .exit = netlink_net_exit, }; +static inline u32 netlink_hash(const void *data, u32 seed) +{ + const struct netlink_sock *nlk = data; + struct netlink_compare_arg arg; + + netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid); + return jhash(&arg, netlink_compare_arg_len, seed); +} + +static const struct rhashtable_params netlink_rhashtable_params = { + .head_offset = offsetof(struct netlink_sock, node), + .key_len = netlink_compare_arg_len, + .hashfn = jhash, + .obj_hashfn = netlink_hash, + .obj_cmpfn = netlink_compare, + .max_size = 65536, +}; + static int __init netlink_proto_init(void) { int i; int err = proto_register(&netlink_proto, 0); - struct rhashtable_params ht_params = { - .head_offset = offsetof(struct netlink_sock, node), - .key_offset = offsetof(struct netlink_sock, portid), - .key_len = sizeof(u32), /* portid */ - .hashfn = jhash, - .max_shift = 16, /* 64K */ - }; if (err != 0) goto out; @@ -3136,7 +3160,8 @@ static int __init netlink_proto_init(void) goto panic; for (i = 0; i < MAX_LINKS; i++) { - if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) { + if (rhashtable_init(&nl_table[i].hash, + &netlink_rhashtable_params) < 0) { while (--i > 0) rhashtable_destroy(&nl_table[i].hash); kfree(nl_table); diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 5bae7243c577..096c6276e6b9 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -203,7 +203,6 @@ static void destroy_dp_rcu(struct rcu_head *rcu) ovs_flow_tbl_destroy(&dp->table); free_percpu(dp->stats_percpu); - release_net(ovs_dp_get_net(dp)); kfree(dp->ports); kfree(dp); } @@ -1501,7 +1500,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) if (dp == NULL) goto err_free_reply; - ovs_dp_set_net(dp, hold_net(sock_net(skb->sk))); + ovs_dp_set_net(dp, sock_net(skb->sk)); /* Allocate table. */ err = ovs_flow_tbl_init(&dp->table); @@ -1575,7 +1574,6 @@ err_destroy_percpu: err_destroy_table: ovs_flow_tbl_destroy(&dp->table); err_free_dp: - release_net(ovs_dp_get_net(dp)); kfree(dp); err_free_reply: kfree_skb(reply); diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 3ece94563079..4ec4a480b147 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -84,10 +84,8 @@ struct datapath { /* Stats. */ struct dp_stats_percpu __percpu *stats_percpu; -#ifdef CONFIG_NET_NS /* Network namespace ref. 
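Looking back at the netlink rhashtable conversion above: netlink_compare_arg_len is derived with offsetof() because hashing sizeof(struct netlink_compare_arg) bytes would feed tail padding into jhash on 64-bit, which is also why netlink_compare_arg_init() memsets the struct before filling it in. A standalone sketch of the padding arithmetic (local types, for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Shape of the key: a pointer-sized net handle followed by a u32 port id. */
struct compare_arg {
    void    *pnet;
    uint32_t portid;
};

/* The key length stops right after portid; sizeof would add tail padding. */
#define compare_arg_len (offsetof(struct compare_arg, portid) + sizeof(uint32_t))

int main(void)
{
    /* On LP64: sizeof is 16 while the key length is 12. Hashing all 16
     * bytes would mix 4 uninitialized padding bytes into every hash. */
    printf("sizeof=%zu keylen=%zu\n",
           sizeof(struct compare_arg), compare_arg_len);
    return 0;
}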
*/ - struct net *net; -#endif + possible_net_t net; u32 user_features; }; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index b91ac5946ad1..5102c3cc4eec 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1916,14 +1916,19 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, } } - if (skb->ip_summed == CHECKSUM_PARTIAL) - status |= TP_STATUS_CSUMNOTREADY; - snaplen = skb->len; res = run_filter(skb, sk, snaplen); if (!res) goto drop_n_restore; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + status |= TP_STATUS_CSUMNOTREADY; + else if (skb->pkt_type != PACKET_OUTGOING && + (skb->ip_summed == CHECKSUM_COMPLETE || + skb_csum_unnecessary(skb))) + status |= TP_STATUS_CSUM_VALID; + if (snaplen > res) snaplen = res; @@ -3030,6 +3035,11 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, aux.tp_status = TP_STATUS_USER; if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; + else if (skb->pkt_type != PACKET_OUTGOING && + (skb->ip_summed == CHECKSUM_COMPLETE || + skb_csum_unnecessary(skb))) + aux.tp_status |= TP_STATUS_CSUM_VALID; + aux.tp_len = origlen; aux.tp_snaplen = skb->len; aux.tp_mac = 0; diff --git a/net/packet/internal.h b/net/packet/internal.h index cdddf6a30399..fe6e20caea1d 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -74,9 +74,7 @@ extern struct mutex fanout_mutex; #define PACKET_FANOUT_MAX 256 struct packet_fanout { -#ifdef CONFIG_NET_NS - struct net *net; -#endif + possible_net_t net; unsigned int num_members; u16 id; u8 type; diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index a817705ce2d0..dba8d0864f18 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c @@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, int *unpinned); static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); -static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) +static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst, + struct rds_iw_device **rds_iwdev, + struct rdma_cm_id **cm_id) { struct rds_iw_device *iwdev; struct rds_iw_cm_id *i_cm_id; @@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd src_addr->sin_port, dst_addr->sin_addr.s_addr, dst_addr->sin_port, - rs->rs_bound_addr, - rs->rs_bound_port, - rs->rs_conn_addr, - rs->rs_conn_port); + src->sin_addr.s_addr, + src->sin_port, + dst->sin_addr.s_addr, + dst->sin_port); #ifdef WORKING_TUPLE_DETECTION - if (src_addr->sin_addr.s_addr == rs->rs_bound_addr && - src_addr->sin_port == rs->rs_bound_port && - dst_addr->sin_addr.s_addr == rs->rs_conn_addr && - dst_addr->sin_port == rs->rs_conn_port) { + if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr && + src_addr->sin_port == src->sin_port && + dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr && + dst_addr->sin_port == dst->sin_port) { #else /* FIXME - needs to compare the local and remote * ipaddr/port tuple, but the ipaddr is the only @@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd * zero'ed. It doesn't appear to be properly populated * during connection setup... 
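On the af_packet hunks above: the new TP_STATUS_CSUM_VALID bit is reported only for received packets whose checksum the stack has already verified or computed, while CHECKSUM_PARTIAL keeps mapping to TP_STATUS_CSUMNOTREADY. A condensed sketch of that decision; the TP_STATUS_* values match if_packet.h, the ip_summed states are stubbed locally, and skb_csum_unnecessary() is reduced here to a plain state test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY,
       CHECKSUM_COMPLETE, CHECKSUM_PARTIAL };
#define TP_STATUS_CSUMNOTREADY (1 << 3)
#define TP_STATUS_CSUM_VALID   (1 << 7)

static uint32_t csum_status(int ip_summed, bool outgoing)
{
    if (ip_summed == CHECKSUM_PARTIAL)
        return TP_STATUS_CSUMNOTREADY;   /* checksum not filled in yet */
    if (!outgoing && (ip_summed == CHECKSUM_COMPLETE ||
                      ip_summed == CHECKSUM_UNNECESSARY))
        return TP_STATUS_CSUM_VALID;     /* verified on receive */
    return 0;
}

int main(void)
{
    printf("0x%x\n", (unsigned)csum_status(CHECKSUM_UNNECESSARY, false));
    return 0; /* prints 0x80 */
}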
*/ - if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) { + if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) { #endif spin_unlock_irq(&iwdev->spinlock); *rds_iwdev = iwdev; @@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i { struct sockaddr_in *src_addr, *dst_addr; struct rds_iw_device *rds_iwdev_old; - struct rds_sock rs; struct rdma_cm_id *pcm_id; int rc; src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr; dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr; - rs.rs_bound_addr = src_addr->sin_addr.s_addr; - rs.rs_bound_port = src_addr->sin_port; - rs.rs_conn_addr = dst_addr->sin_addr.s_addr; - rs.rs_conn_port = dst_addr->sin_port; - - rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id); + rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id); if (rc) rds_iw_remove_cm_id(rds_iwdev, cm_id); @@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents, struct rds_iw_device *rds_iwdev; struct rds_iw_mr *ibmr = NULL; struct rdma_cm_id *cm_id; + struct sockaddr_in src = { + .sin_addr.s_addr = rs->rs_bound_addr, + .sin_port = rs->rs_bound_port, + }; + struct sockaddr_in dst = { + .sin_addr.s_addr = rs->rs_conn_addr, + .sin_port = rs->rs_conn_port, + }; int ret; - ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id); + ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id); if (ret || !cm_id) { ret = -ENODEV; goto out; diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index a4f883e2d66f..b92beded7459 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c @@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (!skb) { /* nothing remains on the queue */ if (copied && - (msg->msg_flags & MSG_PEEK || timeo == 0)) + (flags & MSG_PEEK || timeo == 0)) goto out; /* wait for a message to turn up */ diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 82c5d7fc1988..4d2cede17468 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -13,71 +13,140 @@ #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/filter.h> +#include <linux/bpf.h> + #include <net/netlink.h> #include <net/pkt_sched.h> #include <linux/tc_act/tc_bpf.h> #include <net/tc_act/tc_bpf.h> -#define BPF_TAB_MASK 15 +#define BPF_TAB_MASK 15 +#define ACT_BPF_NAME_LEN 256 + +struct tcf_bpf_cfg { + struct bpf_prog *filter; + struct sock_filter *bpf_ops; + char *bpf_name; + u32 bpf_fd; + u16 bpf_num_ops; +}; -static int tcf_bpf(struct sk_buff *skb, const struct tc_action *a, +static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, struct tcf_result *res) { - struct tcf_bpf *b = a->priv; - int action; - int filter_res; - - spin_lock(&b->tcf_lock); - b->tcf_tm.lastuse = jiffies; - bstats_update(&b->tcf_bstats, skb); - action = b->tcf_action; - - filter_res = BPF_PROG_RUN(b->filter, skb); - if (filter_res == 0) { - /* Return code 0 from the BPF program - * is being interpreted as a drop here. - */ - action = TC_ACT_SHOT; - b->tcf_qstats.drops++; + struct tcf_bpf *prog = act->priv; + int action, filter_res; + + spin_lock(&prog->tcf_lock); + + prog->tcf_tm.lastuse = jiffies; + bstats_update(&prog->tcf_bstats, skb); + + /* Needed here for accessing maps. */ + rcu_read_lock(); + filter_res = BPF_PROG_RUN(prog->filter, skb); + rcu_read_unlock(); + + /* A BPF program may overwrite the default action opcode. + * As in cls_bpf, if filter_res == -1 we use the + * default action specified by tc. 
+ * + * In case a different well-known TC_ACT opcode has been + * returned, it will overwrite the default one. + * + * For everything else that is unknown, TC_ACT_UNSPEC is + * returned. + */ + switch (filter_res) { + case TC_ACT_PIPE: + case TC_ACT_RECLASSIFY: + case TC_ACT_OK: + action = filter_res; + break; + case TC_ACT_SHOT: + action = filter_res; + prog->tcf_qstats.drops++; + break; + case TC_ACT_UNSPEC: + action = prog->tcf_action; + break; + default: + action = TC_ACT_UNSPEC; + break; } - spin_unlock(&b->tcf_lock); + spin_unlock(&prog->tcf_lock); return action; } -static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *a, +static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog) +{ + return !prog->bpf_ops; +} + +static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog, + struct sk_buff *skb) +{ + struct nlattr *nla; + + if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops)) + return -EMSGSIZE; + + nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops * + sizeof(struct sock_filter)); + if (nla == NULL) + return -EMSGSIZE; + + memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla)); + + return 0; +} + +static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog, + struct sk_buff *skb) +{ + if (nla_put_u32(skb, TCA_ACT_BPF_FD, prog->bpf_fd)) + return -EMSGSIZE; + + if (prog->bpf_name && + nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name)) + return -EMSGSIZE; + + return 0; +} + +static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref) { unsigned char *tp = skb_tail_pointer(skb); - struct tcf_bpf *b = a->priv; + struct tcf_bpf *prog = act->priv; struct tc_act_bpf opt = { - .index = b->tcf_index, - .refcnt = b->tcf_refcnt - ref, - .bindcnt = b->tcf_bindcnt - bind, - .action = b->tcf_action, + .index = prog->tcf_index, + .refcnt = prog->tcf_refcnt - ref, + .bindcnt = prog->tcf_bindcnt - bind, + .action = prog->tcf_action, }; - struct tcf_t t; - struct nlattr *nla; + struct tcf_t tm; + int ret; if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt)) goto nla_put_failure; - if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, b->bpf_num_ops)) - goto nla_put_failure; - - nla = nla_reserve(skb, TCA_ACT_BPF_OPS, b->bpf_num_ops * - sizeof(struct sock_filter)); - if (!nla) + if (tcf_bpf_is_ebpf(prog)) + ret = tcf_bpf_dump_ebpf_info(prog, skb); + else + ret = tcf_bpf_dump_bpf_info(prog, skb); + if (ret) goto nla_put_failure; - memcpy(nla_data(nla), b->bpf_ops, nla_len(nla)); + tm.install = jiffies_to_clock_t(jiffies - prog->tcf_tm.install); + tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse); + tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires); - t.install = jiffies_to_clock_t(jiffies - b->tcf_tm.install); - t.lastuse = jiffies_to_clock_t(jiffies - b->tcf_tm.lastuse); - t.expires = jiffies_to_clock_t(b->tcf_tm.expires); - if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(t), &t)) + if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm)) goto nla_put_failure; + return skb->len; nla_put_failure: @@ -87,36 +156,21 @@ nla_put_failure: static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = { [TCA_ACT_BPF_PARMS] = { .len = sizeof(struct tc_act_bpf) }, + [TCA_ACT_BPF_FD] = { .type = NLA_U32 }, + [TCA_ACT_BPF_NAME] = { .type = NLA_NUL_STRING, .len = ACT_BPF_NAME_LEN }, [TCA_ACT_BPF_OPS_LEN] = { .type = NLA_U16 }, [TCA_ACT_BPF_OPS] = { .type = NLA_BINARY, .len = sizeof(struct sock_filter) * BPF_MAXINSNS }, }; -static int tcf_bpf_init(struct net *net, struct nlattr *nla, - struct nlattr *est, struct tc_action *a, - int ovr, int bind) +static int
tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg) { - struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; - struct tc_act_bpf *parm; - struct tcf_bpf *b; - u16 bpf_size, bpf_num_ops; struct sock_filter *bpf_ops; - struct sock_fprog_kern tmp; + struct sock_fprog_kern fprog_tmp; struct bpf_prog *fp; + u16 bpf_size, bpf_num_ops; int ret; - if (!nla) - return -EINVAL; - - ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy); - if (ret < 0) - return ret; - - if (!tb[TCA_ACT_BPF_PARMS] || - !tb[TCA_ACT_BPF_OPS_LEN] || !tb[TCA_ACT_BPF_OPS]) - return -EINVAL; - parm = nla_data(tb[TCA_ACT_BPF_PARMS]); - bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]); if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0) return -EINVAL; @@ -126,68 +180,165 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, return -EINVAL; bpf_ops = kzalloc(bpf_size, GFP_KERNEL); - if (!bpf_ops) + if (bpf_ops == NULL) return -ENOMEM; memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size); - tmp.len = bpf_num_ops; - tmp.filter = bpf_ops; + fprog_tmp.len = bpf_num_ops; + fprog_tmp.filter = bpf_ops; - ret = bpf_prog_create(&fp, &tmp); - if (ret) - goto free_bpf_ops; + ret = bpf_prog_create(&fp, &fprog_tmp); + if (ret < 0) { + kfree(bpf_ops); + return ret; + } - if (!tcf_hash_check(parm->index, a, bind)) { - ret = tcf_hash_create(parm->index, est, a, sizeof(*b), bind); - if (ret) + cfg->bpf_ops = bpf_ops; + cfg->bpf_num_ops = bpf_num_ops; + cfg->filter = fp; + + return 0; +} + +static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg) +{ + struct bpf_prog *fp; + char *name = NULL; + u32 bpf_fd; + + bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]); + + fp = bpf_prog_get(bpf_fd); + if (IS_ERR(fp)) + return PTR_ERR(fp); + + if (fp->type != BPF_PROG_TYPE_SCHED_ACT) { + bpf_prog_put(fp); + return -EINVAL; + } + + if (tb[TCA_ACT_BPF_NAME]) { + name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]), + nla_len(tb[TCA_ACT_BPF_NAME]), + GFP_KERNEL); + if (!name) { + bpf_prog_put(fp); + return -ENOMEM; + } + } + + cfg->bpf_fd = bpf_fd; + cfg->bpf_name = name; + cfg->filter = fp; + + return 0; +} + +static int tcf_bpf_init(struct net *net, struct nlattr *nla, + struct nlattr *est, struct tc_action *act, + int replace, int bind) +{ + struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; + struct tc_act_bpf *parm; + struct tcf_bpf *prog; + struct tcf_bpf_cfg cfg; + bool is_bpf, is_ebpf; + int ret; + + if (!nla) + return -EINVAL; + + ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy); + if (ret < 0) + return ret; + + is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS]; + is_ebpf = tb[TCA_ACT_BPF_FD]; + + if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) || + !tb[TCA_ACT_BPF_PARMS]) + return -EINVAL; + + parm = nla_data(tb[TCA_ACT_BPF_PARMS]); + + memset(&cfg, 0, sizeof(cfg)); + + ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) : + tcf_bpf_init_from_efd(tb, &cfg); + if (ret < 0) + return ret; + + if (!tcf_hash_check(parm->index, act, bind)) { + ret = tcf_hash_create(parm->index, est, act, + sizeof(*prog), bind); + if (ret < 0) goto destroy_fp; ret = ACT_P_CREATED; } else { + /* Don't override defaults. 
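tcf_bpf_init_from_efd() above consumes a file descriptor produced by a prior BPF_PROG_LOAD. A hedged userspace sketch of producing that fd (error handling and the netlink message that actually carries TCA_ACT_BPF_FD are omitted; the helper name is ours):

#include <unistd.h>
#include <linux/bpf.h>
#include <sys/syscall.h>

static int load_sched_act_prog(const struct bpf_insn *insns, unsigned int cnt)
{
	union bpf_attr attr = {
		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
		.insns     = (__u64)(unsigned long)insns,
		.insn_cnt  = cnt,
		.license   = (__u64)(unsigned long)"GPL",
	};

	/* a non-negative return is the fd that TCA_ACT_BPF_FD transports */
	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}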
*/ if (bind) goto destroy_fp; - tcf_hash_release(a, bind); - if (!ovr) { + + tcf_hash_release(act, bind); + if (!replace) { ret = -EEXIST; goto destroy_fp; } } - b = to_bpf(a); - spin_lock_bh(&b->tcf_lock); - b->tcf_action = parm->action; - b->bpf_num_ops = bpf_num_ops; - b->bpf_ops = bpf_ops; - b->filter = fp; - spin_unlock_bh(&b->tcf_lock); + prog = to_bpf(act); + spin_lock_bh(&prog->tcf_lock); + + prog->bpf_ops = cfg.bpf_ops; + prog->bpf_name = cfg.bpf_name; + + if (cfg.bpf_num_ops) + prog->bpf_num_ops = cfg.bpf_num_ops; + if (cfg.bpf_fd) + prog->bpf_fd = cfg.bpf_fd; + + prog->tcf_action = parm->action; + prog->filter = cfg.filter; + + spin_unlock_bh(&prog->tcf_lock); if (ret == ACT_P_CREATED) - tcf_hash_insert(a); + tcf_hash_insert(act); + return ret; destroy_fp: - bpf_prog_destroy(fp); -free_bpf_ops: - kfree(bpf_ops); + if (is_ebpf) + bpf_prog_put(cfg.filter); + else + bpf_prog_destroy(cfg.filter); + + kfree(cfg.bpf_ops); + kfree(cfg.bpf_name); + return ret; } -static void tcf_bpf_cleanup(struct tc_action *a, int bind) +static void tcf_bpf_cleanup(struct tc_action *act, int bind) { - struct tcf_bpf *b = a->priv; + const struct tcf_bpf *prog = act->priv; - bpf_prog_destroy(b->filter); + if (tcf_bpf_is_ebpf(prog)) + bpf_prog_put(prog->filter); + else + bpf_prog_destroy(prog->filter); } -static struct tc_action_ops act_bpf_ops = { - .kind = "bpf", - .type = TCA_ACT_BPF, - .owner = THIS_MODULE, - .act = tcf_bpf, - .dump = tcf_bpf_dump, - .cleanup = tcf_bpf_cleanup, - .init = tcf_bpf_init, +static struct tc_action_ops act_bpf_ops __read_mostly = { + .kind = "bpf", + .type = TCA_ACT_BPF, + .owner = THIS_MODULE, + .act = tcf_bpf, + .dump = tcf_bpf_dump, + .cleanup = tcf_bpf_cleanup, + .init = tcf_bpf_init, }; static int __init bpf_init_module(void) diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 243c9f225a73..5c4171c5d2bd 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -64,8 +64,10 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, { struct cls_bpf_head *head = rcu_dereference_bh(tp->root); struct cls_bpf_prog *prog; - int ret; + int ret = -1; + /* Needed here for accessing maps. */ + rcu_read_lock(); list_for_each_entry_rcu(prog, &head->plist, link) { int filter_res = BPF_PROG_RUN(prog->filter, skb); @@ -80,10 +82,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, if (ret < 0) continue; - return ret; + break; } + rcu_read_unlock(); - return -1; + return ret; } static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 375e51b71c80..cab9e9b43967 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -78,8 +78,11 @@ struct tc_u_hnode { struct tc_u_common *tp_c; int refcnt; unsigned int divisor; - struct tc_u_knode __rcu *ht[1]; struct rcu_head rcu; + /* The 'ht' field MUST be the last field in structure to allow for + * more entries allocated at end of structure. 
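The layout constraint documented in the comment above exists because the hash table is allocated with extra bucket storage past the end of the struct; with 'ht' anywhere but last, those buckets would overlay other fields. The allocation pattern looks like this (mirroring cls_u32's change path, with 'divisor' buckets assumed):

struct tc_u_hnode *ht;

/* one header plus 'divisor' extra hash buckets beyond ht[1] */
ht = kzalloc(sizeof(*ht) + divisor * sizeof(void *), GFP_KERNEL);
if (ht == NULL)
	return -ENOBUFS;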
+ */ + struct tc_u_knode __rcu *ht[1]; }; struct tc_u_common { diff --git a/net/socket.c b/net/socket.c index 95d3085cb477..073809f4125f 100644 --- a/net/socket.c +++ b/net/socket.c @@ -798,7 +798,8 @@ static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct socket *sock = file->private_data; - struct msghdr msg = {.msg_iter = *to}; + struct msghdr msg = {.msg_iter = *to, + .msg_iocb = iocb}; ssize_t res; if (file->f_flags & O_NONBLOCK) @@ -819,7 +820,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct socket *sock = file->private_data; - struct msghdr msg = {.msg_iter = *from}; + struct msghdr msg = {.msg_iter = *from, + .msg_iocb = iocb}; ssize_t res; if (iocb->ki_pos != 0) @@ -1650,6 +1652,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, if (len > INT_MAX) len = INT_MAX; + if (unlikely(!access_ok(VERIFY_READ, buff, len))) + return -EFAULT; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; @@ -1708,6 +1712,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, if (size > INT_MAX) size = INT_MAX; + if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size))) + return -EFAULT; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; @@ -1890,6 +1896,8 @@ static ssize_t copy_msghdr_from_user(struct msghdr *kmsg, if (nr_segs > UIO_MAXIOV) return -EMSGSIZE; + kmsg->msg_iocb = NULL; + err = rw_copy_check_uvector(save_addr ? READ : WRITE, uiov, nr_segs, UIO_FASTIOV, *iov, iov); diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 8cf42a69baf4..46568b85c333 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -28,11 +28,11 @@ int netdev_switch_parent_id_get(struct net_device *dev, struct netdev_phys_item_id *psid) { - const struct net_device_ops *ops = dev->netdev_ops; + const struct swdev_ops *ops = dev->swdev_ops; - if (!ops->ndo_switch_parent_id_get) + if (!ops || !ops->swdev_parent_id_get) return -EOPNOTSUPP; - return ops->ndo_switch_parent_id_get(dev, psid); + return ops->swdev_parent_id_get(dev, psid); } EXPORT_SYMBOL_GPL(netdev_switch_parent_id_get); @@ -46,12 +46,21 @@ EXPORT_SYMBOL_GPL(netdev_switch_parent_id_get); */ int netdev_switch_port_stp_update(struct net_device *dev, u8 state) { - const struct net_device_ops *ops = dev->netdev_ops; + const struct swdev_ops *ops = dev->swdev_ops; + struct net_device *lower_dev; + struct list_head *iter; + int err = -EOPNOTSUPP; - if (!ops->ndo_switch_port_stp_update) - return -EOPNOTSUPP; - WARN_ON(!ops->ndo_switch_parent_id_get); - return ops->ndo_switch_port_stp_update(dev, state); + if (ops && ops->swdev_port_stp_update) + return ops->swdev_port_stp_update(dev, state); + + netdev_for_each_lower_dev(dev, lower_dev, iter) { + err = netdev_switch_port_stp_update(lower_dev, state); + if (err && err != -EOPNOTSUPP) + return err; + } + + return err; } EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update); @@ -59,7 +68,7 @@ static DEFINE_MUTEX(netdev_switch_mutex); static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain); /** - * register_netdev_switch_notifier - Register nofifier + * register_netdev_switch_notifier - Register notifier * @nb: notifier_block * * Register switch device notifier. 
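For driver authors, the switchdev conversion below replaces the ndo_switch_* hooks with a dedicated ops struct. A driver-side sketch (the foo_* callbacks are hypothetical; the field names follow the swdev_ops accesses in the hunks that follow):

static const struct swdev_ops foo_swdev_ops = {
	.swdev_parent_id_get	= foo_parent_id_get,
	.swdev_port_stp_update	= foo_port_stp_update,
	.swdev_fib_ipv4_add	= foo_fib_ipv4_add,
	.swdev_fib_ipv4_del	= foo_fib_ipv4_del,
};

/* in the driver's netdev setup path */
netdev->swdev_ops = &foo_swdev_ops;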
This should be used by code @@ -78,7 +87,7 @@ int register_netdev_switch_notifier(struct notifier_block *nb) EXPORT_SYMBOL_GPL(register_netdev_switch_notifier); /** - * unregister_netdev_switch_notifier - Unregister nofifier + * unregister_netdev_switch_notifier - Unregister notifier * @nb: notifier_block * * Unregister switch device notifier. @@ -96,7 +105,7 @@ int unregister_netdev_switch_notifier(struct notifier_block *nb) EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier); /** - * call_netdev_switch_notifiers - Call nofifiers + * call_netdev_switch_notifiers - Call notifiers * @val: value passed unmodified to notifier function * @dev: port device * @info: notifier information data @@ -230,17 +239,17 @@ EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_dellink); static struct net_device *netdev_switch_get_lowest_dev(struct net_device *dev) { - const struct net_device_ops *ops = dev->netdev_ops; + const struct swdev_ops *ops = dev->swdev_ops; struct net_device *lower_dev; struct net_device *port_dev; struct list_head *iter; /* Recursively search down until we find a sw port dev. - * (A sw port dev supports ndo_switch_parent_id_get). + * (A sw port dev supports swdev_parent_id_get). */ if (dev->features & NETIF_F_HW_SWITCH_OFFLOAD && - ops->ndo_switch_parent_id_get) + ops && ops->swdev_parent_id_get) return dev; netdev_for_each_lower_dev(dev, lower_dev, iter) { @@ -304,7 +313,7 @@ int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, u8 tos, u8 type, u32 nlflags, u32 tb_id) { struct net_device *dev; - const struct net_device_ops *ops; + const struct swdev_ops *ops; int err = 0; /* Don't offload route if using custom ip rules or if @@ -322,12 +331,12 @@ int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, dev = netdev_switch_get_dev_by_nhs(fi); if (!dev) return 0; - ops = dev->netdev_ops; + ops = dev->swdev_ops; - if (ops->ndo_switch_fib_ipv4_add) { - err = ops->ndo_switch_fib_ipv4_add(dev, htonl(dst), dst_len, - fi, tos, type, nlflags, - tb_id); + if (ops->swdev_fib_ipv4_add) { + err = ops->swdev_fib_ipv4_add(dev, htonl(dst), dst_len, + fi, tos, type, nlflags, + tb_id); if (!err) fi->fib_flags |= RTNH_F_EXTERNAL; } @@ -352,7 +361,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, u8 tos, u8 type, u32 tb_id) { struct net_device *dev; - const struct net_device_ops *ops; + const struct swdev_ops *ops; int err = 0; if (!(fi->fib_flags & RTNH_F_EXTERNAL)) @@ -361,11 +370,11 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, dev = netdev_switch_get_dev_by_nhs(fi); if (!dev) return 0; - ops = dev->netdev_ops; + ops = dev->swdev_ops; - if (ops->ndo_switch_fib_ipv4_del) { - err = ops->ndo_switch_fib_ipv4_del(dev, htonl(dst), dst_len, - fi, tos, type, tb_id); + if (ops->swdev_fib_ipv4_del) { + err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len, + fi, tos, type, tb_id); if (!err) fi->fib_flags &= ~RTNH_F_EXTERNAL; } diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 3e41704832de..5aff0844d4d3 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -135,9 +135,10 @@ static void bclink_set_last_sent(struct net *net) { struct tipc_net *tn = net_generic(net, tipc_net_id); struct tipc_link *bcl = tn->bcl; + struct sk_buff *skb = skb_peek(&bcl->backlogq); - if (bcl->next_out) - bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1); + if (skb) + bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1); else bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1); } @@ -180,7 +181,7 @@ static void bclink_retransmit_pkt(struct
tipc_net *tn, u32 after, u32 to) struct sk_buff *skb; struct tipc_link *bcl = tn->bcl; - skb_queue_walk(&bcl->outqueue, skb) { + skb_queue_walk(&bcl->transmq, skb) { if (more(buf_seqno(skb), after)) { tipc_link_retransmit(bcl, skb, mod(to - after)); break; @@ -210,14 +211,17 @@ void tipc_bclink_wakeup_users(struct net *net) void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) { struct sk_buff *skb, *tmp; - struct sk_buff *next; unsigned int released = 0; struct net *net = n_ptr->net; struct tipc_net *tn = net_generic(net, tipc_net_id); + if (unlikely(!n_ptr->bclink.recv_permitted)) + return; + tipc_bclink_lock(net); + /* Bail out if tx queue is empty (no clean up is required) */ - skb = skb_peek(&tn->bcl->outqueue); + skb = skb_peek(&tn->bcl->transmq); if (!skb) goto exit; @@ -244,27 +248,19 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) } /* Skip over packets that node has previously acknowledged */ - skb_queue_walk(&tn->bcl->outqueue, skb) { + skb_queue_walk(&tn->bcl->transmq, skb) { if (more(buf_seqno(skb), n_ptr->bclink.acked)) break; } /* Update packets that node is now acknowledging */ - skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) { + skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) { if (more(buf_seqno(skb), acked)) break; - - next = tipc_skb_queue_next(&tn->bcl->outqueue, skb); - if (skb != tn->bcl->next_out) { - bcbuf_decr_acks(skb); - } else { - bcbuf_set_acks(skb, 0); - tn->bcl->next_out = next; - bclink_set_last_sent(net); - } - + bcbuf_decr_acks(skb); + bclink_set_last_sent(net); if (bcbuf_acks(skb) == 0) { - __skb_unlink(skb, &tn->bcl->outqueue); + __skb_unlink(skb, &tn->bcl->transmq); kfree_skb(skb); released = 1; } @@ -272,7 +268,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) n_ptr->bclink.acked = acked; /* Try resolving broadcast link congestion, if necessary */ - if (unlikely(tn->bcl->next_out)) { + if (unlikely(skb_peek(&tn->bcl->backlogq))) { tipc_link_push_packets(tn->bcl); bclink_set_last_sent(net); } @@ -319,7 +315,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, buf = tipc_buf_acquire(INT_H_SIZE); if (buf) { struct tipc_msg *msg = buf_msg(buf); - struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue); + struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq); u32 to = skb ? 
buf_seqno(skb) - 1 : n_ptr->bclink.last_sent; tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG, @@ -387,14 +383,13 @@ int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list) __skb_queue_purge(list); return -EHOSTUNREACH; } - /* Broadcast to all nodes */ if (likely(bclink)) { tipc_bclink_lock(net); if (likely(bclink->bcast_nodes.count)) { rc = __tipc_link_xmit(net, bcl, list); if (likely(!rc)) { - u32 len = skb_queue_len(&bcl->outqueue); + u32 len = skb_queue_len(&bcl->transmq); bclink_set_last_sent(net); bcl->stats.queue_sz_counts++; @@ -559,25 +554,25 @@ receive: if (node->bclink.last_in == node->bclink.last_sent) goto unlock; - if (skb_queue_empty(&node->bclink.deferred_queue)) { + if (skb_queue_empty(&node->bclink.deferdq)) { node->bclink.oos_state = 1; goto unlock; } - msg = buf_msg(skb_peek(&node->bclink.deferred_queue)); + msg = buf_msg(skb_peek(&node->bclink.deferdq)); seqno = msg_seqno(msg); next_in = mod(next_in + 1); if (seqno != next_in) goto unlock; /* Take in-sequence message from deferred queue & deliver it */ - buf = __skb_dequeue(&node->bclink.deferred_queue); + buf = __skb_dequeue(&node->bclink.deferdq); goto receive; } /* Handle out-of-sequence broadcast message */ if (less(next_in, seqno)) { - deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue, + deferred = tipc_link_defer_pkt(&node->bclink.deferdq, buf); bclink_update_last_sent(node, seqno); buf = NULL; @@ -634,7 +629,6 @@ static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf, msg_set_non_seq(msg, 1); msg_set_mc_netid(msg, tn->net_id); tn->bcl->stats.sent_info++; - if (WARN_ON(!bclink->bcast_nodes.count)) { dump_stack(); return 0; @@ -913,8 +907,9 @@ int tipc_bclink_init(struct net *net) sprintf(bcbearer->media.name, "tipc-broadcast"); spin_lock_init(&bclink->lock); - __skb_queue_head_init(&bcl->outqueue); - __skb_queue_head_init(&bcl->deferred_queue); + __skb_queue_head_init(&bcl->transmq); + __skb_queue_head_init(&bcl->backlogq); + __skb_queue_head_init(&bcl->deferdq); skb_queue_head_init(&bcl->wakeupq); bcl->next_out_no = 1; spin_lock_init(&bclink->node.lock); diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 5967506833ce..169f3dd038b9 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -89,6 +89,7 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type, MAX_H_SIZE, dest_domain); msg_set_non_seq(msg, 1); msg_set_node_sig(msg, tn->random); + msg_set_node_capabilities(msg, 0); msg_set_dest_domain(msg, dest_domain); msg_set_bc_netid(msg, tn->net_id); b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr); @@ -133,6 +134,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf, u32 net_id = msg_bc_netid(msg); u32 mtyp = msg_type(msg); u32 signature = msg_node_sig(msg); + u16 caps = msg_node_capabilities(msg); bool addr_match = false; bool sign_match = false; bool link_up = false; @@ -167,6 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf, if (!node) return; tipc_node_lock(node); + node->capabilities = caps; link = node->links[bearer->identity]; /* Prepare to validate requesting node's signature and media address */ diff --git a/net/tipc/link.c b/net/tipc/link.c index 98609fdfb06a..8c98c4d00ad6 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1,7 +1,7 @@ /* * net/tipc/link.c: TIPC link code * - * Copyright (c) 1996-2007, 2012-2014, Ericsson AB + * Copyright (c) 1996-2007, 2012-2015, Ericsson AB * Copyright (c) 2004-2007, 2010-2013, Wind River Systems * All rights reserved. 
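The renames running through these TIPC hunks split the old outqueue into a transmit queue and a backlog. Condensed to its decision shape, the new send side behaves roughly as below (a sketch, not verbatim code; the make-bundle fallback is omitted, see __tipc_link_xmit later in this diff):

/* transmq: sent but unacked, bounded by the link window
 * backlogq: accepted but unsent, drained as acks free window space */
if (skb_queue_len(&link->transmq) < link->window) {
	__skb_queue_tail(&link->transmq, skb);
	tipc_bearer_send(net, link->bearer_id, skb, addr);
} else if (!tipc_msg_bundle(skb_peek_tail(&link->backlogq), skb, mtu)) {
	__skb_queue_tail(&link->backlogq, skb);
}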
* @@ -35,6 +35,7 @@ */ #include "core.h" +#include "subscr.h" #include "link.h" #include "bcast.h" #include "socket.h" @@ -194,10 +195,10 @@ static void link_timeout(unsigned long data) tipc_node_lock(l_ptr->owner); /* update counters used in statistical profiling of send traffic */ - l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue); + l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq); l_ptr->stats.queue_sz_counts++; - skb = skb_peek(&l_ptr->outqueue); + skb = skb_peek(&l_ptr->transmq); if (skb) { struct tipc_msg *msg = buf_msg(skb); u32 length = msg_size(msg); @@ -229,7 +230,7 @@ static void link_timeout(unsigned long data) /* do all other link processing performed on a periodic basis */ link_state_event(l_ptr, TIMEOUT_EVT); - if (l_ptr->next_out) + if (skb_queue_len(&l_ptr->backlogq)) tipc_link_push_packets(l_ptr); tipc_node_unlock(l_ptr->owner); @@ -305,16 +306,15 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, msg_set_session(msg, (tn->random & 0xffff)); msg_set_bearer_id(msg, b_ptr->identity); strcpy((char *)msg_data(msg), if_name); - - l_ptr->priority = b_ptr->priority; - tipc_link_set_queue_limits(l_ptr, b_ptr->window); - l_ptr->net_plane = b_ptr->net_plane; link_init_max_pkt(l_ptr); + l_ptr->priority = b_ptr->priority; + tipc_link_set_queue_limits(l_ptr, b_ptr->window); l_ptr->next_out_no = 1; - __skb_queue_head_init(&l_ptr->outqueue); - __skb_queue_head_init(&l_ptr->deferred_queue); + __skb_queue_head_init(&l_ptr->transmq); + __skb_queue_head_init(&l_ptr->backlogq); + __skb_queue_head_init(&l_ptr->deferdq); skb_queue_head_init(&l_ptr->wakeupq); skb_queue_head_init(&l_ptr->inputq); skb_queue_head_init(&l_ptr->namedq); @@ -400,7 +400,7 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport, */ void link_prepare_wakeup(struct tipc_link *link) { - uint pend_qsz = skb_queue_len(&link->outqueue); + uint pend_qsz = skb_queue_len(&link->backlogq); struct sk_buff *skb, *tmp; skb_queue_walk_safe(&link->wakeupq, skb, tmp) { @@ -430,8 +430,9 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr) */ void tipc_link_purge_queues(struct tipc_link *l_ptr) { - __skb_queue_purge(&l_ptr->deferred_queue); - __skb_queue_purge(&l_ptr->outqueue); + __skb_queue_purge(&l_ptr->deferdq); + __skb_queue_purge(&l_ptr->transmq); + __skb_queue_purge(&l_ptr->backlogq); tipc_link_reset_fragments(l_ptr); } @@ -464,15 +465,15 @@ void tipc_link_reset(struct tipc_link *l_ptr) } /* Clean up all queues, except inputq: */ - __skb_queue_purge(&l_ptr->outqueue); - __skb_queue_purge(&l_ptr->deferred_queue); + __skb_queue_purge(&l_ptr->transmq); + __skb_queue_purge(&l_ptr->backlogq); + __skb_queue_purge(&l_ptr->deferdq); if (!owner->inputq) owner->inputq = &l_ptr->inputq; skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq); if (!skb_queue_empty(owner->inputq)) owner->action_flags |= TIPC_MSG_EVT; - l_ptr->next_out = NULL; - l_ptr->unacked_window = 0; + l_ptr->rcv_unacked = 0; l_ptr->checkpoint = 1; l_ptr->next_out_no = 1; l_ptr->fsm_msg_cnt = 0; @@ -706,7 +707,7 @@ static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list) { struct sk_buff *skb = skb_peek(list); struct tipc_msg *msg = buf_msg(skb); - uint imp = tipc_msg_tot_importance(msg); + int imp = msg_importance(msg); u32 oport = msg_tot_origport(msg); if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { @@ -742,54 +743,51 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link, struct sk_buff_head *list) { struct tipc_msg *msg = buf_msg(skb_peek(list)); - uint psz = msg_size(msg); - uint 
sndlim = link->queue_limit[0]; - uint imp = tipc_msg_tot_importance(msg); + unsigned int maxwin = link->window; + unsigned int imp = msg_importance(msg); uint mtu = link->max_pkt; uint ack = mod(link->next_in_no - 1); uint seqno = link->next_out_no; uint bc_last_in = link->owner->bclink.last_in; struct tipc_media_addr *addr = &link->media_addr; - struct sk_buff_head *outqueue = &link->outqueue; + struct sk_buff_head *transmq = &link->transmq; + struct sk_buff_head *backlogq = &link->backlogq; struct sk_buff *skb, *tmp; - /* Match queue limits against msg importance: */ - if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp])) + /* Match queue limit against msg importance: */ + if (unlikely(skb_queue_len(backlogq) >= link->queue_limit[imp])) return tipc_link_cong(link, list); /* Has valid packet limit been used ? */ - if (unlikely(psz > mtu)) { + if (unlikely(msg_size(msg) > mtu)) { __skb_queue_purge(list); return -EMSGSIZE; } - /* Prepare each packet for sending, and add to outqueue: */ + /* Prepare each packet for sending, and add to relevant queue: */ skb_queue_walk_safe(list, skb, tmp) { __skb_unlink(skb, list); msg = buf_msg(skb); - msg_set_word(msg, 2, ((ack << 16) | mod(seqno))); + msg_set_seqno(msg, seqno); + msg_set_ack(msg, ack); msg_set_bcast_ack(msg, bc_last_in); - if (skb_queue_len(outqueue) < sndlim) { - __skb_queue_tail(outqueue, skb); - tipc_bearer_send(net, link->bearer_id, - skb, addr); - link->next_out = NULL; - link->unacked_window = 0; - } else if (tipc_msg_bundle(outqueue, skb, mtu)) { + if (likely(skb_queue_len(transmq) < maxwin)) { + __skb_queue_tail(transmq, skb); + tipc_bearer_send(net, link->bearer_id, skb, addr); + link->rcv_unacked = 0; + seqno++; + continue; + } + if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) { link->stats.sent_bundled++; continue; - } else if (tipc_msg_make_bundle(outqueue, skb, mtu, - link->addr)) { + } + if (tipc_msg_make_bundle(&skb, mtu, link->addr)) { link->stats.sent_bundled++; link->stats.sent_bundles++; - if (!link->next_out) - link->next_out = skb_peek_tail(outqueue); - } else { - __skb_queue_tail(outqueue, skb); - if (!link->next_out) - link->next_out = skb; } + __skb_queue_tail(backlogq, skb); seqno++; } link->next_out_no = seqno; @@ -847,8 +845,10 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode, if (link) return rc; - if (likely(in_own_node(net, dnode))) - return tipc_sk_rcv(net, list); + if (likely(in_own_node(net, dnode))) { + tipc_sk_rcv(net, list); + return 0; + } __skb_queue_purge(list); return rc; @@ -895,14 +895,6 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf) kfree_skb(buf); } -struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list, - const struct sk_buff *skb) -{ - if (skb_queue_is_last(list, skb)) - return NULL; - return skb->next; -} - /* * tipc_link_push_packets - push unsent packets to bearer * @@ -911,30 +903,23 @@ struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list, * * Called with node locked */ -void tipc_link_push_packets(struct tipc_link *l_ptr) +void tipc_link_push_packets(struct tipc_link *link) { - struct sk_buff_head *outqueue = &l_ptr->outqueue; - struct sk_buff *skb = l_ptr->next_out; + struct sk_buff *skb; struct tipc_msg *msg; - u32 next, first; + unsigned int ack = mod(link->next_in_no - 1); - skb_queue_walk_from(outqueue, skb) { - msg = buf_msg(skb); - next = msg_seqno(msg); - first = buf_seqno(skb_peek(outqueue)); - - if (mod(next - first) < l_ptr->queue_limit[0]) { - msg_set_ack(msg, 
mod(l_ptr->next_in_no - 1)); - msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); - if (msg_user(msg) == MSG_BUNDLER) - TIPC_SKB_CB(skb)->bundling = false; - tipc_bearer_send(l_ptr->owner->net, - l_ptr->bearer_id, skb, - &l_ptr->media_addr); - l_ptr->next_out = tipc_skb_queue_next(outqueue, skb); - } else { + while (skb_queue_len(&link->transmq) < link->window) { + skb = __skb_dequeue(&link->backlogq); + if (!skb) break; - } + msg = buf_msg(skb); + msg_set_ack(msg, ack); + msg_set_bcast_ack(msg, link->owner->bclink.last_in); + link->rcv_unacked = 0; + __skb_queue_tail(&link->transmq, skb); + tipc_bearer_send(link->owner->net, link->bearer_id, + skb, &link->media_addr); } } @@ -1021,8 +1006,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb, l_ptr->stale_count = 1; } - skb_queue_walk_from(&l_ptr->outqueue, skb) { - if (!retransmits || skb == l_ptr->next_out) + skb_queue_walk_from(&l_ptr->transmq, skb) { + if (!retransmits) break; msg = buf_msg(skb); msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); @@ -1039,67 +1024,12 @@ static void link_retrieve_defq(struct tipc_link *link, { u32 seq_no; - if (skb_queue_empty(&link->deferred_queue)) + if (skb_queue_empty(&link->deferdq)) return; - seq_no = buf_seqno(skb_peek(&link->deferred_queue)); + seq_no = buf_seqno(skb_peek(&link->deferdq)); if (seq_no == mod(link->next_in_no)) - skb_queue_splice_tail_init(&link->deferred_queue, list); -} - -/** - * link_recv_buf_validate - validate basic format of received message - * - * This routine ensures a TIPC message has an acceptable header, and at least - * as much data as the header indicates it should. The routine also ensures - * that the entire message header is stored in the main fragment of the message - * buffer, to simplify future access to message header fields. - * - * Note: Having extra info present in the message header or data areas is OK. - * TIPC will ignore the excess, under the assumption that it is optional info - * introduced by a later release of the protocol. - */ -static int link_recv_buf_validate(struct sk_buff *buf) -{ - static u32 min_data_hdr_size[8] = { - SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE, - MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE - }; - - struct tipc_msg *msg; - u32 tipc_hdr[2]; - u32 size; - u32 hdr_size; - u32 min_hdr_size; - - /* If this packet comes from the defer queue, the skb has already - * been validated - */ - if (unlikely(TIPC_SKB_CB(buf)->deferred)) - return 1; - - if (unlikely(buf->len < MIN_H_SIZE)) - return 0; - - msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr); - if (msg == NULL) - return 0; - - if (unlikely(msg_version(msg) != TIPC_VERSION)) - return 0; - - size = msg_size(msg); - hdr_size = msg_hdr_sz(msg); - min_hdr_size = msg_isdata(msg) ? 
- min_data_hdr_size[msg_type(msg)] : INT_H_SIZE; - - if (unlikely((hdr_size < min_hdr_size) || - (size < hdr_size) || - (buf->len < size) || - (size - hdr_size > TIPC_MAX_USER_MSG_SIZE))) - return 0; - - return pskb_may_pull(buf, hdr_size); + skb_queue_splice_tail_init(&link->deferdq, list); } /** @@ -1127,16 +1057,11 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) while ((skb = __skb_dequeue(&head))) { /* Ensure message is well-formed */ - if (unlikely(!link_recv_buf_validate(skb))) - goto discard; - - /* Ensure message data is a single contiguous unit */ - if (unlikely(skb_linearize(skb))) + if (unlikely(!tipc_msg_validate(skb))) goto discard; /* Handle arrival of a non-unicast link message */ msg = buf_msg(skb); - if (unlikely(msg_non_seq(msg))) { if (msg_user(msg) == LINK_CONFIG) tipc_disc_rcv(net, skb, b_ptr); @@ -1177,21 +1102,20 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) ackd = msg_ack(msg); /* Release acked messages */ - if (n_ptr->bclink.recv_permitted) + if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg))) tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); released = 0; - skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) { - if (skb1 == l_ptr->next_out || - more(buf_seqno(skb1), ackd)) + skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) { + if (more(buf_seqno(skb1), ackd)) break; - __skb_unlink(skb1, &l_ptr->outqueue); + __skb_unlink(skb1, &l_ptr->transmq); kfree_skb(skb1); released = 1; } /* Try sending any messages link endpoint has pending */ - if (unlikely(l_ptr->next_out)) + if (unlikely(skb_queue_len(&l_ptr->backlogq))) tipc_link_push_packets(l_ptr); if (released && !skb_queue_empty(&l_ptr->wakeupq)) @@ -1226,10 +1150,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) goto unlock; } l_ptr->next_in_no++; - if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue))) + if (unlikely(!skb_queue_empty(&l_ptr->deferdq))) link_retrieve_defq(l_ptr, &head); - - if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) { + if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) { l_ptr->stats.sent_acks++; tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); } @@ -1396,10 +1319,9 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, return; } - if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) { + if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) { l_ptr->stats.deferred_recv++; - TIPC_SKB_CB(buf)->deferred = true; - if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1) + if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1) tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); } else { l_ptr->stats.duplicates++; @@ -1436,11 +1358,11 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, if (!tipc_link_is_up(l_ptr)) return; - if (l_ptr->next_out) - next_sent = buf_seqno(l_ptr->next_out); + if (skb_queue_len(&l_ptr->backlogq)) + next_sent = buf_seqno(skb_peek(&l_ptr->backlogq)); msg_set_next_sent(msg, next_sent); - if (!skb_queue_empty(&l_ptr->deferred_queue)) { - u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue)); + if (!skb_queue_empty(&l_ptr->deferdq)) { + u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq)); gap = mod(rec - mod(l_ptr->next_in_no)); } msg_set_seq_gap(msg, gap); @@ -1492,10 +1414,9 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); buf->priority = TC_PRIO_CONTROL; - tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf, 
&l_ptr->media_addr); - l_ptr->unacked_window = 0; + l_ptr->rcv_unacked = 0; kfree_skb(buf); } @@ -1630,7 +1551,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, } if (msg_seq_gap(msg)) { l_ptr->stats.recv_nacks++; - tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue), + tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq), msg_seq_gap(msg)); } break; @@ -1677,7 +1598,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, */ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) { - u32 msgcount = skb_queue_len(&l_ptr->outqueue); + int msgcount; struct tipc_link *tunnel = l_ptr->owner->active_links[0]; struct tipc_msg tunnel_hdr; struct sk_buff *skb; @@ -1688,10 +1609,12 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL, ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); + skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq); + msgcount = skb_queue_len(&l_ptr->transmq); msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); msg_set_msgcnt(&tunnel_hdr, msgcount); - if (skb_queue_empty(&l_ptr->outqueue)) { + if (skb_queue_empty(&l_ptr->transmq)) { skb = tipc_buf_acquire(INT_H_SIZE); if (skb) { skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); @@ -1707,7 +1630,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) split_bundles = (l_ptr->owner->active_links[0] != l_ptr->owner->active_links[1]); - skb_queue_walk(&l_ptr->outqueue, skb) { + skb_queue_walk(&l_ptr->transmq, skb) { struct tipc_msg *msg = buf_msg(skb); if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { @@ -1738,80 +1661,66 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) * and sequence order is preserved per sender/receiver socket pair. * Owner node is locked. 
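Each tunnel packet built by tipc_link_dup_queue_xmit() below wraps exactly one original packet behind a CHANGEOVER_PROTOCOL header; schematically (our diagram, derived from the code that follows):

/*  | INT_H_SIZE tunnel hdr (DUPLICATE_MSG) | original packet |
 *
 * msg_msgcnt() in the tunnel header carries
 * skb_queue_len(transmq) + skb_queue_len(backlogq), so the peer knows
 * how many inner packets the changeover will deliver in total. */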
*/ -void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, - struct tipc_link *tunnel) +void tipc_link_dup_queue_xmit(struct tipc_link *link, + struct tipc_link *tnl) { struct sk_buff *skb; - struct tipc_msg tunnel_hdr; - - tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL, - DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); - msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue)); - msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); - skb_queue_walk(&l_ptr->outqueue, skb) { + struct tipc_msg tnl_hdr; + struct sk_buff_head *queue = &link->transmq; + int mcnt; + + tipc_msg_init(link_own_addr(link), &tnl_hdr, CHANGEOVER_PROTOCOL, + DUPLICATE_MSG, INT_H_SIZE, link->addr); + mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq); + msg_set_msgcnt(&tnl_hdr, mcnt); + msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id); + +tunnel_queue: + skb_queue_walk(queue, skb) { struct sk_buff *outskb; struct tipc_msg *msg = buf_msg(skb); - u32 length = msg_size(msg); + u32 len = msg_size(msg); - if (msg_user(msg) == MSG_BUNDLER) - msg_set_type(msg, CLOSED_MSG); - msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ - msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); - msg_set_size(&tunnel_hdr, length + INT_H_SIZE); - outskb = tipc_buf_acquire(length + INT_H_SIZE); + msg_set_ack(msg, mod(link->next_in_no - 1)); + msg_set_bcast_ack(msg, link->owner->bclink.last_in); + msg_set_size(&tnl_hdr, len + INT_H_SIZE); + outskb = tipc_buf_acquire(len + INT_H_SIZE); if (outskb == NULL) { pr_warn("%sunable to send duplicate msg\n", link_co_err); return; } - skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE); - skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data, - length); - __tipc_link_xmit_skb(tunnel, outskb); - if (!tipc_link_is_up(l_ptr)) + skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE); + skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, + skb->data, len); + __tipc_link_xmit_skb(tnl, outskb); + if (!tipc_link_is_up(link)) return; } -} - -/** - * buf_extract - extracts embedded TIPC message from another message - * @skb: encapsulating message buffer - * @from_pos: offset to extract from - * - * Returns a new message buffer containing an embedded message. The - * encapsulating buffer is left unchanged. - */ -static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) -{ - struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); - u32 size = msg_size(msg); - struct sk_buff *eb; - - eb = tipc_buf_acquire(size); - if (eb) - skb_copy_to_linear_data(eb, msg, size); - return eb; + if (queue == &link->backlogq) + return; + queue = &link->backlogq; + goto tunnel_queue; } /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet. * Owner node is locked. 
*/ -static void tipc_link_dup_rcv(struct tipc_link *l_ptr, - struct sk_buff *t_buf) +static void tipc_link_dup_rcv(struct tipc_link *link, + struct sk_buff *skb) { - struct sk_buff *buf; + struct sk_buff *iskb; + int pos = 0; - if (!tipc_link_is_up(l_ptr)) + if (!tipc_link_is_up(link)) return; - buf = buf_extract(t_buf, INT_H_SIZE); - if (buf == NULL) { + if (!tipc_msg_extract(skb, &iskb, &pos)) { pr_warn("%sfailed to extract inner dup pkt\n", link_co_err); return; } - - /* Add buffer to deferred queue, if applicable: */ - link_handle_out_of_seq_msg(l_ptr, buf); + /* Append buffer to deferred queue, if applicable: */ + link_handle_out_of_seq_msg(link, iskb); } /* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet @@ -1823,6 +1732,7 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr, struct tipc_msg *t_msg = buf_msg(t_buf); struct sk_buff *buf = NULL; struct tipc_msg *msg; + int pos = 0; if (tipc_link_is_up(l_ptr)) tipc_link_reset(l_ptr); @@ -1834,8 +1744,7 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr, /* Should there be an inner packet? */ if (l_ptr->exp_msg_count) { l_ptr->exp_msg_count--; - buf = buf_extract(t_buf, INT_H_SIZE); - if (buf == NULL) { + if (!tipc_msg_extract(t_buf, &buf, &pos)) { pr_warn("%sno inner failover pkt\n", link_co_err); goto exit; } @@ -1903,23 +1812,16 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4); } -void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) +void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) { - /* Data messages from this node, inclusive FIRST_FRAGM */ - l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; - l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; - l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; - l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; - /* Transiting data messages,inclusive FIRST_FRAGM */ - l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; - l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; - l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; - l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; - l_ptr->queue_limit[CONN_MANAGER] = 1200; - l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; - l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000; - /* FRAGMENT and LAST_FRAGMENT packets */ - l_ptr->queue_limit[MSG_FRAGMENTER] = 4000; + int max_bulk = TIPC_MAX_PUBLICATIONS / (l->max_pkt / ITEM_SIZE); + + l->window = win; + l->queue_limit[TIPC_LOW_IMPORTANCE] = win / 2; + l->queue_limit[TIPC_MEDIUM_IMPORTANCE] = win; + l->queue_limit[TIPC_HIGH_IMPORTANCE] = win / 2 * 3; + l->queue_limit[TIPC_CRITICAL_IMPORTANCE] = win * 2; + l->queue_limit[TIPC_SYSTEM_IMPORTANCE] = max_bulk; } /* tipc_link_find_owner - locate owner node of link by link's name diff --git a/net/tipc/link.h b/net/tipc/link.h index 7aeb52092bf3..eec3ecf2d450 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -124,7 +124,8 @@ struct tipc_stats { * @max_pkt: current maximum packet size for this link * @max_pkt_target: desired maximum packet size for this link * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target) - * @outqueue: outbound message queue + * @transmitq: queue for sent, non-acked messages + * @backlogq: queue for messages waiting to be sent * @next_out_no: next sequence number to use for outbound messages * @last_retransmitted: sequence number of most recently retransmitted message * @stale_count: # of identical 
retransmit requests made by peer @@ -177,20 +178,21 @@ struct tipc_link { u32 max_pkt_probes; /* Sending */ - struct sk_buff_head outqueue; + struct sk_buff_head transmq; + struct sk_buff_head backlogq; u32 next_out_no; + u32 window; u32 last_retransmitted; u32 stale_count; /* Reception */ u32 next_in_no; - struct sk_buff_head deferred_queue; - u32 unacked_window; + u32 rcv_unacked; + struct sk_buff_head deferdq; struct sk_buff_head inputq; struct sk_buff_head namedq; /* Congestion handling */ - struct sk_buff *next_out; struct sk_buff_head wakeupq; /* Fragmentation/reassembly */ @@ -302,9 +304,4 @@ static inline int link_reset_reset(struct tipc_link *l_ptr) return l_ptr->state == RESET_RESET; } -static inline int link_congested(struct tipc_link *l_ptr) -{ - return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0]; -} - #endif diff --git a/net/tipc/msg.c b/net/tipc/msg.c index b6eb90cd3ef7..0c6dad8180a0 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -1,7 +1,7 @@ /* * net/tipc/msg.c: TIPC message header routines * - * Copyright (c) 2000-2006, 2014, Ericsson AB + * Copyright (c) 2000-2006, 2014-2015, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * @@ -165,6 +165,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) } if (fragid == LAST_FRAGMENT) { + TIPC_SKB_CB(head)->validated = false; + if (unlikely(!tipc_msg_validate(head))) + goto err; *buf = head; TIPC_SKB_CB(head)->tail = NULL; *headbuf = NULL; @@ -172,7 +175,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) } *buf = NULL; return 0; - err: pr_warn_ratelimited("Unable to build fragment list\n"); kfree_skb(*buf); @@ -181,6 +183,48 @@ err: return 0; } +/* tipc_msg_validate - validate basic format of received message + * + * This routine ensures a TIPC message has an acceptable header, and at least + * as much data as the header indicates it should. The routine also ensures + * that the entire message header is stored in the main fragment of the message + * buffer, to simplify future access to message header fields. + * + * Note: Having extra info present in the message header or data areas is OK. + * TIPC will ignore the excess, under the assumption that it is optional info + * introduced by a later release of the protocol. 
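A worked pass through the checks in tipc_msg_validate() just below, with made-up numbers (hsz and msz as computed by the function):

/* Suppose msg_hdr_sz() = 40, msg_size() = 1040, skb->len = 1060:
 *   MIN_H_SIZE <= 40 <= MAX_H_SIZE	-> plausible header size
 *   pskb_may_pull(skb, 40)		-> header linear, fields usable
 *   1040 >= 40, 1000 <= TIPC_MAX_USER_MSG_SIZE -> sane payload size
 *   1060 >= 1040			-> trailing 20 bytes are ignored
 * TIPC_SKB_CB(skb)->validated then caches the verdict so deferred or
 * reassembled buffers are not re-checked. */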
+ */ +bool tipc_msg_validate(struct sk_buff *skb) +{ + struct tipc_msg *msg; + int msz, hsz; + + if (unlikely(TIPC_SKB_CB(skb)->validated)) + return true; + if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE))) + return false; + + hsz = msg_hdr_sz(buf_msg(skb)); + if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE)) + return false; + if (unlikely(!pskb_may_pull(skb, hsz))) + return false; + + msg = buf_msg(skb); + if (unlikely(msg_version(msg) != TIPC_VERSION)) + return false; + + msz = msg_size(msg); + if (unlikely(msz < hsz)) + return false; + if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE)) + return false; + if (unlikely(skb->len < msz)) + return false; + + TIPC_SKB_CB(skb)->validated = true; + return true; +} /** * tipc_msg_build - create buffer chain containing specified header and data @@ -228,6 +272,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr)); msg_set_size(&pkthdr, pktmax); msg_set_fragm_no(&pkthdr, pktno); + msg_set_importance(&pkthdr, msg_importance(mhdr)); /* Prepare first fragment */ skb = tipc_buf_acquire(pktmax); @@ -286,33 +331,36 @@ error: /** * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one - * @list: the buffer chain of the existing buffer ("bundle") + * @bskb: the buffer to append to ("bundle") * @skb: buffer to be appended * @mtu: max allowable size for the bundle buffer * Consumes buffer if successful * Returns true if bundling could be performed, otherwise false */ -bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu) +bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu) { - struct sk_buff *bskb = skb_peek_tail(list); - struct tipc_msg *bmsg = buf_msg(bskb); + struct tipc_msg *bmsg; struct tipc_msg *msg = buf_msg(skb); - unsigned int bsz = msg_size(bmsg); + unsigned int bsz; unsigned int msz = msg_size(msg); - u32 start = align(bsz); + u32 start, pad; u32 max = mtu - INT_H_SIZE; - u32 pad = start - bsz; if (likely(msg_user(msg) == MSG_FRAGMENTER)) return false; + if (!bskb) + return false; + bmsg = buf_msg(bskb); + bsz = msg_size(bmsg); + start = align(bsz); + pad = start - bsz; + if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) return false; if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) return false; if (likely(msg_user(bmsg) != MSG_BUNDLER)) return false; - if (likely(!TIPC_SKB_CB(bskb)->bundling)) - return false; if (unlikely(skb_tailroom(bskb) < (pad + msz))) return false; if (unlikely(max < (start + msz))) @@ -328,34 +376,40 @@ /** * tipc_msg_extract(): extract bundled inner packet from buffer - * @skb: linear outer buffer, to be extracted from. + * @skb: buffer to be extracted from. * @iskb: extracted inner buffer, to be returned - * @pos: position of msg to be extracted. Returns with pointer of next msg + * @pos: position in outer message of msg to be extracted. + * Returns position of next msg * Consumes outer buffer when last packet extracted * Returns true when there is an extracted buffer, otherwise false */ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos) { - struct tipc_msg *msg = buf_msg(skb); - int imsz; - struct tipc_msg *imsg = (struct tipc_msg *)(msg_data(msg) + *pos); + struct tipc_msg *msg; + int imsz, offset; + + *iskb = NULL; + if (unlikely(skb_linearize(skb))) + goto none; - /* Is there space left for shortest possible message?
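Receive-side counterpart of bundling: inner packets are walked with repeated tipc_msg_extract() calls, 'pos' advancing by align(imsz) each time. A sketch of the consuming loop (deliver() is a hypothetical per-packet handler; the bundle receive path in link.c uses essentially this shape):

struct sk_buff *iskb;
int pos = 0;

while (tipc_msg_extract(skb, &iskb, &pos))
	deliver(iskb);
/* the outer buffer is freed by the final, failing extract call */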
*/ - if (*pos > (msg_data_sz(msg) - SHORT_H_SIZE)) + msg = buf_msg(skb); + offset = msg_hdr_sz(msg) + *pos; + if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE))) goto none; - imsz = msg_size(imsg); - /* Is there space left for current message ? */ - if ((*pos + imsz) > msg_data_sz(msg)) + *iskb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!*iskb)) goto none; - *iskb = tipc_buf_acquire(imsz); - if (!*iskb) + skb_pull(*iskb, offset); + imsz = msg_size(buf_msg(*iskb)); + skb_trim(*iskb, imsz); + if (unlikely(!tipc_msg_validate(*iskb))) goto none; - skb_copy_to_linear_data(*iskb, imsg, imsz); *pos += align(imsz); return true; none: kfree_skb(skb); + kfree_skb(*iskb); *iskb = NULL; return false; } @@ -369,12 +423,11 @@ none: * Replaces buffer if successful * Returns true if success, otherwise false */ -bool tipc_msg_make_bundle(struct sk_buff_head *list, - struct sk_buff *skb, u32 mtu, u32 dnode) +bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode) { struct sk_buff *bskb; struct tipc_msg *bmsg; - struct tipc_msg *msg = buf_msg(skb); + struct tipc_msg *msg = buf_msg(*skb); u32 msz = msg_size(msg); u32 max = mtu - INT_H_SIZE; @@ -398,9 +451,9 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, msg_set_seqno(bmsg, msg_seqno(msg)); msg_set_ack(bmsg, msg_ack(msg)); msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); - TIPC_SKB_CB(bskb)->bundling = true; - __skb_queue_tail(list, bskb); - return tipc_msg_bundle(list, skb, mtu); + tipc_msg_bundle(bskb, *skb, mtu); + *skb = bskb; + return true; } /** @@ -415,21 +468,17 @@ bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode, int err) { struct tipc_msg *msg = buf_msg(buf); - uint imp = msg_importance(msg); struct tipc_msg ohdr; uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE); if (skb_linearize(buf)) goto exit; + msg = buf_msg(buf); if (msg_dest_droppable(msg)) goto exit; if (msg_errcode(msg)) goto exit; - memcpy(&ohdr, msg, msg_hdr_sz(msg)); - imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE); - if (msg_isdata(msg)) - msg_set_importance(msg, imp); msg_set_errcode(msg, err); msg_set_origport(msg, msg_destport(&ohdr)); msg_set_destport(msg, msg_origport(&ohdr)); diff --git a/net/tipc/msg.h b/net/tipc/msg.h index fa167846d1ab..bd3969a80dd4 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -1,7 +1,7 @@ /* * net/tipc/msg.h: Include file for TIPC message header routines * - * Copyright (c) 2000-2007, 2014, Ericsson AB + * Copyright (c) 2000-2007, 2014-2015 Ericsson AB * Copyright (c) 2005-2008, 2010-2011, Wind River Systems * All rights reserved. 
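The msg.h changes below centralize importance handling: the level is now derived from the message itself rather than stored redundantly. Combined with the tipc_link_set_queue_limits() rewrite earlier in this diff, the mapping works out as in this annotated sketch (win = 50 is an example value):

/* msg_importance():
 *   data message, no error	-> its user field (0..3, LOW..CRITICAL)
 *   MSG_FRAGMENTER		-> bits 13..15 of word 5, seeded by
 *				   tipc_msg_build() from the original header
 *   any other internal user	-> TIPC_SYSTEM_IMPORTANCE (4)
 *
 * backlog limits then scale with the window, e.g. win = 50 gives:
 *   LOW 25 (win/2), MEDIUM 50 (win), HIGH 75 (win/2*3), CRITICAL 100
 *   (win*2), SYSTEM = TIPC_MAX_PUBLICATIONS / (max_pkt / ITEM_SIZE) */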
* @@ -54,6 +54,8 @@ struct plist; * - TIPC_HIGH_IMPORTANCE * - TIPC_CRITICAL_IMPORTANCE */ +#define TIPC_SYSTEM_IMPORTANCE 4 + /* * Payload message types @@ -64,6 +66,19 @@ struct plist; #define TIPC_DIRECT_MSG 3 /* + * Internal message users + */ +#define BCAST_PROTOCOL 5 +#define MSG_BUNDLER 6 +#define LINK_PROTOCOL 7 +#define CONN_MANAGER 8 +#define CHANGEOVER_PROTOCOL 10 +#define NAME_DISTRIBUTOR 11 +#define MSG_FRAGMENTER 12 +#define LINK_CONFIG 13 +#define SOCK_WAKEUP 14 /* pseudo user */ + +/* * Message header sizes */ #define SHORT_H_SIZE 24 /* In-cluster basic payload message */ @@ -92,7 +107,7 @@ struct plist; struct tipc_skb_cb { void *handle; struct sk_buff *tail; - bool deferred; + bool validated; bool wakeup_pending; bool bundling; u16 chain_sz; @@ -170,16 +185,6 @@ static inline void msg_set_user(struct tipc_msg *m, u32 n) msg_set_bits(m, 0, 25, 0xf, n); } -static inline u32 msg_importance(struct tipc_msg *m) -{ - return msg_bits(m, 0, 25, 0xf); -} - -static inline void msg_set_importance(struct tipc_msg *m, u32 i) -{ - msg_set_user(m, i); -} - static inline u32 msg_hdr_sz(struct tipc_msg *m) { return msg_bits(m, 0, 21, 0xf) << 2; @@ -336,6 +341,25 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n) /* * Words 3-10 */ +static inline u32 msg_importance(struct tipc_msg *m) +{ + if (unlikely(msg_user(m) == MSG_FRAGMENTER)) + return msg_bits(m, 5, 13, 0x7); + if (likely(msg_isdata(m) && !msg_errcode(m))) + return msg_user(m); + return TIPC_SYSTEM_IMPORTANCE; +} + +static inline void msg_set_importance(struct tipc_msg *m, u32 i) +{ + if (unlikely(msg_user(m) == MSG_FRAGMENTER)) + msg_set_bits(m, 5, 13, 0x7, i); + else if (likely(i < TIPC_SYSTEM_IMPORTANCE)) + msg_set_user(m, i); + else + pr_warn("Trying to set illegal importance in message\n"); +} + static inline u32 msg_prevnode(struct tipc_msg *m) { return msg_word(m, 3); @@ -458,20 +482,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) */ /* - * Internal message users - */ -#define BCAST_PROTOCOL 5 -#define MSG_BUNDLER 6 -#define LINK_PROTOCOL 7 -#define CONN_MANAGER 8 -#define ROUTE_DISTRIBUTOR 9 /* obsoleted */ -#define CHANGEOVER_PROTOCOL 10 -#define NAME_DISTRIBUTOR 11 -#define MSG_FRAGMENTER 12 -#define LINK_CONFIG 13 -#define SOCK_WAKEUP 14 /* pseudo user */ - -/* * Connection management protocol message types */ #define CONN_PROBE 0 @@ -510,7 +520,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) #define DSC_REQ_MSG 0 #define DSC_RESP_MSG 1 - /* * Word 1 */ @@ -534,6 +543,16 @@ static inline void msg_set_node_sig(struct tipc_msg *m, u32 n) msg_set_bits(m, 1, 0, 0xffff, n); } +static inline u32 msg_node_capabilities(struct tipc_msg *m) +{ + return msg_bits(m, 1, 15, 0x1fff); +} + +static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n) +{ + msg_set_bits(m, 1, 15, 0x1fff, n); +} + /* * Word 2 @@ -734,13 +753,6 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n) msg_set_bits(m, 9, 0, 0xffff, n); } -static inline u32 tipc_msg_tot_importance(struct tipc_msg *m) -{ - if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT)) - return msg_importance(msg_get_wrapped(m)); - return msg_importance(m); -} - static inline u32 msg_tot_origport(struct tipc_msg *m) { if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT)) @@ -749,6 +761,7 @@ static inline u32 msg_tot_origport(struct tipc_msg *m) } struct sk_buff *tipc_buf_acquire(u32 size); +bool tipc_msg_validate(struct sk_buff *skb); bool tipc_msg_reverse(u32 
own_addr, struct sk_buff *buf, u32 *dnode, int err); void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type, @@ -757,9 +770,9 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, uint data_sz, u32 dnode, u32 onode, u32 dport, u32 oport, int errcode); int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); -bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu); -bool tipc_msg_make_bundle(struct sk_buff_head *list, - struct sk_buff *skb, u32 mtu, u32 dnode); +bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu); + +bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode); bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos); int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, int dsz, int mtu, struct sk_buff_head *list); diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 105ba7adf06f..ab0ac62a1287 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -811,8 +811,8 @@ static void tipc_purge_publications(struct net *net, struct name_seq *seq) sseq = seq->sseqs; info = sseq->info; list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { - tipc_nametbl_remove_publ(net, publ->type, publ->lower, - publ->node, publ->ref, publ->key); + tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node, + publ->ref, publ->key); kfree_rcu(publ, rcu); } hlist_del_init_rcu(&seq->ns_list); diff --git a/net/tipc/node.c b/net/tipc/node.c index 86152de8248d..26d1de1bf34d 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -111,7 +111,7 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr) INIT_LIST_HEAD(&n_ptr->list); INIT_LIST_HEAD(&n_ptr->publ_list); INIT_LIST_HEAD(&n_ptr->conn_sks); - __skb_queue_head_init(&n_ptr->bclink.deferred_queue); + __skb_queue_head_init(&n_ptr->bclink.deferdq); hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]); list_for_each_entry_rcu(temp_node, &tn->node_list, list) { if (n_ptr->addr < temp_node->addr) @@ -354,7 +354,7 @@ static void node_lost_contact(struct tipc_node *n_ptr) /* Flush broadcast link info associated with lost node */ if (n_ptr->bclink.recv_permitted) { - __skb_queue_purge(&n_ptr->bclink.deferred_queue); + __skb_queue_purge(&n_ptr->bclink.deferdq); if (n_ptr->bclink.reasm_buf) { kfree_skb(n_ptr->bclink.reasm_buf); diff --git a/net/tipc/node.h b/net/tipc/node.h index 3d18c66b7f78..e89ac04ec2c3 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -84,7 +84,7 @@ struct tipc_node_bclink { u32 last_sent; u32 oos_state; u32 deferred_size; - struct sk_buff_head deferred_queue; + struct sk_buff_head deferdq; struct sk_buff *reasm_buf; int inputq_map; bool recv_permitted; @@ -106,6 +106,7 @@ struct tipc_node_bclink { * @list: links to adjacent nodes in sorted list of cluster's nodes * @working_links: number of working links to node (both active and standby) * @link_cnt: number of links to node + * @capabilities: bitmap, indicating peer node's functional capabilities * @signature: node instance identifier * @link_id: local and remote bearer ids of changing link, if any * @publ_list: list of publications @@ -125,7 +126,8 @@ struct tipc_node { struct tipc_node_bclink bclink; struct list_head list; int link_cnt; - int working_links; + u16 working_links; + u16 capabilities; u32 signature; u32 link_id; struct list_head publ_list; diff --git a/net/tipc/server.c b/net/tipc/server.c index eadd4ed45905..ab6183cdb121 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ 
-37,11 +37,13 @@ #include "core.h" #include "socket.h" #include <net/sock.h> +#include <linux/module.h> /* Number of messages to send before rescheduling */ #define MAX_SEND_MSG_COUNT 25 #define MAX_RECV_MSG_COUNT 25 #define CF_CONNECTED 1 +#define CF_SERVER 2 #define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data) @@ -88,9 +90,19 @@ static void tipc_clean_outqueues(struct tipc_conn *con); static void tipc_conn_kref_release(struct kref *kref) { struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); + struct sockaddr_tipc *saddr = con->server->saddr; + struct socket *sock = con->sock; + struct sock *sk; - if (con->sock) { - tipc_sock_release_local(con->sock); + if (sock) { + sk = sock->sk; + if (test_bit(CF_SERVER, &con->flags)) { + __module_get(sock->ops->owner); + __module_get(sk->sk_prot_creator->owner); + } + saddr->scope = -TIPC_NODE_SCOPE; + kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); + sk_release_kernel(sk); con->sock = NULL; } @@ -281,7 +293,7 @@ static int tipc_accept_from_sock(struct tipc_conn *con) struct tipc_conn *newcon; int ret; - ret = tipc_sock_accept_local(sock, &newsock, O_NONBLOCK); + ret = kernel_accept(sock, &newsock, O_NONBLOCK); if (ret < 0) return ret; @@ -309,9 +321,12 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con) struct socket *sock = NULL; int ret; - ret = tipc_sock_create_local(s->net, s->type, &sock); + ret = sock_create_kern(AF_TIPC, SOCK_SEQPACKET, 0, &sock); if (ret < 0) return NULL; + + sk_change_net(sock->sk, s->net); + ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE, (char *)&s->imp, sizeof(s->imp)); if (ret < 0) @@ -337,11 +352,31 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con) pr_err("Unknown socket type %d\n", s->type); goto create_err; } + + /* The server's listening socket is owned and created by the same + * module, so we must drop the TIPC module reference count to keep it + * at zero once the server socket has been created; otherwise "rmmod" + * cannot remove the TIPC module after it has been inserted. + * + * However, sock_create_kern() always takes two module references: one + * on the owner of the TIPC socket's proto_ops struct, and another on + * the owner of the TIPC proto struct. Therefore we must put the module + * reference twice to ensure that it stays at zero after the server's + * listening socket is created. Of course, we must also take the two + * module references back before the socket + * is closed. 
+ */ + module_put(sock->ops->owner); + module_put(sock->sk->sk_prot_creator->owner); + set_bit(CF_SERVER, &con->flags); + return sock; create_err: - sock_release(sock); - con->sock = NULL; + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); return NULL; } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 934947f038b6..73c2f518a7c0 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -74,6 +74,7 @@ * @link_cong: non-zero if owner must sleep because of link congestion * @sent_unacked: # messages sent by socket, and not yet acked by peer * @rcv_unacked: # messages read by user, but not yet acked back to peer + * @remote: 'connected' peer for dgram/rdm * @node: hash table node * @rcu: rcu struct for tipc_sock */ @@ -96,6 +97,7 @@ struct tipc_sock { bool link_cong; uint sent_unacked; uint rcv_unacked; + struct sockaddr_tipc remote; struct rhash_head node; struct rcu_head rcu; }; @@ -121,9 +123,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz); static const struct proto_ops packet_ops; static const struct proto_ops stream_ops; static const struct proto_ops msg_ops; - static struct proto tipc_proto; -static struct proto tipc_proto_kern; static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC }, @@ -133,6 +133,8 @@ static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG } }; +static const struct rhashtable_params tsk_rht_params; + /* * Revised TIPC socket locking policy: * @@ -341,11 +343,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, } /* Allocate socket's protocol area */ - if (!kern) - sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); - else - sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern); - + sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); if (sk == NULL) return -ENOMEM; @@ -383,75 +381,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock, return 0; } -/** - * tipc_sock_create_local - create TIPC socket from inside TIPC module - * @type: socket type - SOCK_RDM or SOCK_SEQPACKET - * - * We cannot use sock_creat_kern here because it bumps module user count. - * Since socket owner and creator is the same module we must make sure - * that module count remains zero for module local sockets, otherwise - * we cannot do rmmod. - * - * Returns 0 on success, errno otherwise - */ -int tipc_sock_create_local(struct net *net, int type, struct socket **res) -{ - int rc; - - rc = sock_create_lite(AF_TIPC, type, 0, res); - if (rc < 0) { - pr_err("Failed to create kernel socket\n"); - return rc; - } - tipc_sk_create(net, *res, 0, 1); - - return 0; -} - -/** - * tipc_sock_release_local - release socket created by tipc_sock_create_local - * @sock: the socket to be released. - * - * Module reference count is not incremented when such sockets are created, - * so we must keep it from being decremented when they are released. - */ -void tipc_sock_release_local(struct socket *sock) -{ - tipc_release(sock); - sock->ops = NULL; - sock_release(sock); -} - -/** - * tipc_sock_accept_local - accept a connection on a socket created - * with tipc_sock_create_local. Use this function to avoid that - * module reference count is inadvertently incremented. 
- * - * @sock: the accepting socket - * @newsock: reference to the new socket to be created - * @flags: socket flags - */ - -int tipc_sock_accept_local(struct socket *sock, struct socket **newsock, - int flags) -{ - struct sock *sk = sock->sk; - int ret; - - ret = sock_create_lite(sk->sk_family, sk->sk_type, - sk->sk_protocol, newsock); - if (ret < 0) - return ret; - - ret = tipc_accept(sock, *newsock, flags); - if (ret < 0) { - sock_release(*newsock); - return ret; - } - (*newsock)->ops = sock->ops; - return ret; -} - static void tipc_sk_callback(struct rcu_head *head) { struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); @@ -929,22 +858,23 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) u32 dnode, dport; struct sk_buff_head *pktchain = &sk->sk_write_queue; struct sk_buff *skb; - struct tipc_name_seq *seq = &dest->addr.nameseq; + struct tipc_name_seq *seq; struct iov_iter save; u32 mtu; long timeo; int rc; - if (unlikely(!dest)) - return -EDESTADDRREQ; - - if (unlikely((m->msg_namelen < sizeof(*dest)) || - (dest->family != AF_TIPC))) - return -EINVAL; - if (dsz > TIPC_MAX_USER_MSG_SIZE) return -EMSGSIZE; - + if (unlikely(!dest)) { + if (tsk->connected && sock->state == SS_READY) + dest = &tsk->remote; + else + return -EDESTADDRREQ; + } else if (unlikely(m->msg_namelen < sizeof(*dest)) || + dest->family != AF_TIPC) { + return -EINVAL; + } if (unlikely(sock->state != SS_READY)) { if (sock->state == SS_LISTENING) return -EPIPE; @@ -957,7 +887,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz) tsk->conn_instance = dest->addr.name.name.instance; } } - + seq = &dest->addr.nameseq; timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); if (dest->addrtype == TIPC_ADDR_MCAST) { @@ -1908,17 +1838,24 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest, int destlen, int flags) { struct sock *sk = sock->sk; + struct tipc_sock *tsk = tipc_sk(sk); struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; struct msghdr m = {NULL,}; - long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout; + long timeout = (flags & O_NONBLOCK) ? 
0 : tsk->conn_timeout; socket_state previous; - int res; + int res = 0; lock_sock(sk); - /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */ + /* DGRAM/RDM connect(), just save the destaddr */ if (sock->state == SS_READY) { - res = -EOPNOTSUPP; + if (dst->family == AF_UNSPEC) { + memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc)); + tsk->connected = 0; + } else { + memcpy(&tsk->remote, dest, destlen); + tsk->connected = 1; + } goto exit; } @@ -2153,7 +2090,6 @@ restart: TIPC_CONN_SHUTDOWN)) tipc_link_xmit_skb(net, skb, dnode, tsk->portid); - tipc_node_remove_conn(net, dnode, tsk->portid); } else { dnode = tsk_peer_node(tsk); @@ -2311,7 +2247,7 @@ static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) struct tipc_sock *tsk; rcu_read_lock(); - tsk = rhashtable_lookup(&tn->sk_rht, &portid); + tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params); if (tsk) sock_hold(&tsk->sk); rcu_read_unlock(); @@ -2333,7 +2269,8 @@ static int tipc_sk_insert(struct tipc_sock *tsk) portid = TIPC_MIN_PORT; tsk->portid = portid; sock_hold(&tsk->sk); - if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node)) + if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node, + tsk_rht_params)) return 0; sock_put(&tsk->sk); } @@ -2346,26 +2283,27 @@ static void tipc_sk_remove(struct tipc_sock *tsk) struct sock *sk = &tsk->sk; struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id); - if (rhashtable_remove(&tn->sk_rht, &tsk->node)) { + if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) { WARN_ON(atomic_read(&sk->sk_refcnt) == 1); __sock_put(sk); } } +static const struct rhashtable_params tsk_rht_params = { + .nelem_hint = 192, + .head_offset = offsetof(struct tipc_sock, node), + .key_offset = offsetof(struct tipc_sock, portid), + .key_len = sizeof(u32), /* portid */ + .hashfn = jhash, + .max_size = 1048576, + .min_size = 256, +}; + int tipc_sk_rht_init(struct net *net) { struct tipc_net *tn = net_generic(net, tipc_net_id); - struct rhashtable_params rht_params = { - .nelem_hint = 192, - .head_offset = offsetof(struct tipc_sock, node), - .key_offset = offsetof(struct tipc_sock, portid), - .key_len = sizeof(u32), /* portid */ - .hashfn = jhash, - .max_shift = 20, /* 1M */ - .min_shift = 8, /* 256 */ - }; - return rhashtable_init(&tn->sk_rht, &rht_params); + return rhashtable_init(&tn->sk_rht, &tsk_rht_params); } void tipc_sk_rht_destroy(struct net *net) @@ -2608,12 +2546,6 @@ static struct proto tipc_proto = { .sysctl_rmem = sysctl_tipc_rmem }; -static struct proto tipc_proto_kern = { - .name = "TIPC", - .obj_size = sizeof(struct tipc_sock), - .sysctl_rmem = sysctl_tipc_rmem -}; - /** * tipc_socket_init - initialize TIPC socket interface * diff --git a/net/tipc/socket.h b/net/tipc/socket.h index 238f1b7bd9bd..bf6551389522 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h @@ -44,10 +44,6 @@ SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) int tipc_socket_init(void); void tipc_socket_stop(void); -int tipc_sock_create_local(struct net *net, int type, struct socket **res); -void tipc_sock_release_local(struct socket *sock); -int tipc_sock_accept_local(struct socket *sock, struct socket **newsock, - int flags); int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq); void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, struct sk_buff_head *inputq); diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index fc2fb11a354d..ac89101e5d1b 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -44,6 +44,7 @@ #include <net/sock.h> 
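The tipc_sk_lookup()/tipc_sk_insert()/tipc_sk_remove() hunks above convert the socket hash table to the fixed-parameter rhashtable API: one static const struct rhashtable_params is defined at file scope and handed to the *_fast() helpers at every call site, instead of being built on the stack inside tipc_sk_rht_init(). Here is a minimal sketch of that pattern, assuming a 4.0-era rhashtable; the names my_obj, my_table and the helpers are illustrative, not from the patch:

    #include <linux/jhash.h>
    #include <linux/rhashtable.h>

    struct my_obj {
            u32 key;
            struct rhash_head node;
    };

    /* One const params block; the *_fast() helpers take it by value so
     * the hash function can be inlined at each call site.
     */
    static const struct rhashtable_params my_params = {
            .head_offset = offsetof(struct my_obj, node),
            .key_offset  = offsetof(struct my_obj, key),
            .key_len     = sizeof(u32),
            .hashfn      = jhash,
    };

    static struct rhashtable my_table;

    static int my_table_init(void)
    {
            return rhashtable_init(&my_table, &my_params);
    }

    static struct my_obj *my_lookup(u32 key)
    {
            return rhashtable_lookup_fast(&my_table, &key, my_params);
    }

    static int my_insert(struct my_obj *obj)
    {
            return rhashtable_insert_fast(&my_table, &obj->node, my_params);
    }

Note that the diff also replaces max_shift/min_shift with max_size/min_size (1048576/256): the fixed-params API sizes tables in entries rather than in shift counts.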
#include <net/ip.h> #include <net/udp_tunnel.h> +#include <net/addrconf.h> #include <linux/tipc_netlink.h> #include "core.h" #include "bearer.h" @@ -246,11 +247,14 @@ static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote) return 0; mreqn.imr_multiaddr = remote->ipv4; mreqn.imr_ifindex = ub->ifindex; - err = __ip_mc_join_group(sk, &mreqn); + err = ip_mc_join_group(sk, &mreqn); +#if IS_ENABLED(CONFIG_IPV6) } else { if (!ipv6_addr_is_multicast(&remote->ipv6)) return 0; - err = __ipv6_sock_mc_join(sk, ub->ifindex, &remote->ipv6); + err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex, + &remote->ipv6); +#endif } return err; } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 864b782c0202..d6ba4a6bbff6 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4400,6 +4400,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) if (parse_station_flags(info, dev->ieee80211_ptr->iftype, ¶ms)) return -EINVAL; + /* HT/VHT requires QoS, but if we don't have that just ignore HT/VHT + * as userspace might just pass through the capabilities from the IEs + * directly, rather than enforcing this restriction and returning an + * error in this case. + */ + if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME))) { + params.ht_capa = NULL; + params.vht_capa = NULL; + } + /* When you run into this, adjust the code below for the new flag */ BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index cee479bc655c..638af0655aaf 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2269,11 +2269,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, * have the xfrm_state's. We need to wait for KM to * negotiate new SA's or bail out with error.*/ if (net->xfrm.sysctl_larval_drop) { - dst_release(dst); - xfrm_pols_put(pols, drop_pols); XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); - - return ERR_PTR(-EREMOTE); + err = -EREMOTE; + goto error; } err = -EAGAIN; @@ -2324,7 +2322,8 @@ nopol: error: dst_release(dst); dropdst: - dst_release(dst_orig); + if (!(flags & XFRM_LOOKUP_KEEP_DST_REF)) + dst_release(dst_orig); xfrm_pols_put(pols, drop_pols); return ERR_PTR(err); } @@ -2338,7 +2337,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, struct sock *sk, int flags) { struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, - flags | XFRM_LOOKUP_QUEUE); + flags | XFRM_LOOKUP_QUEUE | + XFRM_LOOKUP_KEEP_DST_REF); if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) return make_blackhole(net, dst_orig->ops->family, dst_orig); diff --git a/samples/bpf/sockex1_kern.c b/samples/bpf/sockex1_kern.c index 066892662915..ed18e9a4909c 100644 --- a/samples/bpf/sockex1_kern.c +++ b/samples/bpf/sockex1_kern.c @@ -1,5 +1,6 @@ #include <uapi/linux/bpf.h> #include <uapi/linux/if_ether.h> +#include <uapi/linux/if_packet.h> #include <uapi/linux/ip.h> #include "bpf_helpers.h" @@ -11,14 +12,17 @@ struct bpf_map_def SEC("maps") my_map = { }; SEC("socket1") -int bpf_prog1(struct sk_buff *skb) +int bpf_prog1(struct __sk_buff *skb) { int index = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)); long *value; + if (skb->pkt_type != PACKET_OUTGOING) + return 0; + value = bpf_map_lookup_elem(&my_map, &index); if (value) - __sync_fetch_and_add(value, 1); + __sync_fetch_and_add(value, skb->len); return 0; } diff --git a/samples/bpf/sockex1_user.c b/samples/bpf/sockex1_user.c index 34a443ff3831..678ce4693551 100644 --- a/samples/bpf/sockex1_user.c +++ 
b/samples/bpf/sockex1_user.c @@ -40,7 +40,7 @@ int main(int ac, char **argv) key = IPPROTO_ICMP; assert(bpf_lookup_elem(map_fd[0], &key, &icmp_cnt) == 0); - printf("TCP %lld UDP %lld ICMP %lld packets\n", + printf("TCP %lld UDP %lld ICMP %lld bytes\n", tcp_cnt, udp_cnt, icmp_cnt); sleep(1); } diff --git a/samples/bpf/sockex2_kern.c b/samples/bpf/sockex2_kern.c index 6f0135f0f217..ba0e177ff561 100644 --- a/samples/bpf/sockex2_kern.c +++ b/samples/bpf/sockex2_kern.c @@ -42,13 +42,13 @@ static inline int proto_ports_offset(__u64 proto) } } -static inline int ip_is_fragment(struct sk_buff *ctx, __u64 nhoff) +static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff) { return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off)) & (IP_MF | IP_OFFSET); } -static inline __u32 ipv6_addr_hash(struct sk_buff *ctx, __u64 off) +static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off) { __u64 w0 = load_word(ctx, off); __u64 w1 = load_word(ctx, off + 4); @@ -58,7 +58,7 @@ static inline __u32 ipv6_addr_hash(struct sk_buff *ctx, __u64 off) return (__u32)(w0 ^ w1 ^ w2 ^ w3); } -static inline __u64 parse_ip(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto, +static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto, struct flow_keys *flow) { __u64 verlen; @@ -82,7 +82,7 @@ static inline __u64 parse_ip(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto, return nhoff; } -static inline __u64 parse_ipv6(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto, +static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff, __u64 *ip_proto, struct flow_keys *flow) { *ip_proto = load_byte(skb, @@ -96,7 +96,7 @@ static inline __u64 parse_ipv6(struct sk_buff *skb, __u64 nhoff, __u64 *ip_proto return nhoff; } -static inline bool flow_dissector(struct sk_buff *skb, struct flow_keys *flow) +static inline bool flow_dissector(struct __sk_buff *skb, struct flow_keys *flow) { __u64 nhoff = ETH_HLEN; __u64 ip_proto; @@ -183,18 +183,23 @@ static inline bool flow_dissector(struct sk_buff *skb, struct flow_keys *flow) return true; } +struct pair { + long packets; + long bytes; +}; + struct bpf_map_def SEC("maps") hash_map = { .type = BPF_MAP_TYPE_HASH, .key_size = sizeof(__be32), - .value_size = sizeof(long), + .value_size = sizeof(struct pair), .max_entries = 1024, }; SEC("socket2") -int bpf_prog2(struct sk_buff *skb) +int bpf_prog2(struct __sk_buff *skb) { struct flow_keys flow; - long *value; + struct pair *value; u32 key; if (!flow_dissector(skb, &flow)) @@ -203,9 +208,10 @@ int bpf_prog2(struct sk_buff *skb) key = flow.dst; value = bpf_map_lookup_elem(&hash_map, &key); if (value) { - __sync_fetch_and_add(value, 1); + __sync_fetch_and_add(&value->packets, 1); + __sync_fetch_and_add(&value->bytes, skb->len); } else { - long val = 1; + struct pair val = {1, skb->len}; bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY); } diff --git a/samples/bpf/sockex2_user.c b/samples/bpf/sockex2_user.c index d2d5f5a790d3..29a276d766fc 100644 --- a/samples/bpf/sockex2_user.c +++ b/samples/bpf/sockex2_user.c @@ -6,6 +6,11 @@ #include <unistd.h> #include <arpa/inet.h> +struct pair { + __u64 packets; + __u64 bytes; +}; + int main(int ac, char **argv) { char filename[256]; @@ -29,13 +34,13 @@ int main(int ac, char **argv) for (i = 0; i < 5; i++) { int key = 0, next_key; - long long value; + struct pair value; while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) { bpf_lookup_elem(map_fd[0], &next_key, &value); - printf("ip %s count %lld\n", + printf("ip %s bytes %lld packets %lld\n", 
inet_ntoa((struct in_addr){htonl(next_key)}), - value); + value.bytes, value.packets); key = next_key; } sleep(1); diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index 7b56b59fad8e..75d561f9fd6a 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c @@ -14,6 +14,7 @@ #include <linux/unistd.h> #include <string.h> #include <linux/filter.h> +#include <stddef.h> #include "libbpf.h" #define MAX_INSNS 512 @@ -642,6 +643,84 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, }, + { + "access skb fields ok", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, pkt_type)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, queue_mapping)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, protocol)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, vlan_present)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, vlan_tci)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "access skb fields bad1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "access skb fields bad2", + .insns = { + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, pkt_type)), + BPF_EXIT_INSN(), + }, + .fixup = {4}, + .errstr = "different pointers", + .result = REJECT, + }, + { + "access skb fields bad3", + .insns = { + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, pkt_type)), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JA, 0, 0, -12), + }, + .fixup = {6}, + .errstr = "different pointers", + .result = REJECT, + }, }; static int probe_filter_length(struct bpf_insn *fp) diff --git a/security/capability.c b/security/capability.c index 070dd46f62f4..58a1600c149b 100644 --- a/security/capability.c +++ b/security/capability.c @@ -776,11 +776,6 @@ static int cap_tun_dev_open(void *security) { return 0; } - -static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk) -{ -} - #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -1134,7 +1129,6 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, tun_dev_open); set_to_cap_if_null(ops, tun_dev_attach_queue); set_to_cap_if_null(ops, tun_dev_attach); - 
set_to_cap_if_null(ops, skb_owned_by); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM set_to_cap_if_null(ops, xfrm_policy_alloc_security); diff --git a/security/security.c b/security/security.c index e81d5bbe7363..1f475aa53288 100644 --- a/security/security.c +++ b/security/security.c @@ -1359,11 +1359,6 @@ int security_tun_dev_open(void *security) } EXPORT_SYMBOL(security_tun_dev_open); -void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) -{ - security_ops->skb_owned_by(skb, sk); -} - #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 4d1a54190388..edc66de39f2e 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -51,7 +51,6 @@ #include <linux/tty.h> #include <net/icmp.h> #include <net/ip.h> /* for local_port_range[] */ -#include <net/sock.h> #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ #include <net/inet_connection_sock.h> #include <net/net_namespace.h> @@ -4652,11 +4651,6 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb) selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); } -static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk) -{ - skb_set_owner_w(skb, sk); -} - static int selinux_secmark_relabel_packet(u32 sid) { const struct task_security_struct *__tsec; @@ -6041,7 +6035,6 @@ static struct security_operations selinux_ops = { .tun_dev_attach_queue = selinux_tun_dev_attach_queue, .tun_dev_attach = selinux_tun_dev_attach, .tun_dev_open = selinux_tun_dev_open, - .skb_owned_by = selinux_skb_owned_by, #ifdef CONFIG_SECURITY_NETWORK_XFRM .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, diff --git a/sound/core/control.c b/sound/core/control.c index 35324a8e83c8..eeb691d1911f 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1170,6 +1170,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, if (info->count < 1) return -EINVAL; + if (!*info->id.name) + return -EINVAL; + if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name)) + return -EINVAL; access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| diff --git a/sound/firewire/dice/dice-interface.h b/sound/firewire/dice/dice-interface.h index de7602bd69b5..27b044f84c81 100644 --- a/sound/firewire/dice/dice-interface.h +++ b/sound/firewire/dice/dice-interface.h @@ -299,23 +299,23 @@ #define RX_ISOCHRONOUS 0x008 /* + * Index of first quadlet to be interpreted; read/write. If > 0, that many + * quadlets at the beginning of each data block will be ignored, and all the + * audio and MIDI quadlets will follow. + */ +#define RX_SEQ_START 0x00c + +/* * The number of audio channels; read-only. There will be one quadlet per * channel. */ -#define RX_NUMBER_AUDIO 0x00c +#define RX_NUMBER_AUDIO 0x010 /* * The number of MIDI ports, 0-8; read-only. If > 0, there will be one * additional quadlet in each data block, following the audio quadlets. */ -#define RX_NUMBER_MIDI 0x010 - -/* - * Index of first quadlet to be interpreted; read/write. If > 0, that many - * quadlets at the beginning of each data block will be ignored, and all the - * audio and MIDI quadlets will follow. - */ -#define RX_SEQ_START 0x014 +#define RX_NUMBER_MIDI 0x014 /* * Names of all audio channels; read-only. Quadlets are byte-swapped. 
Names diff --git a/sound/firewire/dice/dice-proc.c b/sound/firewire/dice/dice-proc.c index ecfe20fd4de5..f5c1d1bced59 100644 --- a/sound/firewire/dice/dice-proc.c +++ b/sound/firewire/dice/dice-proc.c @@ -99,9 +99,9 @@ static void dice_proc_read(struct snd_info_entry *entry, } tx; struct { u32 iso; + u32 seq_start; u32 number_audio; u32 number_midi; - u32 seq_start; char names[RX_NAMES_SIZE]; u32 ac3_caps; u32 ac3_enable; @@ -204,10 +204,10 @@ static void dice_proc_read(struct snd_info_entry *entry, break; snd_iprintf(buffer, "rx %u:\n", stream); snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso); + snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start); snd_iprintf(buffer, " audio channels: %u\n", buf.rx.number_audio); snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi); - snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start); if (quadlets >= 68) { dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE); snd_iprintf(buffer, " names: %s\n", buf.rx.names); diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c index 5f17b77ee152..f0e4d502d604 100644 --- a/sound/firewire/iso-resources.c +++ b/sound/firewire/iso-resources.c @@ -26,7 +26,7 @@ int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit) { r->channels_mask = ~0uLL; - r->unit = fw_unit_get(unit); + r->unit = unit; mutex_init(&r->mutex); r->allocated = false; @@ -42,7 +42,6 @@ void fw_iso_resources_destroy(struct fw_iso_resources *r) { WARN_ON(r->allocated); mutex_destroy(&r->mutex); - fw_unit_put(r->unit); } EXPORT_SYMBOL(fw_iso_resources_destroy); diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index a2ce773bdc62..17c2637d842c 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1164,7 +1164,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, } } - if (!bus->no_response_fallback) + if (bus->no_response_fallback) return -1; if (!chip->polling_mode && chip->poll_count < 2) { diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index b680b4ec6331..8ec5289f8e05 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -687,12 +687,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, return val; } +/* is this a stereo widget or a stereo-to-mono mix? 
*/ +static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir) +{ + unsigned int wcaps = get_wcaps(codec, nid); + hda_nid_t conn; + + if (wcaps & AC_WCAP_STEREO) + return true; + if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX) + return false; + if (snd_hda_get_num_conns(codec, nid) != 1) + return false; + if (snd_hda_get_connections(codec, nid, &conn, 1) < 0) + return false; + return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO); +} + /* initialize the amp value (only at the first time) */ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) { unsigned int caps = query_amp_caps(codec, nid, dir); int val = get_amp_val_to_activate(codec, nid, dir, caps, false); - snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); + + if (is_stereo_amps(codec, nid, dir)) + snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); + else + snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val); +} + +/* update the amp, in stereo or mono depending on the NID */ +static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, + unsigned int mask, unsigned int val) +{ + if (is_stereo_amps(codec, nid, dir)) + return snd_hda_codec_amp_stereo(codec, nid, dir, idx, + mask, val); + else + return snd_hda_codec_amp_update(codec, nid, 0, dir, idx, + mask, val); } /* calculate amp value mask we can modify; @@ -732,7 +765,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir, return; val &= mask; - snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val); + update_amp(codec, nid, dir, idx, mask, val); } static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, @@ -4424,13 +4457,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix) has_amp = nid_has_mute(codec, mix, HDA_INPUT); for (i = 0; i < nums; i++) { if (has_amp) - snd_hda_codec_amp_stereo(codec, mix, - HDA_INPUT, i, - 0xff, HDA_AMP_MUTE); + update_amp(codec, mix, HDA_INPUT, i, + 0xff, HDA_AMP_MUTE); else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) - snd_hda_codec_amp_stereo(codec, conn[i], - HDA_OUTPUT, 0, - 0xff, HDA_AMP_MUTE); + update_amp(codec, conn[i], HDA_OUTPUT, 0, + 0xff, HDA_AMP_MUTE); } } diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c index ce5a6da83419..05e19f78b4cb 100644 --- a/sound/pci/hda/hda_proc.c +++ b/sound/pci/hda/hda_proc.c @@ -134,13 +134,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer, (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT); } +/* is this a stereo widget or a stereo-to-mono mix? */ +static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, + int dir, unsigned int wcaps, int indices) +{ + hda_nid_t conn; + + if (wcaps & AC_WCAP_STEREO) + return true; + /* check for a stereo-to-mono mix; it must be: + * only a single connection, only for input, and only a mixer widget + */ + if (indices != 1 || dir != HDA_INPUT || + get_wcaps_type(wcaps) != AC_WID_AUD_MIX) + return false; + + if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0) + return false; + /* is the connection source stereo? */ + wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP); + return !!(wcaps & AC_WCAP_STEREO); +} + static void print_amp_vals(struct snd_info_buffer *buffer, struct hda_codec *codec, hda_nid_t nid, - int dir, int stereo, int indices) { unsigned int val; + bool stereo; int i; + stereo = is_stereo_amps(codec, nid, dir, wcaps, indices); + dir = dir == HDA_OUTPUT ? 
AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT; for (i = 0; i < indices; i++) { snd_iprintf(buffer, " ["); @@ -757,12 +782,10 @@ static void print_codec_info(struct snd_info_entry *entry, (codec->single_adc_amp && wid_type == AC_WID_AUD_IN)) print_amp_vals(buffer, codec, nid, HDA_INPUT, - wid_caps & AC_WCAP_STEREO, - 1); + wid_caps, 1); else print_amp_vals(buffer, codec, nid, HDA_INPUT, - wid_caps & AC_WCAP_STEREO, - conn_len); + wid_caps, conn_len); } if (wid_caps & AC_WCAP_OUT_AMP) { snd_iprintf(buffer, " Amp-Out caps: "); @@ -771,11 +794,10 @@ static void print_codec_info(struct snd_info_entry *entry, if (wid_type == AC_WID_PIN && codec->pin_amp_workaround) print_amp_vals(buffer, codec, nid, HDA_OUTPUT, - wid_caps & AC_WCAP_STEREO, - conn_len); + wid_caps, conn_len); else print_amp_vals(buffer, codec, nid, HDA_OUTPUT, - wid_caps & AC_WCAP_STEREO, 1); + wid_caps, 1); } switch (wid_type) { diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 1589c9bcce3e..dd2b3d92071f 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -393,6 +393,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), + SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81), SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), {} /* terminator */ @@ -584,6 +585,7 @@ static int patch_cs420x(struct hda_codec *codec) return -ENOMEM; spec->gen.automute_hook = cs_automute; + codec->single_adc_amp = 1; snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, cs420x_fixups); diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index fd3ed18670e9..da67ea8645a6 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -223,6 +223,7 @@ enum { CXT_PINCFG_LENOVO_TP410, CXT_PINCFG_LEMOTE_A1004, CXT_PINCFG_LEMOTE_A1205, + CXT_PINCFG_COMPAQ_CQ60, CXT_FIXUP_STEREO_DMIC, CXT_FIXUP_INC_MIC_BOOST, CXT_FIXUP_HEADPHONE_MIC_PIN, @@ -660,6 +661,15 @@ static const struct hda_fixup cxt_fixups[] = { .type = HDA_FIXUP_PINS, .v.pins = cxt_pincfg_lemote, }, + [CXT_PINCFG_COMPAQ_CQ60] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + /* 0x17 was falsely set up as a mic, it should be 0x1d */ + { 0x17, 0x400001f0 }, + { 0x1d, 0x97a70120 }, + { } + } + }, [CXT_FIXUP_STEREO_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = cxt_fixup_stereo_dmic, @@ -769,6 +779,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = { }; static const struct snd_pci_quirk cxt5051_fixups[] = { + SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), {} }; diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c index b67480f1b1aa..4373ada95648 100644 --- a/sound/soc/codecs/adav80x.c +++ b/sound/soc/codecs/adav80x.c @@ -317,7 +317,7 @@ static int adav80x_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); - unsigned int deemph = ucontrol->value.enumerated.item[0]; + unsigned int deemph = ucontrol->value.integer.value[0]; if (deemph > 1) return -EINVAL; @@ -333,7 +333,7 @@ static int adav80x_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = 
snd_soc_kcontrol_codec(kcontrol); struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = adav80x->deemph; + ucontrol->value.integer.value[0] = adav80x->deemph; return 0; }; diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c index 70861c7b1631..81b54a270bd8 100644 --- a/sound/soc/codecs/ak4641.c +++ b/sound/soc/codecs/ak4641.c @@ -76,7 +76,7 @@ static int ak4641_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; if (deemph > 1) return -EINVAL; @@ -92,7 +92,7 @@ static int ak4641_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct ak4641_priv *ak4641 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = ak4641->deemph; + ucontrol->value.integer.value[0] = ak4641->deemph; return 0; }; diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c index 632e89f793a7..2a58b1dccd2f 100644 --- a/sound/soc/codecs/ak4671.c +++ b/sound/soc/codecs/ak4671.c @@ -343,25 +343,25 @@ static const struct snd_soc_dapm_widget ak4671_dapm_widgets[] = { }; static const struct snd_soc_dapm_route ak4671_intercon[] = { - {"DAC Left", "NULL", "PMPLL"}, - {"DAC Right", "NULL", "PMPLL"}, - {"ADC Left", "NULL", "PMPLL"}, - {"ADC Right", "NULL", "PMPLL"}, + {"DAC Left", NULL, "PMPLL"}, + {"DAC Right", NULL, "PMPLL"}, + {"ADC Left", NULL, "PMPLL"}, + {"ADC Right", NULL, "PMPLL"}, /* Outputs */ - {"LOUT1", "NULL", "LOUT1 Mixer"}, - {"ROUT1", "NULL", "ROUT1 Mixer"}, - {"LOUT2", "NULL", "LOUT2 Mix Amp"}, - {"ROUT2", "NULL", "ROUT2 Mix Amp"}, - {"LOUT3", "NULL", "LOUT3 Mixer"}, - {"ROUT3", "NULL", "ROUT3 Mixer"}, + {"LOUT1", NULL, "LOUT1 Mixer"}, + {"ROUT1", NULL, "ROUT1 Mixer"}, + {"LOUT2", NULL, "LOUT2 Mix Amp"}, + {"ROUT2", NULL, "ROUT2 Mix Amp"}, + {"LOUT3", NULL, "LOUT3 Mixer"}, + {"ROUT3", NULL, "ROUT3 Mixer"}, {"LOUT1 Mixer", "DACL", "DAC Left"}, {"ROUT1 Mixer", "DACR", "DAC Right"}, {"LOUT2 Mixer", "DACHL", "DAC Left"}, {"ROUT2 Mixer", "DACHR", "DAC Right"}, - {"LOUT2 Mix Amp", "NULL", "LOUT2 Mixer"}, - {"ROUT2 Mix Amp", "NULL", "ROUT2 Mixer"}, + {"LOUT2 Mix Amp", NULL, "LOUT2 Mixer"}, + {"ROUT2 Mix Amp", NULL, "ROUT2 Mixer"}, {"LOUT3 Mixer", "DACSL", "DAC Left"}, {"ROUT3 Mixer", "DACSR", "DAC Right"}, @@ -381,18 +381,18 @@ static const struct snd_soc_dapm_route ak4671_intercon[] = { {"LIN2", NULL, "Mic Bias"}, {"RIN2", NULL, "Mic Bias"}, - {"ADC Left", "NULL", "LIN MUX"}, - {"ADC Right", "NULL", "RIN MUX"}, + {"ADC Left", NULL, "LIN MUX"}, + {"ADC Right", NULL, "RIN MUX"}, /* Analog Loops */ - {"LIN1 Mixing Circuit", "NULL", "LIN1"}, - {"RIN1 Mixing Circuit", "NULL", "RIN1"}, - {"LIN2 Mixing Circuit", "NULL", "LIN2"}, - {"RIN2 Mixing Circuit", "NULL", "RIN2"}, - {"LIN3 Mixing Circuit", "NULL", "LIN3"}, - {"RIN3 Mixing Circuit", "NULL", "RIN3"}, - {"LIN4 Mixing Circuit", "NULL", "LIN4"}, - {"RIN4 Mixing Circuit", "NULL", "RIN4"}, + {"LIN1 Mixing Circuit", NULL, "LIN1"}, + {"RIN1 Mixing Circuit", NULL, "RIN1"}, + {"LIN2 Mixing Circuit", NULL, "LIN2"}, + {"RIN2 Mixing Circuit", NULL, "RIN2"}, + {"LIN3 Mixing Circuit", NULL, "LIN3"}, + {"RIN3 Mixing Circuit", NULL, "RIN3"}, + {"LIN4 Mixing Circuit", NULL, "LIN4"}, + {"RIN4 Mixing Circuit", NULL, "RIN4"}, {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"}, {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"}, diff 
--git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c index 79a4efcb894c..7d3a6accaf9a 100644 --- a/sound/soc/codecs/cs4271.c +++ b/sound/soc/codecs/cs4271.c @@ -286,7 +286,7 @@ static int cs4271_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = cs4271->deemph; + ucontrol->value.integer.value[0] = cs4271->deemph; return 0; } @@ -296,7 +296,7 @@ static int cs4271_put_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); - cs4271->deemph = ucontrol->value.enumerated.item[0]; + cs4271->deemph = ucontrol->value.integer.value[0]; return cs4271_set_deemph(codec); } diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c index ffe96175a8a5..911c26c705fc 100644 --- a/sound/soc/codecs/da732x.c +++ b/sound/soc/codecs/da732x.c @@ -876,11 +876,11 @@ static const struct snd_soc_dapm_widget da732x_dapm_widgets[] = { static const struct snd_soc_dapm_route da732x_dapm_routes[] = { /* Inputs */ - {"AUX1L PGA", "NULL", "AUX1L"}, - {"AUX1R PGA", "NULL", "AUX1R"}, + {"AUX1L PGA", NULL, "AUX1L"}, + {"AUX1R PGA", NULL, "AUX1R"}, {"MIC1 PGA", NULL, "MIC1"}, - {"MIC2 PGA", "NULL", "MIC2"}, - {"MIC3 PGA", "NULL", "MIC3"}, + {"MIC2 PGA", NULL, "MIC2"}, + {"MIC3 PGA", NULL, "MIC3"}, /* Capture Path */ {"ADC1 Left MUX", "MIC1", "MIC1 PGA"}, diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index f27325155ace..c5f35a07e8e4 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c @@ -120,7 +120,7 @@ static int es8328_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = es8328->deemph; + ucontrol->value.integer.value[0] = es8328->deemph; return 0; } @@ -129,7 +129,7 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct es8328_priv *es8328 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; int ret; if (deemph > 1) diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c index a722a023c262..477e13d30971 100644 --- a/sound/soc/codecs/pcm1681.c +++ b/sound/soc/codecs/pcm1681.c @@ -118,7 +118,7 @@ static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = priv->deemph; + ucontrol->value.integer.value[0] = priv->deemph; return 0; } @@ -129,7 +129,7 @@ static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); - priv->deemph = ucontrol->value.enumerated.item[0]; + priv->deemph = ucontrol->value.integer.value[0]; return pcm1681_set_deemph(codec); } diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index f374840a5a7c..9b541e52da8c 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c @@ -1198,7 +1198,7 @@ static struct dmi_system_id dmi_dell_dino[] = { .ident = "Dell Dino", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_BOARD_NAME, 
"0144P8") + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343") } }, { } diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index e182e6569bbd..3593a1496056 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1151,13 +1151,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec) /* Enable VDDC charge pump */ ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP; } else if (vddio >= 3100 && vdda >= 3100) { - /* - * if vddio and vddd > 3.1v, - * charge pump should be clean before set ana_pwr - */ - snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, - SGTL5000_VDDC_CHRGPMP_POWERUP, 0); - + ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP; /* VDDC use VDDIO rail */ lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD; lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO << diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c index 47b257e41809..82095d6cd070 100644 --- a/sound/soc/codecs/sn95031.c +++ b/sound/soc/codecs/sn95031.c @@ -538,8 +538,8 @@ static const struct snd_soc_dapm_route sn95031_audio_map[] = { /* speaker map */ { "IHFOUTL", NULL, "Speaker Rail"}, { "IHFOUTR", NULL, "Speaker Rail"}, - { "IHFOUTL", "NULL", "Speaker Left Playback"}, - { "IHFOUTR", "NULL", "Speaker Right Playback"}, + { "IHFOUTL", NULL, "Speaker Left Playback"}, + { "IHFOUTR", NULL, "Speaker Right Playback"}, { "Speaker Left Playback", NULL, "Speaker Left Filter"}, { "Speaker Right Playback", NULL, "Speaker Right Filter"}, { "Speaker Left Filter", NULL, "IHFDAC Left"}, diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c index 249ef5c4c762..32942bed34b1 100644 --- a/sound/soc/codecs/tas5086.c +++ b/sound/soc/codecs/tas5086.c @@ -281,7 +281,7 @@ static int tas5086_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = priv->deemph; + ucontrol->value.integer.value[0] = priv->deemph; return 0; } @@ -292,7 +292,7 @@ static int tas5086_put_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec); - priv->deemph = ucontrol->value.enumerated.item[0]; + priv->deemph = ucontrol->value.integer.value[0]; return tas5086_set_deemph(codec); } diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index 8d9de49a5052..21d5402e343f 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c @@ -610,7 +610,7 @@ static int wm2000_anc_mode_get(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); - ucontrol->value.enumerated.item[0] = wm2000->anc_active; + ucontrol->value.integer.value[0] = wm2000->anc_active; return 0; } @@ -620,7 +620,7 @@ static int wm2000_anc_mode_put(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); - int anc_active = ucontrol->value.enumerated.item[0]; + int anc_active = ucontrol->value.integer.value[0]; int ret; if (anc_active > 1) @@ -643,7 +643,7 @@ static int wm2000_speaker_get(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); - ucontrol->value.enumerated.item[0] = wm2000->spk_ena; + ucontrol->value.integer.value[0] = wm2000->spk_ena; return 0; } @@ -653,7 +653,7 @@ static int 
wm2000_speaker_put(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm2000_priv *wm2000 = dev_get_drvdata(codec->dev); - int val = ucontrol->value.enumerated.item[0]; + int val = ucontrol->value.integer.value[0]; int ret; if (val > 1) diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c index 098c143f44d6..c6d10533e2bd 100644 --- a/sound/soc/codecs/wm8731.c +++ b/sound/soc/codecs/wm8731.c @@ -125,7 +125,7 @@ static int wm8731_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = wm8731->deemph; + ucontrol->value.integer.value[0] = wm8731->deemph; return 0; } @@ -135,7 +135,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; int ret = 0; if (deemph > 1) diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index dde462c082be..04b04f8e147c 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c @@ -442,7 +442,7 @@ static int wm8903_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = wm8903->deemph; + ucontrol->value.integer.value[0] = wm8903->deemph; return 0; } @@ -452,7 +452,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; int ret = 0; if (deemph > 1) diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index d3b3f57668cc..215e93c1ddf0 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c @@ -525,7 +525,7 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = wm8904->deemph; + ucontrol->value.integer.value[0] = wm8904->deemph; return 0; } @@ -534,7 +534,7 @@ static int wm8904_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; if (deemph > 1) return -EINVAL; diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c index 1ab2d462afad..00bec915d652 100644 --- a/sound/soc/codecs/wm8955.c +++ b/sound/soc/codecs/wm8955.c @@ -393,7 +393,7 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = wm8955->deemph; + ucontrol->value.integer.value[0] = wm8955->deemph; return 0; } @@ -402,7 +402,7 @@ static int wm8955_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec); - int deemph = 
ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; if (deemph > 1) return -EINVAL; diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index cf8fecf97f2c..3035d9856415 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -184,7 +184,7 @@ static int wm8960_get_deemph(struct snd_kcontrol *kcontrol, struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); - ucontrol->value.enumerated.item[0] = wm8960->deemph; + ucontrol->value.integer.value[0] = wm8960->deemph; return 0; } @@ -193,7 +193,7 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol, { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec); - int deemph = ucontrol->value.enumerated.item[0]; + int deemph = ucontrol->value.integer.value[0]; if (deemph > 1) return -EINVAL; diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index 9517571e820d..98c9525bd751 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c @@ -180,7 +180,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol, struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); - unsigned int val = ucontrol->value.enumerated.item[0]; + unsigned int val = ucontrol->value.integer.value[0]; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int mixer, mask, shift, old; @@ -193,7 +193,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol, mutex_lock(&wm9712->lock); old = wm9712->hp_mixer[mixer]; - if (ucontrol->value.enumerated.item[0]) + if (ucontrol->value.integer.value[0]) wm9712->hp_mixer[mixer] |= mask; else wm9712->hp_mixer[mixer] &= ~mask; @@ -231,7 +231,7 @@ static int wm9712_hp_mixer_get(struct snd_kcontrol *kcontrol, mixer = mc->shift >> 8; shift = mc->shift & 0xff; - ucontrol->value.enumerated.item[0] = + ucontrol->value.integer.value[0] = (wm9712->hp_mixer[mixer] >> shift) & 1; return 0; diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c index 68222917b396..79552953e1bd 100644 --- a/sound/soc/codecs/wm9713.c +++ b/sound/soc/codecs/wm9713.c @@ -255,7 +255,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol, struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm); struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); - unsigned int val = ucontrol->value.enumerated.item[0]; + unsigned int val = ucontrol->value.integer.value[0]; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int mixer, mask, shift, old; @@ -268,7 +268,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol, mutex_lock(&wm9713->lock); old = wm9713->hp_mixer[mixer]; - if (ucontrol->value.enumerated.item[0]) + if (ucontrol->value.integer.value[0]) wm9713->hp_mixer[mixer] |= mask; else wm9713->hp_mixer[mixer] &= ~mask; @@ -306,7 +306,7 @@ static int wm9713_hp_mixer_get(struct snd_kcontrol *kcontrol, mixer = mc->shift >> 8; shift = mc->shift & 0xff; - ucontrol->value.enumerated.item[0] = + ucontrol->value.integer.value[0] = (wm9713->hp_mixer[mixer] >> shift) & 1; return 0; diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c index 75870c0ea2c9..91eb3aef7f02 100644 --- 
a/sound/soc/fsl/fsl_spdif.c +++ b/sound/soc/fsl/fsl_spdif.c @@ -1049,7 +1049,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv, enum spdif_txrate index, bool round) { const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; - bool is_sysclk = clk == spdif_priv->sysclk; + bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk); u64 rate_ideal, rate_actual, sub; u32 sysclk_dfmin, sysclk_dfmax; u32 txclk_df, sysclk_df, arate; @@ -1143,7 +1143,7 @@ static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv, spdif_priv->txclk_src[index], rate[index]); dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n", spdif_priv->txclk_df[index], rate[index]); - if (spdif_priv->txclk[index] == spdif_priv->sysclk) + if (clk_is_match(spdif_priv->txclk[index], spdif_priv->sysclk)) dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n", spdif_priv->sysclk_df[index], rate[index]); dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n", diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index b9fabbf69db6..6b0c8f717ec2 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -603,7 +603,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, factor = (div2 + 1) * (7 * psr + 1) * 2; for (i = 0; i < 255; i++) { - tmprate = freq * factor * (i + 2); + tmprate = freq * factor * (i + 1); if (baudclk_is_used) clkrate = clk_get_rate(ssi_private->baudclk); @@ -1227,7 +1227,7 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev, ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0; ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0; - ret = !of_property_read_u32_array(np, "dmas", dmas, 4); + ret = of_property_read_u32_array(np, "dmas", dmas, 4); if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) { ssi_private->use_dual_fifo = true; /* When using dual fifo mode, we need to keep watermark diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c index c42ffae5fe9f..402b728c0a06 100644 --- a/sound/soc/intel/sst-haswell-dsp.c +++ b/sound/soc/intel/sst-haswell-dsp.c @@ -207,9 +207,6 @@ static int hsw_parse_fw_image(struct sst_fw *sst_fw) module = (void *)module + sizeof(*module) + module->mod_size; } - /* allocate scratch mem regions */ - sst_block_alloc_scratch(dsp); - return 0; } diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c index 394af5684c05..863a9ca34b8e 100644 --- a/sound/soc/intel/sst-haswell-ipc.c +++ b/sound/soc/intel/sst-haswell-ipc.c @@ -1732,6 +1732,7 @@ static void sst_hsw_drop_all(struct sst_hsw *hsw) int sst_hsw_dsp_load(struct sst_hsw *hsw) { struct sst_dsp *dsp = hsw->dsp; + struct sst_fw *sst_fw, *t; int ret; dev_dbg(hsw->dev, "loading audio DSP...."); @@ -1748,12 +1749,17 @@ int sst_hsw_dsp_load(struct sst_hsw *hsw) return ret; } - ret = sst_fw_reload(hsw->sst_fw); - if (ret < 0) { - dev_err(hsw->dev, "error: SST FW reload failed\n"); - sst_dsp_dma_put_channel(dsp); - return -ENOMEM; + list_for_each_entry_safe_reverse(sst_fw, t, &dsp->fw_list, list) { + ret = sst_fw_reload(sst_fw); + if (ret < 0) { + dev_err(hsw->dev, "error: SST FW reload failed\n"); + sst_dsp_dma_put_channel(dsp); + return -ENOMEM; + } } + ret = sst_block_alloc_scratch(hsw->dsp); + if (ret < 0) + return -EINVAL; sst_dsp_dma_put_channel(dsp); return 0; @@ -1809,12 +1815,17 @@ int sst_hsw_dsp_runtime_suspend(struct sst_hsw *hsw) int sst_hsw_dsp_runtime_sleep(struct sst_hsw *hsw) { - sst_fw_unload(hsw->sst_fw); - 
sst_block_free_scratch(hsw->dsp); + struct sst_fw *sst_fw, *t; + struct sst_dsp *dsp = hsw->dsp; + + list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) { + sst_fw_unload(sst_fw); + } + sst_block_free_scratch(dsp); hsw->boot_complete = false; - sst_dsp_sleep(hsw->dsp); + sst_dsp_sleep(dsp); return 0; } @@ -1943,6 +1954,11 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata) goto fw_err; } + /* allocate scratch mem regions */ + ret = sst_block_alloc_scratch(hsw->dsp); + if (ret < 0) + goto boot_err; + /* wait for DSP boot completion */ sst_dsp_boot(hsw->dsp); ret = wait_event_timeout(hsw->boot_wait, hsw->boot_complete, diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index def7d8260c4e..d19483081f9b 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c @@ -579,7 +579,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev) if (PTR_ERR(priv->extclk) == -EPROBE_DEFER) return -EPROBE_DEFER; } else { - if (priv->extclk == priv->clk) { + if (clk_is_match(priv->extclk, priv->clk)) { devm_clk_put(&pdev->dev, priv->extclk); priv->extclk = ERR_PTR(-EINVAL); } else { diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 30579ca5bacb..e5c990889dcc 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -347,6 +347,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; + mutex_lock(&client_mutex); + list_for_each_entry(codec, &codec_list, list) { len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", codec->component.name); @@ -358,6 +360,8 @@ static ssize_t codec_list_read_file(struct file *file, char __user *user_buf, } } + mutex_unlock(&client_mutex); + if (ret >= 0) ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); @@ -382,6 +386,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf, if (!buf) return -ENOMEM; + mutex_lock(&client_mutex); + list_for_each_entry(component, &component_list, list) { list_for_each_entry(dai, &component->dai_list, list) { len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", @@ -395,6 +401,8 @@ static ssize_t dai_list_read_file(struct file *file, char __user *user_buf, } } + mutex_unlock(&client_mutex); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); @@ -418,6 +426,8 @@ static ssize_t platform_list_read_file(struct file *file, if (!buf) return -ENOMEM; + mutex_lock(&client_mutex); + list_for_each_entry(platform, &platform_list, list) { len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n", platform->component.name); @@ -429,6 +439,8 @@ static ssize_t platform_list_read_file(struct file *file, } } + mutex_unlock(&client_mutex); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); @@ -836,6 +848,8 @@ static struct snd_soc_component *soc_find_component( { struct snd_soc_component *component; + lockdep_assert_held(&client_mutex); + list_for_each_entry(component, &component_list, list) { if (of_node) { if (component->dev->of_node == of_node) @@ -854,6 +868,8 @@ static struct snd_soc_dai *snd_soc_find_dai( struct snd_soc_component *component; struct snd_soc_dai *dai; + lockdep_assert_held(&client_mutex); + /* Find CPU DAI from registered DAIs*/ list_for_each_entry(component, &component_list, list) { if (dlc->of_node && component->dev->of_node != dlc->of_node) @@ -1508,6 +1524,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card) struct snd_soc_codec *codec; int ret, i, order; + mutex_lock(&client_mutex); 
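The soc-core hunks above close a race on the client lists: the debugfs dumpers and the lookup helpers used to walk codec_list, component_list and platform_list with no protection against concurrent registration or removal. The fix splits the responsibility: public entry points take client_mutex around the walk, while internal finders such as soc_find_component() and snd_soc_find_dai() merely document the requirement with lockdep_assert_held(), which warns under CONFIG_PROVE_LOCKING and compiles away otherwise. A compact sketch of that division of labor follows; struct item, item_find() and item_lookup() are illustrative names, not the driver's API:

    #include <linux/list.h>
    #include <linux/lockdep.h>
    #include <linux/mutex.h>
    #include <linux/string.h>

    static DEFINE_MUTEX(client_mutex);      /* stand-in for soc-core's lock */
    static LIST_HEAD(item_list);

    struct item {
            struct list_head list;
            const char *name;
    };

    /* Internal finder: the caller must already hold client_mutex. */
    static struct item *item_find(const char *name)
    {
            struct item *it;

            lockdep_assert_held(&client_mutex);
            list_for_each_entry(it, &item_list, list)
                    if (!strcmp(it->name, name))
                            return it;
            return NULL;
    }

    /* Public entry point: owns the locking, mirroring the
     * snd_soc_lookup_platform() change later in the diff.
     */
    static struct item *item_lookup(const char *name)
    {
            struct item *it;

            mutex_lock(&client_mutex);
            it = item_find(name);
            mutex_unlock(&client_mutex);
            return it;
    }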
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT); /* bind DAIs */ @@ -1662,6 +1679,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card) card->instantiated = 1; snd_soc_dapm_sync(&card->dapm); mutex_unlock(&card->mutex); + mutex_unlock(&client_mutex); return 0; @@ -1680,6 +1698,7 @@ card_probe_error: base_error: mutex_unlock(&card->mutex); + mutex_unlock(&client_mutex); return ret; } @@ -2713,13 +2732,6 @@ static void snd_soc_component_del_unlocked(struct snd_soc_component *component) list_del(&component->list); } -static void snd_soc_component_del(struct snd_soc_component *component) -{ - mutex_lock(&client_mutex); - snd_soc_component_del_unlocked(component); - mutex_unlock(&client_mutex); -} - int snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *cmpnt_drv, struct snd_soc_dai_driver *dai_drv, @@ -2767,14 +2779,17 @@ void snd_soc_unregister_component(struct device *dev) { struct snd_soc_component *cmpnt; + mutex_lock(&client_mutex); list_for_each_entry(cmpnt, &component_list, list) { if (dev == cmpnt->dev && cmpnt->registered_as_component) goto found; } + mutex_unlock(&client_mutex); return; found: - snd_soc_component_del(cmpnt); + snd_soc_component_del_unlocked(cmpnt); + mutex_unlock(&client_mutex); snd_soc_component_cleanup(cmpnt); kfree(cmpnt); } @@ -2882,10 +2897,14 @@ struct snd_soc_platform *snd_soc_lookup_platform(struct device *dev) { struct snd_soc_platform *platform; + mutex_lock(&client_mutex); list_for_each_entry(platform, &platform_list, list) { - if (dev == platform->dev) + if (dev == platform->dev) { + mutex_unlock(&client_mutex); return platform; + } } + mutex_unlock(&client_mutex); return NULL; } @@ -3090,15 +3109,15 @@ void snd_soc_unregister_codec(struct device *dev) { struct snd_soc_codec *codec; + mutex_lock(&client_mutex); list_for_each_entry(codec, &codec_list, list) { if (dev == codec->dev) goto found; } + mutex_unlock(&client_mutex); return; found: - - mutex_lock(&client_mutex); list_del(&codec->list); snd_soc_component_del_unlocked(&codec->component); mutex_unlock(&client_mutex); diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 67d476548dcf..07f984d5f516 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, +{ + USB_DEVICE(0x0582, 0x0159), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + /* .vendor_name = "Roland", */ + /* .product_name = "UA-22", */ + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 2, + .type = QUIRK_MIDI_FIXED_ENDPOINT, + .data = & (const struct snd_usb_midi_endpoint_info) { + .out_cables = 0x0001, + .in_cables = 0x0001 + } + }, + { + .ifnum = -1 + } + } + } +}, /* this catches most recent vendor-specific Roland devices */ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 61bf9128e1f2..9d9db3b296dd 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -30,6 +30,8 @@ static int disasm_line__parse(char *line, char **namep, char **rawp); static void ins__delete(struct ins_operands *ops) { + if (ops == NULL) + return; zfree(&ops->source.raw); zfree(&ops->source.name); zfree(&ops->target.raw); diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index 
3ed7c0476d48..2e2ba2efa0d9 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile @@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ) $(ECHO) " CC " $@ - $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@ + $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@ $(QUIET) $(STRIPCMD) $@ $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC) diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c index e238c9559caf..8d5d1d2ee7c1 100644 --- a/tools/testing/selftests/exec/execveat.c +++ b/tools/testing/selftests/exec/execveat.c @@ -30,7 +30,7 @@ static int execveat_(int fd, const char *path, char **argv, char **envp, #ifdef __NR_execveat return syscall(__NR_execveat, fd, path, argv, envp, flags); #else - errno = -ENOSYS; + errno = ENOSYS; return -1; #endif } @@ -234,6 +234,14 @@ static int run_tests(void) int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC); int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC); + /* Check if we have execveat at all, and bail early if not */ + errno = 0; + execveat_(-1, NULL, NULL, NULL, 0); + if (errno == ENOSYS) { + printf("[FAIL] ENOSYS calling execveat - no kernel support?\n"); + return 1; + } + /* Change file position to confirm it doesn't affect anything */ lseek(fd, 10, SEEK_SET); diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c index a0a7b5d1a070..f9b9c7c51372 100644 --- a/virt/kvm/arm/vgic-v2.c +++ b/virt/kvm/arm/vgic-v2.c @@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, { if (!(lr_desc.state & LR_STATE_MASK)) vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); + else + vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr); } static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) @@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; } +static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0; +} + static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) { u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; @@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = { .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, .get_elrsr = vgic_v2_get_elrsr, .get_eisr = vgic_v2_get_eisr, + .clear_eisr = vgic_v2_clear_eisr, .get_interrupt_status = vgic_v2_get_interrupt_status, .enable_underflow = vgic_v2_enable_underflow, .disable_underflow = vgic_v2_disable_underflow, diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c index 3a62d8a9a2c6..dff06021e748 100644 --- a/virt/kvm/arm/vgic-v3.c +++ b/virt/kvm/arm/vgic-v3.c @@ -104,6 +104,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, { if (!(lr_desc.state & LR_STATE_MASK)) vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); + else + vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr); } static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) @@ -116,6 +118,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu) return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; } +static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0; +} + static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) { u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; @@ -192,6 +199,7 @@ static const struct vgic_ops vgic_v3_ops = { .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, .get_elrsr = 
vgic_v3_get_elrsr, .get_eisr = vgic_v3_get_eisr, + .clear_eisr = vgic_v3_clear_eisr, .get_interrupt_status = vgic_v3_get_interrupt_status, .enable_underflow = vgic_v3_enable_underflow, .disable_underflow = vgic_v3_disable_underflow, diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 0cc6ab6005a0..c9f60f524588 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -883,6 +883,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) return vgic_ops->get_eisr(vcpu); } +static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu) +{ + vgic_ops->clear_eisr(vcpu); +} + static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) { return vgic_ops->get_interrupt_status(vcpu); @@ -922,6 +927,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) vgic_set_lr(vcpu, lr_nr, vlr); clear_bit(lr_nr, vgic_cpu->lr_used); vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; + vgic_sync_lr_elrsr(vcpu, lr_nr, vlr); } /* @@ -978,6 +984,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); vlr.state |= LR_STATE_PENDING; vgic_set_lr(vcpu, lr, vlr); + vgic_sync_lr_elrsr(vcpu, lr, vlr); return true; } } @@ -999,6 +1006,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) vlr.state |= LR_EOI_INT; vgic_set_lr(vcpu, lr, vlr); + vgic_sync_lr_elrsr(vcpu, lr, vlr); return true; } @@ -1136,6 +1144,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) if (status & INT_STATUS_UNDERFLOW) vgic_disable_underflow(vcpu); + /* + * In the next iterations of the vcpu loop, if we sync the vgic state + * after flushing it, but before entering the guest (this happens for + * pending signals and vmid rollovers), then make sure we don't pick + * up any old maintenance interrupts here. + */ + vgic_clear_eisr(vcpu); + return level_pending; } @@ -1583,8 +1599,10 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) * emulation. So check this here again. KVM_CREATE_DEVICE does * the proper checks already. */ - if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) - return -ENODEV; + if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) { + ret = -ENODEV; + goto out; + } /* * Any time a vcpu is run, vcpu_load is called which tries to grab the diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a1093700f3a4..a2214d9609bd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2492,6 +2492,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) case KVM_CAP_SIGNAL_MSI: #endif #ifdef CONFIG_HAVE_KVM_IRQFD + case KVM_CAP_IRQFD: case KVM_CAP_IRQFD_RESAMPLE: #endif case KVM_CAP_CHECK_EXTENSION_VM:
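
A few notes on recurring patterns in the hunks above, each with a reduced sketch; all function, variable, and clock names in the sketches are illustrative unless they appear verbatim in the diffs.

First, the clk comparisons (fsl_spdif.c, fsl_ssi.c context, kirkwood-i2s.c): with per-user clk handles in the common clock framework, two struct clk pointers returned by clk_get() can differ even when they refer to the same hardware clock, so raw pointer equality silently stops working; clk_is_match() compares the underlying clocks instead. A minimal sketch of the pattern, with hypothetical clock names "core" and "ext":

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Sketch: check whether two clocks handed to a driver are really the
 * same hardware clock.  The clock names here are made up.
 */
static bool driver_clks_shared(struct device *dev)
{
	struct clk *core = devm_clk_get(dev, "core");
	struct clk *ext = devm_clk_get(dev, "ext");

	if (IS_ERR(core) || IS_ERR(ext))
		return false;

	/*
	 * Not "core == ext": each clk_get() may hand out a distinct
	 * struct clk * for the same underlying clock, so only
	 * clk_is_match() gives a reliable answer.
	 */
	return clk_is_match(core, ext);
}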
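
The fsl_ssi.c hunk that drops the '!' is a logic fix worth spelling out: of_property_read_u32_array() returns 0 on success and a negative errno on failure, so negating its result made ret non-zero exactly when the read succeeded, meaning the dual-FIFO check could never fire on success and, on failure, tested uninitialized stack data. The correct shape of the call, reduced to a sketch (the dual-FIFO DMA type is passed in rather than hard-coding the i.MX-specific constant):

#include <linux/of.h>
#include <linux/types.h>

/*
 * Sketch: read the 4 cells of a "dmas" property and test one of them.
 * of_property_read_u32_array() returns 0 on success / -errno on
 * failure; never negate the result before checking it.
 */
static bool node_wants_dual_fifo(struct device_node *np, u32 dual_type)
{
	u32 dmas[4];
	int ret = of_property_read_u32_array(np, "dmas", dmas, 4);

	return ret == 0 && dmas[2] == dual_type;
}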
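
In the sst-haswell-ipc.c changes, the single hsw->sst_fw reload/unload becomes a walk over dsp->fw_list: the sleep path uses list_for_each_entry_safe() because sst_fw_unload() can drop the current entry from the list, and the load path iterates with the _safe_reverse variant so images are revisited from the oldest entry first. A generic sketch of why the _safe iterators are required (struct fw_entry is a stand-in, not the driver's type):

#include <linux/list.h>
#include <linux/slab.h>

struct fw_entry {
	struct list_head list;
	/* ... per-firmware state ... */
};

/*
 * Sketch: tear down every entry on a list.  The _safe iterator caches
 * the next node in @tmp before the body runs, so list_del() + kfree()
 * of the current entry cannot derail the traversal.
 */
static void unload_all(struct list_head *fw_list)
{
	struct fw_entry *fw, *tmp;

	list_for_each_entry_safe(fw, tmp, fw_list, list) {
		list_del(&fw->list);
		kfree(fw);
	}
}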
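
The soc-core.c hunks follow one pattern throughout: every reader of codec_list / component_list / platform_list now takes client_mutex, the internal lookup helpers assert the lock with lockdep_assert_held() (a no-op unless lockdep is enabled), and the unregister paths do the lookup and unlink under a single lock acquisition, freeing only after the unlock. A toy registry showing the same split between locked entry points and assert-only helpers (all names hypothetical):

#include <linux/device.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(registry_mutex);
static LIST_HEAD(registry);

struct reg_item {
	struct list_head list;
	struct device *dev;
};

/* Internal helper: callers must already hold registry_mutex. */
static struct reg_item *registry_find(struct device *dev)
{
	struct reg_item *it;

	lockdep_assert_held(&registry_mutex);

	list_for_each_entry(it, &registry, list)
		if (it->dev == dev)
			return it;
	return NULL;
}

/* Public entry point: lookup and unlink under one lock acquisition. */
static void registry_remove(struct device *dev)
{
	struct reg_item *it;

	mutex_lock(&registry_mutex);
	it = registry_find(dev);
	if (it)
		list_del(&it->list);
	mutex_unlock(&registry_mutex);

	kfree(it);		/* kfree(NULL) is a no-op */
}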
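
Two conventions meet in the execveat.c selftest fix: errno takes positive error codes (the "errno = -ENOSYS" it replaces would never compare equal to ENOSYS), and a syscall's availability can be probed with a deliberately invalid call before the suite runs. A standalone sketch of the probe; only the fallback path and the ENOSYS check are the point:

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wrapper: on failure, return -1 and set a *positive* errno. */
static int execveat_(int fd, const char *path, char **argv, char **envp,
		     int flags)
{
#ifdef __NR_execveat
	return syscall(__NR_execveat, fd, path, argv, envp, flags);
#else
	errno = ENOSYS;		/* not -ENOSYS; errno values are positive */
	return -1;
#endif
}

int main(void)
{
	/*
	 * Deliberately bogus call: a kernel with execveat() fails it
	 * with some other errno (e.g. EBADF or EFAULT); only a kernel
	 * without the syscall answers ENOSYS.
	 */
	errno = 0;
	execveat_(-1, NULL, NULL, NULL, 0);
	if (errno == ENOSYS) {
		printf("no execveat() kernel support\n");
		return 1;
	}
	/* ... run the real tests here ... */
	return 0;
}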
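
The vgic-v2.c / vgic-v3.c / vgic.c changes maintain one invariant: the cached ELRSR bitmap (bit set = list register empty) must track every LR write. The sync helper therefore now clears the bit when the LR is occupied instead of only setting it when empty, and vgic_retire_lr() / vgic_queue_irq() call it after each vgic_set_lr(). The two-sided bit update, reduced to its core (the 0x3 state mask is assumed from the generic LR encoding, not taken from the diff):

#include <linux/types.h>

#define LR_STATE_MASK	0x3	/* pending | active; assumed encoding */

struct lr_desc {
	u32 state;
};

/*
 * Sketch: keep an "empty LR" bitmap coherent.  Setting the bit when
 * the LR drains is only half the job; a one-sided update leaves a
 * stale "empty" bit behind when the LR is reused.
 */
static void sync_lr_elrsr(u64 *elrsr, int lr, struct lr_desc desc)
{
	if (!(desc.state & LR_STATE_MASK))
		*elrsr |= 1ULL << lr;		/* LR went empty */
	else
		*elrsr &= ~(1ULL << lr);	/* LR is in use */
}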
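
Finally, the kvm_vgic_create() hunk swaps an early "return -ENODEV" for "ret = -ENODEV; goto out": the surrounding code has already taken a lock by that point (kvm->lock, going by the rest of the function), so every error path must funnel through the common unlock label. The idiom in isolation, with a hypothetical supported-type check standing in for the GICv2 test:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

/*
 * Sketch of the unlock-on-error idiom.  Once the lock is held, every
 * exit path must pass through the label that releases it; a bare
 * "return -ENODEV" would leak the lock.
 */
static int create_thing(struct mutex *lock, bool supported)
{
	int ret = 0;

	mutex_lock(lock);

	if (!supported) {
		ret = -ENODEV;
		goto out;
	}

	/* ... actual creation work ... */
out:
	mutex_unlock(lock);
	return ret;
}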