295 files changed, 7699 insertions, 2102 deletions
@@ -2478,12 +2478,11 @@ S: D-90453 Nuernberg S: Germany N: Arnaldo Carvalho de Melo -W: http://oops.ghostprotocols.net:81/blog/ P: 1024D/9224DF01 D5DF E3BB E3C8 BCBB F8AD 841A B6AB 4681 9224 DF01 -D: IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks +D: tools/, IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks S: Brazil N: Karsten Merker diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt index 95383c5131fc..8f427550720a 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt @@ -6,6 +6,7 @@ Required properties: - compatible: should be "rockchip,<name>-gmac" "rockchip,rk3228-gmac": found on RK322x SoCs "rockchip,rk3288-gmac": found on RK3288 SoCs + "rockchip,rk3328-gmac": found on RK3328 SoCs "rockchip,rk3366-gmac": found on RK3366 SoCs "rockchip,rk3368-gmac": found on RK3368 SoCs "rockchip,rk3399-gmac": found on RK3399 SoCs diff --git a/Documentation/filesystems/afs.txt b/Documentation/filesystems/afs.txt index ffef91c4e0d6..060da408923b 100644 --- a/Documentation/filesystems/afs.txt +++ b/Documentation/filesystems/afs.txt @@ -64,8 +64,7 @@ USAGE When inserting the driver modules the root cell must be specified along with a list of volume location server IP addresses: - modprobe af_rxrpc - modprobe rxkad + modprobe rxrpc modprobe kafs rootcell=cambridge.redhat.com:172.16.18.73:172.16.18.91 The first module is the AF_RXRPC network protocol driver. This provides the @@ -214,34 +213,3 @@ If a file is opened with a particular key and then the file descriptor is passed to a process that doesn't have that key (perhaps over an AF_UNIX socket), then the operations on the file will be made with the key that was used to open the file. - - -======== -EXAMPLES -======== - -Here's what I use to test this. Some of the names and IP addresses are local -to my internal DNS. My "root.afs" partition has a mount point within it for -some public volumes. - -insmod /tmp/rxrpc.o -insmod /tmp/rxkad.o -insmod /tmp/kafs.o rootcell=cambridge.redhat.com:172.16.18.91 - -mount -t afs \%root.afs. /afs -mount -t afs \%cambridge.redhat.com:root.cell. /afs/cambridge.redhat.com/ - -echo add grand.central.org 18.9.48.14:128.2.203.61:130.237.48.87 > /proc/fs/afs/cells -mount -t afs "#grand.central.org:root.cell." /afs/grand.central.org/ -mount -t afs "#grand.central.org:root.archive." /afs/grand.central.org/archive -mount -t afs "#grand.central.org:root.contrib." /afs/grand.central.org/contrib -mount -t afs "#grand.central.org:root.doc." /afs/grand.central.org/doc -mount -t afs "#grand.central.org:root.project." /afs/grand.central.org/project -mount -t afs "#grand.central.org:root.service." /afs/grand.central.org/service -mount -t afs "#grand.central.org:root.software." /afs/grand.central.org/software -mount -t afs "#grand.central.org:root.user." /afs/grand.central.org/user - -umount /afs -rmmod kafs -rmmod rxkad -rmmod rxrpc diff --git a/Documentation/media/uapi/v4l/pixfmt-007.rst b/Documentation/media/uapi/v4l/pixfmt-007.rst index 44bb5a7059b3..95a23a28c595 100644 --- a/Documentation/media/uapi/v4l/pixfmt-007.rst +++ b/Documentation/media/uapi/v4l/pixfmt-007.rst @@ -211,7 +211,13 @@ Colorspace sRGB (V4L2_COLORSPACE_SRGB) The :ref:`srgb` standard defines the colorspace used by most webcams and computer graphics. The default transfer function is ``V4L2_XFER_FUNC_SRGB``. The default Y'CbCr encoding is -``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full range.
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited range. + +Note that the :ref:`sycc` standard specifies full range quantization; +however, all current capture hardware supported by the kernel converts +R'G'B' to limited range Y'CbCr. So choosing full range as the default +would break how applications interpret the quantization range. + The chromaticities of the primary colors and the white reference are: @@ -276,7 +282,7 @@ the following ``V4L2_YCBCR_ENC_601`` encoding as defined by :ref:`sycc`: Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range [-0.5…0.5]. This transform is identical to one defined in SMPTE -170M/BT.601. The Y'CbCr quantization is full range. +170M/BT.601. The Y'CbCr quantization is limited range. .. _col-adobergb: @@ -288,10 +294,15 @@ The :ref:`adobergb` standard defines the colorspace used by computer graphics that use the AdobeRGB colorspace. This is also known as the :ref:`oprgb` standard. The default transfer function is ``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is -``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full -range. The chromaticities of the primary colors and the white reference -are: +``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited +range. + +Note that the :ref:`oprgb` standard specifies full range quantization; +however, all current capture hardware supported by the kernel converts +R'G'B' to limited range Y'CbCr. So choosing full range as the default +would break how applications interpret the quantization range. +The chromaticities of the primary colors and the white reference are: .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}| @@ -344,7 +355,7 @@ the following ``V4L2_YCBCR_ENC_601`` encoding: Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range [-0.5…0.5]. This transform is identical to one defined in SMPTE -170M/BT.601. The Y'CbCr quantization is full range. +170M/BT.601. The Y'CbCr quantization is limited range. .. _col-bt2020: diff --git a/Documentation/networking/gtp.txt b/Documentation/networking/gtp.txt new file mode 100644 index 000000000000..93e96750f103 --- /dev/null +++ b/Documentation/networking/gtp.txt @@ -0,0 +1,135 @@ +The Linux kernel GTP tunneling module +====================================================================== +Documentation by Harald Welte <[email protected]> + +In 'drivers/net/gtp.c' you will find a kernel-level implementation +of a GTP tunnel endpoint. + +== What is GTP == + +GTP is the GPRS Tunnelling Protocol, which is a 3GPP protocol used for +tunneling User-IP payload between a mobile station (phone, modem) +and a gateway towards an external packet data network (such +as the internet). + +So when you start a 'data connection' from your mobile phone, the +phone will use the control plane to signal for the establishment of +such a tunnel between that external data network and the phone. The +tunnel endpoints thus reside on the phone and in the gateway. All +intermediate nodes just transport the encapsulated packet. + +The phone itself does not implement GTP but uses some other +technology-dependent protocol stack for transmitting the user IP +payload, such as LLC/SNDCP/RLC/MAC. + +At some network element inside the cellular operator infrastructure +(SGSN in case of GPRS/EGPRS or classic UMTS, hNodeB in case of a 3G +femtocell, eNodeB in case of 4G/LTE), the cellular protocol stacking +is translated into GTP *without breaking the end-to-end tunnel*.
So +intermediate nodes just perform some specific relay function. + +At some point the GTP packet ends up on the so-called GGSN (GSM/UMTS) +or P-GW (LTE), which terminates the tunnel, decapsulates the packet +and forwards it onto an external packet data network. This can be +the public internet, but can also be any private IP network (or even +theoretically some non-IP network like X.25). + +You can find the protocol specification in 3GPP TS 29.060, available +publicly via the 3GPP website at http://www.3gpp.org/DynaReport/29060.htm + +A direct PDF link to v13.6.0 is provided for convenience below: +http://www.etsi.org/deliver/etsi_ts/129000_129099/129060/13.06.00_60/ts_129060v130600p.pdf + +== The Linux GTP tunnelling module == + +The module implements the function of a tunnel endpoint, i.e. it is +able to decapsulate tunneled IP packets in the uplink originated by +the phone, and encapsulate raw IP packets received from the external +packet network in the downlink towards the phone. + +It *only* implements the so-called 'user plane', carrying the User-IP +payload, called GTP-U. It does not implement the 'control plane', +which is a signaling protocol used for establishment and teardown of +GTP tunnels (GTP-C). + +So in order to have a working GGSN/P-GW setup, you will need a +userspace program that implements the GTP-C protocol and which then +uses the netlink interface provided by the GTP-U module in the kernel +to configure the kernel module. + +This split architecture follows the tunneling modules of other +protocols, e.g. PPPoE or L2TP, where you also run a userspace daemon +to handle the tunnel establishment, authentication etc., and only the +data plane is accelerated inside the kernel. + +Don't be confused by terminology: the GTP User Plane goes through the +kernel-accelerated path, while the GTP Control Plane goes to +userspace :) + +The official homepage of the module is at +https://osmocom.org/projects/linux-kernel-gtp-u/wiki + +== Userspace Programs with Linux Kernel GTP-U support == + +At the time of this writing, there are at least two Free Software +implementations that implement GTP-C and can use the netlink interface +to make use of the Linux kernel GTP-U support: + +* OpenGGSN (classic 2G/3G GGSN in C): + https://osmocom.org/projects/openggsn/wiki/OpenGGSN + +* ergw (GGSN + P-GW in Erlang): + https://github.com/travelping/ergw + +== Userspace Library / Command Line Utilities == + +There is a userspace library called 'libgtpnl' which is based on +libmnl and which implements a C-language API towards the netlink +interface provided by the Kernel GTP module: + +http://git.osmocom.org/libgtpnl/ + +== Protocol Versions == + +There are two different versions of GTP-U: v0 and v1. Both are +implemented in the Kernel GTP module. Version 0 is a legacy version +that has been deprecated in recent 3GPP specifications. + +There are three versions of GTP-C: v0, v1, and v2. As the kernel +doesn't implement GTP-C, we don't have to worry about this. It's the +responsibility of the control plane implementation in userspace to +implement that. + +== IPv6 == + +The 3GPP specifications indicate that either IPv4 or IPv6 can be used both +on the inner (user) IP layer and on the outer (transport) layer. + +Unfortunately, the kernel module currently supports IPv6 neither for +the User IP payload nor for the outer IP layer. Patches or other +contributions to fix this are most welcome!
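+
+== Example: Driving the Module with the libgtpnl Tools ==
+
+As a rough illustration of the control-plane/data-plane split described
+above, the command-line tools shipped with libgtpnl can exercise the
+kernel module's netlink interface directly. This is only a hedged
+sketch: the tool names (gtp-link, gtp-tunnel) come from the libgtpnl
+tools directory, their exact argument order may differ between
+versions, and all TEIDs and addresses below are made-up illustration
+values.
+
+  # load the GTP-U kernel module
+  modprobe gtp
+
+  # create a GTP device and keep its GTP-U UDP sockets open
+  gtp-link add gtp1 &
+
+  # install a GTPv1-U tunnel on gtp1: local TEID 100, remote TEID 200,
+  # mobile station 172.99.0.2, remote peer (e.g. an SGSN) 10.0.0.2
+  gtp-tunnel add gtp1 v1 100 200 172.99.0.2 10.0.0.2
+
+  # list the configured tunnels
+  gtp-tunnel list
+
+In a real GGSN/P-GW, the equivalent netlink operations would be issued
+by the GTP-C implementation whenever a session is established or torn
+down.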
+ == Mailing List == + If you have questions regarding how to use the Kernel GTP module from +your own software, or want to contribute to the code, please use the +osmocom-net-gprs mailing list for related discussion. The list can be +reached at [email protected] and the mailman +interface for managing your subscription is at +https://lists.osmocom.org/mailman/listinfo/osmocom-net-gprs + +== Issue Tracker == + +The Osmocom project maintains an issue tracker for the Kernel GTP-U +module at +https://osmocom.org/projects/linux-kernel-gtp-u/issues + +== History / Acknowledgements == + +The module was originally created in 2012 by Harald Welte, but never +completed. Pablo came in to finish the mess Harald left behind. But +due to a lack of user interest, it never got merged. + +In 2015, Andreas Schultz came to the rescue and fixed lots more bugs, +extended it with new features and finally pushed all of us to get it +mainline, where it was merged in 4.7.0. diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index b80fbd4e5575..2ebabc93014a 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -54,6 +54,18 @@ Values : 1 - enable JIT hardening for unprivileged users only 2 - enable JIT hardening for all users +bpf_jit_kallsyms +---------------- + +When the Berkeley Packet Filter Just-in-Time compiler is enabled, the compiled +images are at addresses unknown to the kernel, meaning they show up neither in +traces nor in /proc/kallsyms. This option enables export of these addresses, which +can be used for debugging/tracing. If bpf_jit_harden is enabled, this feature +is disabled. +Values : + 0 - disable JIT kallsyms export (default value) + 1 - enable JIT kallsyms export for privileged users only + dev_weight -------------- diff --git a/MAINTAINERS b/MAINTAINERS index 62b0307a7ca4..d8f71f21fb88 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -877,8 +877,8 @@ S: Odd fixes F: drivers/hwmon/applesmc.c APPLETALK NETWORK LAYER -M: Arnaldo Carvalho de Melo <[email protected]> -S: Maintained +S: Odd fixes F: drivers/net/appletalk/ F: net/appletalk/ @@ -6748,9 +6748,8 @@ S: Odd Fixes F: drivers/tty/ipwireless/ IPX NETWORK LAYER -M: Arnaldo Carvalho de Melo <[email protected]> -S: Maintained +S: Odd fixes F: include/net/ipx.h F: include/uapi/linux/ipx.h F: net/ipx/ @@ -7522,8 +7521,8 @@ S: Maintained F: drivers/misc/lkdtm* LLC (802.2) -M: Arnaldo Carvalho de Melo <[email protected]> -S: Maintained +S: Odd fixes F: include/linux/llc.h F: include/uapi/linux/llc.h F: include/net/llc* @@ -13416,10 +13415,8 @@ S: Maintained F: drivers/input/misc/wistron_btns.c WL3501 WIRELESS PCMCIA CARD DRIVER -M: Arnaldo Carvalho de Melo <[email protected]> -W: http://oops.ghostprotocols.net:81/blog -S: Maintained +S: Odd fixes F: drivers/net/wireless/wl3501* WOLFSON MICROELECTRONICS DRIVERS @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc8 NAME = Fearless Coyote # *DOCUMENTATION* diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index b416abcbacd8..e5c7d9d8592a 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -825,6 +825,7 @@ CONFIG_QCOM_SMSM=y CONFIG_QCOM_WCNSS_CTRL=m CONFIG_ROCKCHIP_PM_DOMAINS=y CONFIG_COMMON_CLK_QCOM=y +CONFIG_QCOM_CLK_RPM=y CONFIG_CHROME_PLATFORMS=y CONFIG_STAGING_BOARD=y CONFIG_CROS_EC_CHARDEV=m diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 1f59ea051bab..b7e0125c0bbf 100644 ---
a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -478,11 +478,10 @@ extern unsigned long __must_check arm_copy_from_user(void *to, const void __user *from, unsigned long n); static inline unsigned long __must_check -__copy_from_user(void *to, const void __user *from, unsigned long n) +__arch_copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned int __ua_flags; - check_object_size(to, n, false); __ua_flags = uaccess_save_and_enable(); n = arm_copy_from_user(to, from, n); uaccess_restore(__ua_flags); @@ -495,18 +494,15 @@ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); static inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) +__arch_copy_to_user(void __user *to, const void *from, unsigned long n) { #ifndef CONFIG_UACCESS_WITH_MEMCPY unsigned int __ua_flags; - - check_object_size(from, n, true); __ua_flags = uaccess_save_and_enable(); n = arm_copy_to_user(to, from, n); uaccess_restore(__ua_flags); return n; #else - check_object_size(from, n, true); return arm_copy_to_user(to, from, n); #endif } @@ -526,25 +522,49 @@ __clear_user(void __user *addr, unsigned long n) } #else -#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0) -#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0) +#define __arch_copy_from_user(to, from, n) \ + (memcpy(to, (void __force *)from, n), 0) +#define __arch_copy_to_user(to, from, n) \ + (memcpy((void __force *)to, from, n), 0) #define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0) #endif -static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) +static inline unsigned long __must_check +__copy_from_user(void *to, const void __user *from, unsigned long n) +{ + check_object_size(to, n, false); + return __arch_copy_from_user(to, from, n); +} + +static inline unsigned long __must_check +copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res = n; + + check_object_size(to, n, false); + if (likely(access_ok(VERIFY_READ, from, n))) - res = __copy_from_user(to, from, n); + res = __arch_copy_from_user(to, from, n); if (unlikely(res)) memset(to + (n - res), 0, res); return res; } -static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) +static inline unsigned long __must_check +__copy_to_user(void __user *to, const void *from, unsigned long n) { + check_object_size(from, n, true); + + return __arch_copy_to_user(to, from, n); +} + +static inline unsigned long __must_check +copy_to_user(void __user *to, const void *from, unsigned long n) +{ + check_object_size(from, n, true); + if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); + n = __arch_copy_to_user(to, from, n); return n; } diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index 8ecfd15c3a02..df73914e81c8 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -67,7 +67,7 @@ ENTRY(__get_user_4) ENDPROC(__get_user_4) ENTRY(__get_user_8) - check_uaccess r0, 8, r1, r2, __get_user_bad + check_uaccess r0, 8, r1, r2, __get_user_bad8 #ifdef CONFIG_THUMB2_KERNEL 5: TUSER(ldr) r2, [r0] 6: TUSER(ldr) r3, [r0, #4] diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index b2fc97a2c56c..05d12104d270 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -813,11 +813,6 @@ static inline void 
bpf_flush_icache(void *start, void *end) flush_icache_range((unsigned long)start, (unsigned long)end); } -void bpf_jit_compile(struct bpf_prog *prog) -{ - /* Nothing to do here. We support Internal BPF. */ -} - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_prog *tmp, *orig_prog = prog; @@ -915,18 +910,3 @@ out: tmp : orig_prog); return prog; } - -void bpf_jit_free(struct bpf_prog *prog) -{ - unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK; - struct bpf_binary_header *header = (void *)addr; - - if (!prog->jited) - goto free_filter; - - set_memory_rw(addr, header->pages); - bpf_jit_binary_free(header); - -free_filter: - bpf_prog_unlock_free(prog); -} diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 93abf8a9813d..8e1588021d1c 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -347,7 +347,8 @@ early_param("disable_radix", parse_disable_radix); void __init mmu_early_init_devtree(void) { /* Disable radix mode based on kernel command line. */ - if (disable_radix) + /* We don't yet have the machinery to do radix as a guest. */ + if (disable_radix || !(mfmsr() & MSR_HV)) cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; if (early_radix_enabled()) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 73a5cf18fd84..c34166ef76fc 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -961,8 +961,6 @@ common_load: return 0; } -void bpf_jit_compile(struct bpf_prog *fp) { } - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) { u32 proglen; @@ -1066,6 +1064,7 @@ out: return fp; } +/* Overriding bpf_jit_free() as we don't set images read-only. */ void bpf_jit_free(struct bpf_prog *fp) { unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 167b31b186c1..f1d0e62ec1dd 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1263,14 +1263,6 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp) } /* - * Classic BPF function stub. BPF programs will be converted into - * eBPF and then bpf_int_jit_compile() will be called. 
- */ -void bpf_jit_compile(struct bpf_prog *fp) -{ -} - -/* * Compile eBPF program "fp" */ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp) @@ -1347,21 +1339,3 @@ out: tmp : orig_fp); return fp; } - -/* - * Free eBPF program - */ -void bpf_jit_free(struct bpf_prog *fp) -{ - unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; - struct bpf_binary_header *header = (void *)addr; - - if (!fp->jited) - goto free_filter; - - set_memory_rw(addr, header->pages); - bpf_jit_binary_free(header); - -free_filter: - bpf_prog_unlock_free(fp); -} diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 1be64da0384e..e6cfe7ba2d65 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -104,6 +104,7 @@ struct cpuinfo_x86 { __u8 x86_phys_bits; /* CPUID returned core id bits: */ __u8 x86_coreid_bits; + __u8 cu_id; /* Max extended CPUID function supported: */ __u32 extended_cpuid_level; /* Maximum supported CPUID level, -1=no CPUID: */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 1d3167269a67..2b4cf04239b6 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c) /* get information required for multi-node processors */ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { + u32 eax, ebx, ecx, edx; - node_id = cpuid_ecx(0x8000001e) & 7; + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); + + node_id = ecx & 0xff; + smp_num_siblings = ((ebx >> 8) & 0xff) + 1; + + if (c->x86 == 0x15) + c->cu_id = ebx & 0xff; + + if (c->x86 >= 0x17) { + c->cpu_core_id = ebx & 0xff; + + if (smp_num_siblings > 1) + c->x86_max_cores /= smp_num_siblings; + } /* * We may have multiple LLCs if L3 caches exist, so check if we diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 9bab7a8a4293..ede03e849a8b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) c->x86_model_id[0] = '\0'; /* Unset */ c->x86_max_cores = 1; c->x86_coreid_bits = 0; + c->cu_id = 0xff; #ifdef CONFIG_X86_64 c->x86_clflush_size = 64; c->x86_phys_bits = 36; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 46732dc3b73c..99b920d0e516 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -433,9 +433,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) int cpu1 = c->cpu_index, cpu2 = o->cpu_index; if (c->phys_proc_id == o->phys_proc_id && - per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && - c->cpu_core_id == o->cpu_core_id) - return topology_sane(c, o, "smt"); + per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) { + if (c->cpu_core_id == o->cpu_core_id) + return topology_sane(c, o, "smt"); + + if ((c->cu_id != 0xff) && + (o->cu_id != 0xff) && + (c->cu_id == o->cu_id)) + return topology_sane(c, o, "smt"); + } } else if (c->phys_proc_id == o->phys_proc_id && c->cpu_core_id == o->cpu_core_id) { diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index e41af597aed8..37e7cf544e51 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1356,6 +1356,9 @@ void __init tsc_init(void) (unsigned long)cpu_khz / 1000, (unsigned long)cpu_khz % 1000); + /* Sanitize TSC ADJUST before cyc2ns gets initialized */ + tsc_store_and_check_tsc_adjust(true); + /* * Secondary CPUs do not run through tsc_init(), so set up * all the scale factors for all CPUs, assuming the same @@ -1386,8 +1389,6 @@ void __init 
tsc_init(void) if (unsynchronized_tsc()) mark_tsc_unstable("TSCs unsynchronized"); - else - tsc_store_and_check_tsc_adjust(true); check_system_tsc_reliable(); diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index d0db011051a5..728f75378475 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -286,13 +286,6 @@ void check_tsc_sync_source(int cpu) if (unsynchronized_tsc()) return; - if (tsc_clocksource_reliable) { - if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING) - pr_info( - "Skipped synchronization checks as TSC is reliable.\n"); - return; - } - /* * Set the maximum number of test runs to * 1 if the CPU does not provide the TSC_ADJUST MSR @@ -380,14 +373,19 @@ void check_tsc_sync_target(void) int cpus = 2; /* Also aborts if there is no TSC. */ - if (unsynchronized_tsc() || tsc_clocksource_reliable) + if (unsynchronized_tsc()) return; /* * Store, verify and sanitize the TSC adjust register. If * successful, skip the test. + * + * The test is also skipped when the TSC is marked reliable. This + * is true for SoCs which have no fallback clocksource. On these + * SoCs the TSC is frequency synchronized, but still the TSC ADJUST + * register might have been wrecked by the BIOS. */ - if (tsc_store_and_check_tsc_adjust(false)) { + if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) { atomic_inc(&skip_test); return; } diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index ec5d7545e6dc..0442d98367ae 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -160,11 +160,12 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval) static void mark_screen_rdonly(struct mm_struct *mm) { + struct vm_area_struct *vma; + spinlock_t *ptl; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; - spinlock_t *ptl; int i; down_write(&mm->mmap_sem); @@ -177,7 +178,7 @@ static void mark_screen_rdonly(struct mm_struct *mm) pmd = pmd_offset(pud, 0xA0000); if (pmd_trans_huge(*pmd)) { - struct vm_area_struct *vma = find_vma(mm, 0xA0000); + vma = find_vma(mm, 0xA0000); split_huge_pmd(vma, pmd, 0xA0000); } if (pmd_none_or_clear_bad(pmd)) diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index ea9c49adaa1f..8aa6bea1cd6c 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -15,6 +15,7 @@ #include <linux/debugfs.h> #include <linux/mm.h> #include <linux/init.h> +#include <linux/sched.h> #include <linux/seq_file.h> #include <asm/pgtable.h> @@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, } else note_page(m, &st, __pgprot(0), 1); + cond_resched(); start++; } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index bb660e53cbd6..18a62e208826 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1067,13 +1067,13 @@ common_load: ilen = prog - temp; if (ilen > BPF_MAX_INSN_SIZE) { - pr_err("bpf_jit_compile fatal insn size error\n"); + pr_err("bpf_jit: fatal insn size error\n"); return -EFAULT; } if (image) { if (unlikely(proglen + ilen > oldproglen)) { - pr_err("bpf_jit_compile fatal error\n"); + pr_err("bpf_jit: fatal error\n"); return -EFAULT; } memcpy(image + proglen, temp, ilen); @@ -1085,10 +1085,6 @@ common_load: return proglen; } -void bpf_jit_compile(struct bpf_prog *prog) -{ -} - struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_binary_header *header = NULL; @@ -1184,18 +1180,3 @@ out: tmp : orig_prog); return prog; } - -void bpf_jit_free(struct
bpf_prog *fp) -{ - unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; - struct bpf_binary_header *header = (void *)addr; - - if (!fp->jited) - goto free_filter; - - set_memory_rw(addr, header->pages); - bpf_jit_binary_free(header); - -free_filter: - bpf_prog_unlock_free(fp); -} diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index c73a6fcaeb9d..838f07e2b64a 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -3758,7 +3758,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, } #ifdef CONFIG_CFQ_GROUP_IOSCHED -static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) +static bool check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { struct cfq_data *cfqd = cic_to_cfqd(cic); struct cfq_queue *cfqq; @@ -3775,15 +3775,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) * spuriously on a newly created cic but there's no harm. */ if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr)) - return; - - /* - * If we have a non-root cgroup, we can depend on that to - * do proper throttling of writes. Turn off wbt for that - * case, if it was enabled by default. - */ - if (nonroot_cg) - wbt_disable_default(cfqd->queue); + return nonroot_cg; /* * Drop reference to queues. New queues will be assigned in new @@ -3804,9 +3796,13 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) } cic->blkcg_serial_nr = serial_nr; + return nonroot_cg; } #else -static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { } +static inline bool check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) +{ + return false; +} #endif /* CONFIG_CFQ_GROUP_IOSCHED */ static struct cfq_queue ** @@ -4448,11 +4444,12 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio, const int rw = rq_data_dir(rq); const bool is_sync = rq_is_sync(rq); struct cfq_queue *cfqq; + bool disable_wbt; spin_lock_irq(q->queue_lock); check_ioprio_changed(cic, bio); - check_blkcg_changed(cic, bio); + disable_wbt = check_blkcg_changed(cic, bio); new_queue: cfqq = cic_to_cfqq(cic, is_sync); if (!cfqq || cfqq == &cfqd->oom_cfqq) { @@ -4488,6 +4485,10 @@ new_queue: rq->elv.priv[0] = cfqq; rq->elv.priv[1] = cfqq->cfqg; spin_unlock_irq(q->queue_lock); + + if (disable_wbt) + wbt_disable_default(q); + return 0; } diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index aa644487749c..f59771da52ee 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1817,7 +1817,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) mgr->payloads[i].vcpi = req_payload.vcpi; } else if (mgr->payloads[i].num_slots) { mgr->payloads[i].num_slots = 0; - drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]); + drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]); req_payload.payload_state = mgr->payloads[i].payload_state; mgr->payloads[i].start_slot = 0; } diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index fb16070b266e..4a4f9533c53b 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -205,8 +205,8 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y) } if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) || - x >= (crtc->x + crtc->mode.crtc_hdisplay) || - y >= (crtc->y + crtc->mode.crtc_vdisplay)) + x >= (crtc->x + 
crtc->mode.hdisplay) || + y >= (crtc->y + crtc->mode.vdisplay)) goto out_of_bounds; x += xorigin; diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 6d81c56184d3..e9db857c6226 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -475,30 +475,28 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; - u32 ic_tar = 0; + u32 ic_con, ic_tar = 0; /* Disable the adapter */ __i2c_dw_enable_and_wait(dev, false); /* if the slave address is ten bit address, enable 10BITADDR */ - if (dev->dynamic_tar_update_enabled) { + ic_con = dw_readl(dev, DW_IC_CON); + if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) { + ic_con |= DW_IC_CON_10BITADDR_MASTER; /* * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing - * mode has to be enabled via bit 12 of IC_TAR register, - * otherwise bit 4 of IC_CON is used. + * mode has to be enabled via bit 12 of IC_TAR register. + * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be + * detected from registers. */ - if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) - ic_tar = DW_IC_TAR_10BITADDR_MASTER; + ic_tar = DW_IC_TAR_10BITADDR_MASTER; } else { - u32 ic_con = dw_readl(dev, DW_IC_CON); - - if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) - ic_con |= DW_IC_CON_10BITADDR_MASTER; - else - ic_con &= ~DW_IC_CON_10BITADDR_MASTER; - dw_writel(dev, ic_con, DW_IC_CON); + ic_con &= ~DW_IC_CON_10BITADDR_MASTER; } + dw_writel(dev, ic_con, DW_IC_CON); + /* * Set the slave (target) address and enable 10-bit addressing mode * if applicable. @@ -963,7 +961,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) { struct i2c_adapter *adap = &dev->adapter; int r; - u32 reg; init_completion(&dev->cmd_complete); @@ -971,26 +968,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) if (r) return r; - r = i2c_dw_acquire_lock(dev); - if (r) - return r; - - /* - * Test if dynamic TAR update is enabled in this controller by writing - * to IC_10BITADDR_MASTER field in IC_CON: when it is enabled this - * field is read-only so it should not succeed - */ - reg = dw_readl(dev, DW_IC_CON); - dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON); - - if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) == - (reg & DW_IC_CON_10BITADDR_MASTER)) { - dev->dynamic_tar_update_enabled = true; - dev_dbg(dev->dev, "Dynamic TAR update enabled"); - } - - i2c_dw_release_lock(dev); - snprintf(adap->name, sizeof(adap->name), "Synopsys DesignWare I2C adapter"); adap->retries = 3; diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 26250b425e2f..c1db3a5a340f 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -125,7 +125,6 @@ struct dw_i2c_dev { int (*acquire_lock)(struct dw_i2c_dev *dev); void (*release_lock)(struct dw_i2c_dev *dev); bool pm_runtime_disabled; - bool dynamic_tar_update_enabled; }; #define ACCESS_SWAP 0x00000001 diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index fa598f7f4372..1e1d0ad406f2 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1231,6 +1231,7 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0000", 0 }, { "ELAN0100", 0 }, { "ELAN0600", 0 }, + { "ELAN0605", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index 54a5e870a8f5..efbcf8435185 
100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c @@ -19,9 +19,9 @@ #include <linux/bitops.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/irqchip.h> -#include <linux/irqchip/chained_irq.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/mfd/syscon.h> @@ -39,6 +39,7 @@ struct keystone_irq_device { struct irq_domain *irqd; struct regmap *devctrl_regs; u32 devctrl_offset; + raw_spinlock_t wa_lock; }; static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq) @@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d) /* nothing to do here */ } -static void keystone_irq_handler(struct irq_desc *desc) +static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq) { - unsigned int irq = irq_desc_get_irq(desc); - struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); + struct keystone_irq_device *kirq = keystone_irq; + unsigned long wa_lock_flags; unsigned long pending; int src, virq; dev_dbg(kirq->dev, "start irq %d\n", irq); - chained_irq_enter(irq_desc_get_chip(desc), desc); - pending = keystone_irq_readl(kirq); keystone_irq_writel(kirq, pending); @@ -111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc) if (!virq) dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n", src, virq); + raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags); generic_handle_irq(virq); + raw_spin_unlock_irqrestore(&kirq->wa_lock, + wa_lock_flags); } } - chained_irq_exit(irq_desc_get_chip(desc), desc); - dev_dbg(kirq->dev, "end irq %d\n", irq); + return IRQ_HANDLED; } static int keystone_irq_map(struct irq_domain *h, unsigned int virq, @@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev) return -ENODEV; } + raw_spin_lock_init(&kirq->wa_lock); + platform_set_drvdata(pdev, kirq); - irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq); + ret = request_irq(kirq->irq, keystone_irq_handler, + 0, dev_name(dev), kirq); + if (ret) { + irq_domain_remove(kirq->irqd); + return ret; + } /* clear all source bits */ keystone_irq_writel(kirq, ~0x0); @@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev) struct keystone_irq_device *kirq = platform_get_drvdata(pdev); int hwirq; + free_irq(kirq->irq, kirq); + for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++) irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq)); diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index 17304705f2cf..05fa9f7af53c 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = { .irq_ack = icoll_ack_irq, .irq_mask = icoll_mask_irq, .irq_unmask = icoll_unmask_irq, + .flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SKIP_SET_WAKE, }; static struct irq_chip asm9260_icoll_chip = { .irq_ack = icoll_ack_irq, .irq_mask = asm9260_mask_irq, .irq_unmask = asm9260_unmask_irq, + .flags = IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SKIP_SET_WAKE, }; asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index 87a6b65ed3af..ccda41c2c9e4 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c @@ -612,8 +612,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, } memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len); if (msg->len == 1) { - if (cec_msg_initiator(msg) != 0xf || - 
cec_msg_destination(msg) == 0xf) { + if (cec_msg_destination(msg) == 0xf) { dprintk(1, "cec_transmit_msg: invalid poll message\n"); return -EINVAL; } @@ -638,7 +637,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, dprintk(1, "cec_transmit_msg: destination is the adapter itself\n"); return -EINVAL; } - if (cec_msg_initiator(msg) != 0xf && + if (msg->len > 1 && adap->is_configured && !cec_has_log_addr(adap, cec_msg_initiator(msg))) { dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n", cec_msg_initiator(msg)); @@ -1072,7 +1071,7 @@ static int cec_config_log_addr(struct cec_adapter *adap, /* Send poll message */ msg.len = 1; - msg.msg[0] = 0xf0 | log_addr; + msg.msg[0] = (log_addr << 4) | log_addr; err = cec_transmit_msg_fh(adap, &msg, NULL, true); /* diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c index a4dcaec31d02..8c1f926567ec 100644 --- a/drivers/media/usb/siano/smsusb.c +++ b/drivers/media/usb/siano/smsusb.c @@ -218,22 +218,30 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev) static int smsusb_sendrequest(void *context, void *buffer, size_t size) { struct smsusb_device_t *dev = (struct smsusb_device_t *) context; - struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer; - int dummy; + struct sms_msg_hdr *phdr; + int dummy, ret; if (dev->state != SMSUSB_ACTIVE) { pr_debug("Device not active yet\n"); return -ENOENT; } + phdr = kmalloc(size, GFP_KERNEL); + if (!phdr) + return -ENOMEM; + memcpy(phdr, buffer, size); + pr_debug("sending %s(%d) size: %d\n", smscore_translate_msg(phdr->msg_type), phdr->msg_type, phdr->msg_length); smsendian_handle_tx_message((struct sms_msg_data *) phdr); - smsendian_handle_message_header((struct sms_msg_hdr *)buffer); - return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), - buffer, size, &dummy, 1000); + smsendian_handle_message_header((struct sms_msg_hdr *)phdr); + ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), + phdr, size, &dummy, 1000); + + kfree(phdr); + return ret; } static char *smsusb1_fw_lkup[] = { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index b61b52f9da3d..0fccca075e29 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1706,10 +1706,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, err = mmc_select_hs400(card); if (err) goto free_card; - } else if (mmc_card_hs(card)) { + } else { /* Select the desired bus width optionally */ err = mmc_select_bus_width(card); - if (err > 0) { + if (err > 0 && mmc_card_hs(card)) { err = mmc_select_hs_ddr(card); if (err) goto free_card; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 823bc2fd201f..100fbdc9b95c 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -460,6 +460,9 @@ config XEN_NETDEV_BACKEND config VMXNET3 tristate "VMware VMXNET3 ethernet driver" depends on PCI && INET + depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \ + IA64_PAGE_SIZE_64KB || MICROBLAZE_64K_PAGES || \ + PARISC_PAGE_SIZE_64KB || PPC_64K_PAGES) help This driver supports VMware's vmxnet3 virtual ethernet NIC. 
To compile this driver as a module, choose M here: the diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d8c920be5e91..35f19430c84a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3163,12 +3163,6 @@ static void ena_remove(struct pci_dev *pdev) struct ena_com_dev *ena_dev; struct net_device *netdev; - if (!adapter) - /* This device didn't load properly and it's resources - * already released, nothing to do - */ - return; - ena_dev = adapter->ena_dev; netdev = adapter->netdev; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index dea9e9bbb8e7..fed6ac51559f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -123,7 +123,7 @@ void aq_ring_tx_append_buffs(struct aq_ring_s *self, } } -int aq_ring_tx_clean(struct aq_ring_s *self) +void aq_ring_tx_clean(struct aq_ring_s *self) { struct device *dev = aq_nic_get_dev(self->aq_nic); @@ -143,11 +143,6 @@ int aq_ring_tx_clean(struct aq_ring_s *self) if (unlikely(buff->is_eop)) dev_kfree_skb_any(buff->skb); } - - if (aq_ring_avail_dx(self) > AQ_CFG_SKB_FRAGS_MAX) - aq_nic_ndev_queue_start(self->aq_nic, self->idx); - - return 0; } static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, @@ -333,32 +328,6 @@ void aq_ring_rx_deinit(struct aq_ring_s *self) err_exit:; } -void aq_ring_tx_deinit(struct aq_ring_s *self) -{ - if (!self) - goto err_exit; - - for (; self->sw_head != self->sw_tail; - self->sw_head = aq_ring_next_dx(self, self->sw_head)) { - struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; - struct device *ndev = aq_nic_get_dev(self->aq_nic); - - if (likely(buff->is_mapped)) { - if (unlikely(buff->is_sop)) { - dma_unmap_single(ndev, buff->pa, buff->len, - DMA_TO_DEVICE); - } else { - dma_unmap_page(ndev, buff->pa, buff->len, - DMA_TO_DEVICE); - } - } - - if (unlikely(buff->is_eop)) - dev_kfree_skb_any(buff->skb); - } -err_exit:; -} - void aq_ring_free(struct aq_ring_s *self) { if (!self) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 0ac3f9e7bee6..fb296b3fa7fd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -144,13 +144,12 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg); int aq_ring_init(struct aq_ring_s *self); -void aq_ring_tx_deinit(struct aq_ring_s *self); void aq_ring_rx_deinit(struct aq_ring_s *self); void aq_ring_free(struct aq_ring_s *self); void aq_ring_tx_append_buffs(struct aq_ring_s *ring, struct aq_ring_buff_s *buffer, unsigned int buffers); -int aq_ring_tx_clean(struct aq_ring_s *self); +void aq_ring_tx_clean(struct aq_ring_s *self); int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); int aq_ring_rx_fill(struct aq_ring_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index cb30a6396a70..ad5b4d4dac7f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -59,10 +59,14 @@ __acquires(&self->lock) } if (ring[AQ_VEC_TX_ID].sw_head != - ring[AQ_VEC_TX_ID].hw_head) { - err = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); - if (err < 0) - goto err_exit; + ring[AQ_VEC_TX_ID].hw_head) { + 
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); + + if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) > + AQ_CFG_SKB_FRAGS_MAX) { + aq_nic_ndev_queue_start(self->aq_nic, + ring[AQ_VEC_TX_ID].idx); + } was_tx_cleaned = true; } @@ -271,7 +275,7 @@ void aq_vec_deinit(struct aq_vec_s *self) for (i = 0U, ring = self->ring[0]; self->tx_rings > i; ++i, ring = self->ring[i]) { - aq_ring_tx_deinit(&ring[AQ_VEC_TX_ID]); + aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]); } err_exit:; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 5ef60d4f12b4..d59cfcc4c4d5 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -144,7 +144,7 @@ static int bgmac_probe(struct bcma_device *core) goto err; } - ether_addr_copy(bgmac->mac_addr, mac); + ether_addr_copy(bgmac->net_dev->dev_addr, mac); /* On BCM4706 we need common core to access PHY */ if (core->id.id == BCMA_CORE_4706_MAC_GBIT && diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 805e6ed6c390..7b1af950f312 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -169,7 +169,7 @@ static int bgmac_probe(struct platform_device *pdev) mac_addr = of_get_mac_address(np); if (mac_addr) - ether_addr_copy(bgmac->mac_addr, mac_addr); + ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr); else dev_warn(&pdev->dev, "MAC address not present in device tree\n"); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 20fe2520da42..415046750bb4 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1477,14 +1477,13 @@ int bgmac_enet_probe(struct bgmac *bgmac) net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); - if (!is_valid_ether_addr(bgmac->mac_addr)) { + if (!is_valid_ether_addr(net_dev->dev_addr)) { dev_err(bgmac->dev, "Invalid MAC addr: %pM\n", - bgmac->mac_addr); - eth_random_addr(bgmac->mac_addr); + net_dev->dev_addr); + eth_hw_addr_random(net_dev); dev_warn(bgmac->dev, "Using random MAC: %pM\n", - bgmac->mac_addr); + net_dev->dev_addr); } - ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr); /* This (reset &) enable is not preset in specs or reference driver but * Broadcom does it in arch PCI code when enabling fake PCI device. 
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index ab2db76e4fb8..248727dc62f2 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -474,7 +474,6 @@ struct bgmac { struct device *dev; struct device *dma_dev; - unsigned char mac_addr[ETH_ALEN]; u32 feature_flags; struct net_device *net_dev; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8ba3dc2e236c..afb0967d2ce6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4907,8 +4907,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) "continuing\n"); adapter->params.offload = 0; } else { - adapter->tc_u32 = cxgb4_init_tc_u32(adapter, - CXGB4_MAX_LINK_HANDLE); + adapter->tc_u32 = cxgb4_init_tc_u32(adapter); if (!adapter->tc_u32) dev_warn(&pdev->dev, "could not offload tc u32, continuing\n"); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 52af62e0ecb6..a1b19422b339 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -437,28 +437,26 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap) t4_free_mem(adap->tc_u32); } -struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap, - unsigned int size) +struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap) { + unsigned int max_tids = adap->tids.nftids; struct cxgb4_tc_u32_table *t; unsigned int i; - if (!size) + if (!max_tids) return NULL; t = t4_alloc_mem(sizeof(*t) + - (size * sizeof(struct cxgb4_link))); + (max_tids * sizeof(struct cxgb4_link))); if (!t) return NULL; - t->size = size; + t->size = max_tids; for (i = 0; i < t->size; i++) { struct cxgb4_link *link = &t->table[i]; unsigned int bmap_size; - unsigned int max_tids; - max_tids = adap->tids.nftids; bmap_size = BITS_TO_LONGS(max_tids); link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size); if (!link->tid_map) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h index 6bdc885eff22..021261a41c13 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h @@ -37,8 +37,6 @@ #include <net/pkt_cls.h> -#define CXGB4_MAX_LINK_HANDLE 32 - static inline bool can_tc_u32_offload(struct net_device *dev) { struct adapter *adap = netdev2adap(dev); @@ -52,6 +50,5 @@ int cxgb4_delete_knode(struct net_device *dev, __be16 protocol, struct tc_cls_u32_offload *cls); void cxgb4_cleanup_tc_u32(struct adapter *adapter); -struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap, - unsigned int size); +struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap); #endif /* __CXGB4_TC_U32_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 36105b6837cb..d0868c2320da 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -591,7 +591,6 @@ void t4_uld_mem_free(struct adapter *adap) void t4_uld_clean_up(struct adapter *adap) { - struct sge_uld_rxq_info *rxq_info; unsigned int i; if (!adap->uld) @@ -599,7 +598,6 @@ void t4_uld_clean_up(struct adapter *adap) for (i = 0; i < CXGB4_ULD_MAX; i++) { if (!adap->uld[i].handle) continue; - rxq_info = adap->sge.uld_rxq_info[i]; if (adap->flags & 
FULL_INIT_DONE) quiesce_rx_uld(adap, i); if (adap->flags & USING_MSIX) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index ecf3ccc257bc..a323185507ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -169,6 +169,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/ CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR*/ + CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */ /* T6 adapters: */ @@ -185,6 +188,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6011), CH_PCI_ID_TABLE_FENTRY(0x6014), CH_PCI_ID_TABLE_FENTRY(0x6015), + CH_PCI_ID_TABLE_FENTRY(0x6080), + CH_PCI_ID_TABLE_FENTRY(0x6081), CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index f18aba05f1c2..23d82748f52b 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1031,7 +1031,6 @@ static int ethoc_probe(struct platform_device *pdev) struct ethoc *priv = NULL; int num_bd; int ret = 0; - bool random_mac = false; struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev); u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0; @@ -1169,16 +1168,11 @@ static int ethoc_probe(struct platform_device *pdev) /* Check the MAC again for validity, if it still isn't choose and * program a random one. */ - if (!is_valid_ether_addr(netdev->dev_addr)) { - eth_random_addr(netdev->dev_addr); - random_mac = true; - } + if (!is_valid_ether_addr(netdev->dev_addr)) + eth_hw_addr_random(netdev); ethoc_do_set_mac_address(netdev); - if (random_mac) - netdev->addr_assign_type = NET_ADDR_RANDOM; - /* Allow the platform setup code to adjust MII management bus clock. 
*/ if (!eth_clkfreq) { struct clk *clk = devm_clk_get(&pdev->dev, NULL); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 25a14a3fe784..bc5a3347fd4a 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1666,7 +1666,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, free_buffers: /* compensate sw bpool counter changes */ - for (i--; i > 0; i--) { + for (i--; i >= 0; i--) { dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); if (dpaa_bp) { count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2cc552ddd8f0..91a16641e851 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2910,6 +2910,7 @@ static void set_multicast_list(struct net_device *ndev) struct netdev_hw_addr *ha; unsigned int i, bit, data, crc, tmp; unsigned char hash; + unsigned int hash_high = 0, hash_low = 0; if (ndev->flags & IFF_PROMISC) { tmp = readl(fep->hwp + FEC_R_CNTRL); @@ -2932,11 +2933,7 @@ static void set_multicast_list(struct net_device *ndev) return; } - /* Clear filter and add the addresses in hash register - */ - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - + /* Add the addresses in hash register */ netdev_for_each_mc_addr(ha, ndev) { /* calculate crc32 value of mac address */ crc = 0xffffffff; @@ -2954,16 +2951,14 @@ static void set_multicast_list(struct net_device *ndev) */ hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; - if (hash > 31) { - tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - tmp |= 1 << (hash - 32); - writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - } else { - tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); - tmp |= 1 << hash; - writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - } + if (hash > 31) + hash_high |= 1 << (hash - 32); + else + hash_low |= 1 << hash; } + + writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); } /* Set a MAC change in hardware. 
*/ diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index c88918c4c5f3..84ea130eed36 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -337,7 +337,7 @@ struct fman_mac { u8 mac_id; u32 exceptions; bool ptp_tsu_enabled; - bool en_tsu_err_exeption; + bool en_tsu_err_exception; struct dtsec_cfg *dtsec_drv_param; void *fm; struct fman_rev_info fm_rev_info; @@ -1247,12 +1247,12 @@ int dtsec_set_exception(struct fman_mac *dtsec, switch (exception) { case FM_MAC_EX_1G_1588_TS_RX_ERR: if (enable) { - dtsec->en_tsu_err_exeption = true; + dtsec->en_tsu_err_exception = true; iowrite32be(ioread32be(®s->tmr_pemask) | TMR_PEMASK_TSREEN, ®s->tmr_pemask); } else { - dtsec->en_tsu_err_exeption = false; + dtsec->en_tsu_err_exception = false; iowrite32be(ioread32be(®s->tmr_pemask) & ~TMR_PEMASK_TSREEN, ®s->tmr_pemask); @@ -1420,7 +1420,7 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params) dtsec->event_cb = params->event_cb; dtsec->dev_id = params->dev_id; dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en; - dtsec->en_tsu_err_exeption = dtsec->dtsec_drv_param->ptp_exception_en; + dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en; dtsec->fm = params->fm; dtsec->basex_if = params->basex_if; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c46935d4a3fe..9198e6bd5160 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -189,9 +189,10 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, } ltb->map_id = adapter->map_id; adapter->map_id++; + + init_completion(&adapter->fw_done); send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); - init_completion(&adapter->fw_done); wait_for_completion(&adapter->fw_done); return 0; } @@ -505,7 +506,7 @@ rx_pool_alloc_failed: adapter->rx_pool = NULL; rx_pool_arr_alloc_failed: for (i = 0; i < adapter->req_rx_queues; i++) - napi_enable(&adapter->napi[i]); + napi_disable(&adapter->napi[i]); alloc_napi_failed: return -ENOMEM; } @@ -1126,10 +1127,10 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); crq.request_statistics.len = cpu_to_be32(sizeof(struct ibmvnic_statistics)); - ibmvnic_send_crq(adapter, &crq); /* Wait for data to be written */ init_completion(&adapter->stats_done); + ibmvnic_send_crq(adapter, &crq); wait_for_completion(&adapter->stats_done); for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) @@ -1259,8 +1260,6 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) } adapter->rx_scrq = NULL; } - - adapter->requested_caps = 0; } static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter) @@ -1282,8 +1281,6 @@ static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter) adapter->rx_scrq[i]); adapter->rx_scrq = NULL; } - - adapter->requested_caps = 0; } static int disable_scrq_irq(struct ibmvnic_adapter *adapter, @@ -1501,7 +1498,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) adapter->req_rx_queues = adapter->opt_rx_comp_queues; adapter->req_rx_add_queues = adapter->max_rx_add_queues; - adapter->req_mtu = adapter->max_mtu; + adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; } total_queues = adapter->req_tx_queues + adapter->req_rx_queues; @@ -1571,30 +1568,36 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.capability 
= cpu_to_be16(REQ_TX_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); crq.request_capability.number = cpu_to_be64(adapter->req_tx_entries_per_subcrq); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_MTU); crq.request_capability.number = cpu_to_be64(adapter->req_mtu); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); if (adapter->netdev->flags & IFF_PROMISC) { @@ -1602,12 +1605,14 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); crq.request_capability.number = cpu_to_be64(1); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); } } else { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); crq.request_capability.number = cpu_to_be64(0); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); } @@ -1959,112 +1964,112 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter) { union ibmvnic_crq crq; - atomic_set(&adapter->running_cap_queries, 0); + atomic_set(&adapter->running_cap_crqs, 0); memset(&crq, 0, sizeof(crq)); crq.query_capability.first = IBMVNIC_CRQ_CMD; crq.query_capability.cmd = QUERY_CAPABILITY; crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + 
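
Renaming requested_caps to running_cap_crqs and bumping it before every send makes the accounting symmetric with the response side: the counter can never be observed at zero while a request is still in flight, and "all responses received" reduces to the counter reaching zero. A sketch of the pattern (the send_query() wrapper is hypothetical):

    static void send_query(struct ibmvnic_adapter *adapter,
                           union ibmvnic_crq *crq, u16 cap)
    {
            crq->query_capability.capability = cpu_to_be16(cap);
            atomic_inc(&adapter->running_cap_crqs); /* before the send */
            ibmvnic_send_crq(adapter, crq);
    }

    /* in the response handler */
    atomic_dec(&adapter->running_cap_crqs);
    if (atomic_read(&adapter->running_cap_crqs) == 0)
            init_sub_crqs(adapter, 0);      /* all queries answered */
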
atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MIN_MTU); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_MTU); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); - atomic_inc(&adapter->running_cap_queries); + atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); } @@ -2190,12 +2195,12 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq, if (!found) { dev_err(dev, "Couldn't find error id %x\n", - crq->request_error_rsp.error_id); + be32_to_cpu(crq->request_error_rsp.error_id)); return; } dev_err(dev, "Detailed info for error id %x:", - crq->request_error_rsp.error_id); + be32_to_cpu(crq->request_error_rsp.error_id)); for (i = 0; i < error_buff->len; i++) { pr_cont("%02x", (int)error_buff->buff[i]); @@ -2274,8 
+2279,8 @@ static void handle_error_indication(union ibmvnic_crq *crq, dev_err(dev, "Firmware reports %serror id %x, cause %d\n", crq->error_indication. flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "", - crq->error_indication.error_id, - crq->error_indication.error_cause); + be32_to_cpu(crq->error_indication.error_id), + be16_to_cpu(crq->error_indication.error_cause)); error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); if (!error_buff) @@ -2352,6 +2357,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, u64 *req_value; char *name; + atomic_dec(&adapter->running_cap_crqs); switch (be16_to_cpu(crq->request_capability_rsp.capability)) { case REQ_TX_QUEUES: req_value = &adapter->req_tx_queues; @@ -2393,10 +2399,10 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, case PARTIALSUCCESS: dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", *req_value, - (long int)be32_to_cpu(crq->request_capability_rsp. + (long int)be64_to_cpu(crq->request_capability_rsp. number), name); release_sub_crqs_no_irqs(adapter); - *req_value = be32_to_cpu(crq->request_capability_rsp.number); + *req_value = be64_to_cpu(crq->request_capability_rsp.number); init_sub_crqs(adapter, 1); return; default: @@ -2406,12 +2412,13 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, } /* Done receiving requested capabilities, query IP offload support */ - if (++adapter->requested_caps == 7) { + if (atomic_read(&adapter->running_cap_crqs) == 0) { union ibmvnic_crq newcrq; int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); struct ibmvnic_query_ip_offload_buffer *ip_offload_buf = &adapter->ip_offload_buf; + adapter->wait_capability = false; adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf, buf_sz, DMA_FROM_DEVICE); @@ -2547,9 +2554,9 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, struct device *dev = &adapter->vdev->dev; long rc; - atomic_dec(&adapter->running_cap_queries); + atomic_dec(&adapter->running_cap_crqs); netdev_dbg(netdev, "Outstanding queries: %d\n", - atomic_read(&adapter->running_cap_queries)); + atomic_read(&adapter->running_cap_crqs)); rc = crq->query_capability.rc.code; if (rc) { dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); @@ -2631,12 +2638,12 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, break; case MIN_MTU: adapter->min_mtu = be64_to_cpu(crq->query_capability.number); - netdev->min_mtu = adapter->min_mtu; + netdev->min_mtu = adapter->min_mtu - ETH_HLEN; netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); break; case MAX_MTU: adapter->max_mtu = be64_to_cpu(crq->query_capability.number); - netdev->max_mtu = adapter->max_mtu; + netdev->max_mtu = adapter->max_mtu - ETH_HLEN; netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); break; case MAX_MULTICAST_FILTERS: @@ -2707,9 +2714,11 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq, } out: - if (atomic_read(&adapter->running_cap_queries) == 0) + if (atomic_read(&adapter->running_cap_crqs) == 0) { + adapter->wait_capability = false; init_sub_crqs(adapter, 0); /* We're done querying the capabilities, initialize sub-crqs */ + } } static void handle_control_ras_rsp(union ibmvnic_crq *crq, @@ -2804,9 +2813,9 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len, crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator; crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok); crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size; - ibmvnic_send_crq(adapter, &crq); init_completion(&adapter->fw_done); + 
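
The conversions added above are plain endianness fixes: CRQ response fields arrive in big-endian wire format, and logging or storing them raw produces byte-swapped values on little-endian hosts. The request_capability_rsp.number change also fixes a width bug, since a 64-bit field was being narrowed through be32_to_cpu(). Illustrative before/after:

    /* wrong on little-endian: logs the raw __be32 bit pattern */
    dev_err(dev, "error id %x\n", crq->error_indication.error_id);

    /* right: convert from wire order, with the width matching the field */
    dev_err(dev, "error id %x\n",
            be32_to_cpu(crq->error_indication.error_id));
    *req_value = be64_to_cpu(crq->request_capability_rsp.number);
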
ibmvnic_send_crq(adapter, &crq); wait_for_completion(&adapter->fw_done); if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) @@ -3419,6 +3428,18 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, static irqreturn_t ibmvnic_interrupt(int irq, void *instance) { struct ibmvnic_adapter *adapter = instance; + unsigned long flags; + + spin_lock_irqsave(&adapter->crq.lock, flags); + vio_disable_interrupts(adapter->vdev); + tasklet_schedule(&adapter->tasklet); + spin_unlock_irqrestore(&adapter->crq.lock, flags); + return IRQ_HANDLED; +} + +static void ibmvnic_tasklet(void *data) +{ + struct ibmvnic_adapter *adapter = data; struct ibmvnic_crq_queue *queue = &adapter->crq; struct vio_dev *vdev = adapter->vdev; union ibmvnic_crq *crq; @@ -3440,11 +3461,19 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance) ibmvnic_handle_crq(crq, adapter); crq->generic.first = 0; } else { - done = true; + /* remain in tasklet until all + * capabilities responses are received + */ + if (!adapter->wait_capability) + done = true; } } + /* if capabilities CRQ's were sent in this tasklet, the following + * tasklet must wait until all responses are received + */ + if (atomic_read(&adapter->running_cap_crqs) != 0) + adapter->wait_capability = true; spin_unlock_irqrestore(&queue->lock, flags); - return IRQ_HANDLED; } static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) @@ -3499,6 +3528,7 @@ static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter) netdev_dbg(adapter->netdev, "Releasing CRQ\n"); free_irq(vdev->irq, adapter); + tasklet_kill(&adapter->tasklet); do { rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); @@ -3544,6 +3574,9 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter) retrc = 0; + tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet, + (unsigned long)adapter); + netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME, adapter); @@ -3565,6 +3598,7 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter) return retrc; req_irq_failed: + tasklet_kill(&adapter->tasklet); do { rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); @@ -3586,9 +3620,9 @@ static int ibmvnic_dump_show(struct seq_file *seq, void *v) memset(&crq, 0, sizeof(crq)); crq.request_dump_size.first = IBMVNIC_CRQ_CMD; crq.request_dump_size.cmd = REQUEST_DUMP_SIZE; - ibmvnic_send_crq(adapter, &crq); init_completion(&adapter->fw_done); + ibmvnic_send_crq(adapter, &crq); wait_for_completion(&adapter->fw_done); seq_write(seq, adapter->dump_data, adapter->dump_data_size); @@ -3634,8 +3668,8 @@ static void handle_crq_init_rsp(struct work_struct *work) } } - send_version_xchg(adapter); reinit_completion(&adapter->init_done); + send_version_xchg(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { dev_err(dev, "Passive init timeout\n"); goto task_failed; @@ -3645,9 +3679,9 @@ static void handle_crq_init_rsp(struct work_struct *work) if (adapter->renegotiate) { adapter->renegotiate = false; release_sub_crqs_no_irqs(adapter); - send_cap_queries(adapter); reinit_completion(&adapter->init_done); + send_cap_queries(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { dev_err(dev, "Passive init timeout\n"); @@ -3661,9 +3695,7 @@ static void handle_crq_init_rsp(struct work_struct *work) goto task_failed; netdev->real_num_tx_queues = 
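
Splitting ibmvnic_interrupt() into a minimal hard-IRQ handler plus a tasklet moves the CRQ drain into softirq context, and the tasklet_kill() calls added to the release and error paths guarantee the tasklet cannot run against a torn-down CRQ. The shape of the split, with hypothetical names for the pieces the diff elides:

    static irqreturn_t my_irq(int irq, void *instance)
    {
            struct my_adapter *adapter = instance;  /* hypothetical */

            vio_disable_interrupts(adapter->vdev);
            tasklet_schedule(&adapter->tasklet);
            return IRQ_HANDLED;
    }

    static void my_tasklet_fn(unsigned long data)
    {
            struct my_adapter *adapter = (struct my_adapter *)data;

            drain_crq(adapter);     /* hypothetical: handle each entry */
            /* device interrupts are re-enabled once the queue is empty */
    }

    tasklet_init(&adapter->tasklet, my_tasklet_fn, (unsigned long)adapter);
    ...
    tasklet_kill(&adapter->tasklet);        /* before freeing the CRQ */
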
adapter->req_tx_queues; - netdev->mtu = adapter->req_mtu; - netdev->min_mtu = adapter->min_mtu; - netdev->max_mtu = adapter->max_mtu; + netdev->mtu = adapter->req_mtu - ETH_HLEN; if (adapter->failover) { adapter->failover = false; @@ -3777,9 +3809,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->debugfs_dump = ent; } } - ibmvnic_send_crq_init(adapter); init_completion(&adapter->init_done); + ibmvnic_send_crq_init(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) return 0; @@ -3787,9 +3819,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) if (adapter->renegotiate) { adapter->renegotiate = false; release_sub_crqs_no_irqs(adapter); - send_cap_queries(adapter); reinit_completion(&adapter->init_done); + send_cap_queries(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) return 0; @@ -3803,7 +3835,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) } netdev->real_num_tx_queues = adapter->req_tx_queues; - netdev->mtu = adapter->req_mtu; + netdev->mtu = adapter->req_mtu - ETH_HLEN; rc = register_netdev(netdev); if (rc) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index dd775d951b73..422824f1f42a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -976,11 +976,11 @@ struct ibmvnic_adapter { dma_addr_t login_rsp_buf_token; int login_rsp_buf_sz; - atomic_t running_cap_queries; + atomic_t running_cap_crqs; + bool wait_capability; struct ibmvnic_sub_crq_queue **tx_scrq; struct ibmvnic_sub_crq_queue **rx_scrq; - int requested_caps; bool renegotiate; /* rx structs */ @@ -1049,5 +1049,6 @@ struct ibmvnic_adapter { struct work_struct vnic_crq_init; struct work_struct ibmvnic_xport; + struct tasklet_struct tasklet; bool failover; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7a23d3e47c6f..82d8040fa418 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -348,6 +348,8 @@ struct i40e_pf { #define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51) #define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52) #define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53) +#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(54) +#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index b2101a51534c..451f48b7540a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -538,6 +538,8 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { __le16 command_flags; +#define I40E_AQC_MC_MAG_EN 0x0100 +#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200 #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index fc73e4ef27ac..ece57d6a6e23 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -300,7 +300,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len; u8 *buf = (u8 
*)buffer; - u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; @@ -328,12 +327,18 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if (buf_len < len) len = buf_len; /* write the full 16-byte chunks */ - for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); - /* write whatever's left over without overrunning the buffer */ - if (i < len) - i40e_debug(hw, mask, "\t0x%04X %*ph\n", - i, len - i, buf + i); + if (hw->debug_mask & mask) { + char prefix[20]; + + snprintf(prefix, 20, + "i40e %02x:%02x.%x: \t0x", + hw->bus.bus_id, + hw->bus.device, + hw->bus.func); + + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); + } } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index f1f41f12902f..267ad2588255 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -974,7 +974,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, struct i40e_dcbx_config *r_cfg = &pf->hw.remote_dcbx_config; int i, ret; - u32 switch_id; + u16 switch_id; bw_data = kzalloc(sizeof( struct i40e_aqc_query_port_ets_config_resp), @@ -986,7 +986,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, vsi = pf->vsi[pf->lan_vsi]; switch_id = - vsi->info.switch_id & I40E_AQ_VSI_SW_ID_MASK; + le16_to_cpu(vsi->info.switch_id) & + I40E_AQ_VSI_SW_ID_MASK; ret = i40e_aq_query_port_ets_config(&pf->hw, switch_id, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e83a8ca5dd65..e8a8351c8ea9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -77,7 +77,6 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, @@ -1254,6 +1253,7 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, struct hlist_head *tmp_del_list, int vlan_filters) { + s16 pvid = le16_to_cpu(vsi->info.pvid); struct i40e_mac_filter *f, *add_head; struct i40e_new_mac_filter *new; struct hlist_node *h; @@ -1275,8 +1275,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, /* Update the filters about to be added in place */ hlist_for_each_entry(new, tmp_add_list, hlist) { - if (vsi->info.pvid && new->f->vlan != vsi->info.pvid) - new->f->vlan = vsi->info.pvid; + if (pvid && new->f->vlan != pvid) + new->f->vlan = pvid; else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY) new->f->vlan = 0; else if (!vlan_filters && new->f->vlan == 0) @@ -1290,12 +1290,12 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, * order to avoid duplicating code for adding the new filter * then deleting the old filter. 
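
Replacing the hand-rolled hex loop with print_hex_dump() delegates the chunking, bounds handling, and formatting to the standard helper. The argument list is worth spelling out once: log level, per-line prefix, prefix style, row size, group size, buffer, length, and whether to append an ASCII column:

    /* 16 bytes per row, 1-byte groups, offset-style prefix, no ASCII */
    print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
                   16, 1, buf, len, false);
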
*/ - if ((vsi->info.pvid && f->vlan != vsi->info.pvid) || + if ((pvid && f->vlan != pvid) || (vlan_filters && f->vlan == I40E_VLAN_ANY) || (!vlan_filters && f->vlan == 0)) { /* Determine the new vlan we will be adding */ - if (vsi->info.pvid) - new_vlan = vsi->info.pvid; + if (pvid) + new_vlan = pvid; else if (vlan_filters) new_vlan = 0; else @@ -1447,18 +1447,20 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f) if (!f) return; + /* If the filter was never added to firmware then we can just delete it + * directly and we don't want to set the status to remove or else an + * admin queue command will unnecessarily fire. + */ if ((f->state == I40E_FILTER_FAILED) || (f->state == I40E_FILTER_NEW)) { - /* this one never got added by the FW. Just remove it, - * no need to sync anything. - */ hash_del(&f->hlist); kfree(f); } else { f->state = I40E_FILTER_REMOVE; - vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } + + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } /** @@ -4683,8 +4685,10 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) */ if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { + local_bh_disable(); if (napi_reschedule(&tx_ring->q_vector->napi)) tx_ring->tx_stats.tx_lost_interrupt++; + local_bh_enable(); } } @@ -6350,7 +6354,16 @@ static void i40e_link_event(struct i40e_pf *pf) old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); status = i40e_get_link_status(&pf->hw, &new_link); - if (status) { + + /* On success, disable temp link polling */ + if (status == I40E_SUCCESS) { + if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING) + pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING; + } else { + /* Enable link polling temporarily until i40e_get_link_status + * returns I40E_SUCCESS + */ + pf->flags |= I40E_FLAG_TEMP_LINK_POLLING; dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n", status); return; @@ -6402,7 +6415,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) return; pf->service_timer_previous = jiffies; - if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) + if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) || + (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)) i40e_link_event(pf); /* Update the stats for active netdevs so the network stack @@ -8813,16 +8827,17 @@ static int i40e_sw_init(struct i40e_pf *pf) } #endif /* CONFIG_PCI_IOV */ if (pf->hw.mac.type == I40E_MAC_X722) { - pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE | - I40E_FLAG_128_QP_RSS_CAPABLE | - I40E_FLAG_HW_ATR_EVICT_CAPABLE | - I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | - I40E_FLAG_WB_ON_ITR_CAPABLE | - I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | - I40E_FLAG_NO_PCI_LINK_CHECK | - I40E_FLAG_USE_SET_LLDP_MIB | - I40E_FLAG_GENEVE_OFFLOAD_CAPABLE | - I40E_FLAG_PTP_L4_CAPABLE; + pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE + | I40E_FLAG_128_QP_RSS_CAPABLE + | I40E_FLAG_HW_ATR_EVICT_CAPABLE + | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE + | I40E_FLAG_WB_ON_ITR_CAPABLE + | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE + | I40E_FLAG_NO_PCI_LINK_CHECK + | I40E_FLAG_USE_SET_LLDP_MIB + | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE + | I40E_FLAG_PTP_L4_CAPABLE + | I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE; } else if ((pf->hw.aq.api_maj_ver > 1) || ((pf->hw.aq.api_maj_ver == 1) && (pf->hw.aq.api_min_ver > 4))) { @@ -10758,7 +10773,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_pf_config_rss(pf); /* fill in link information and enable LSE reporting */ - 
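
The local_bh_disable()/local_bh_enable() pair added around napi_reschedule() above matters because the hung-queue detector runs in process context: scheduling NAPI raises NET_RX_SOFTIRQ, and without a bottom-half-disabled section there is no guarantee the softirq is processed promptly, which can trip "pending softirq" warnings. The fenced form:

    local_bh_disable();
    if (napi_reschedule(&tx_ring->q_vector->napi))
            tx_ring->tx_stats.tx_lost_interrupt++;
    local_bh_enable();      /* pending softirqs run on this edge */
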
i40e_update_link_info(&pf->hw); i40e_link_event(pf); /* Initialize user-specific link properties */ @@ -11739,6 +11753,53 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) } /** + * i40e_enable_mc_magic_wake - enable multicast magic packet wake up + * using the mac_address_write admin q function + * @pf: pointer to i40e_pf struct + **/ +static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + i40e_status ret; + u8 mac_addr[6]; + u16 flags = 0; + + /* Get current MAC address in case it's an LAA */ + if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { + ether_addr_copy(mac_addr, + pf->vsi[pf->lan_vsi]->netdev->dev_addr); + } else { + dev_err(&pf->pdev->dev, + "Failed to retrieve MAC address; using default\n"); + ether_addr_copy(mac_addr, hw->mac.addr); + } + + /* The FW expects the mac address write cmd to first be called with + * one of these flags before calling it again with the multicast + * enable flags. + */ + flags = I40E_AQC_WRITE_TYPE_LAA_WOL; + + if (hw->func_caps.flex10_enable && hw->partition_id != 1) + flags = I40E_AQC_WRITE_TYPE_LAA_ONLY; + + ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up"); + return; + } + + flags = I40E_AQC_MC_MAG_EN + | I40E_AQC_WOL_PRESERVE_ON_PFR + | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG; + ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); + if (ret) + dev_err(&pf->pdev->dev, + "Failed to enable Multicast Magic Packet wake up\n"); +} + +/** * i40e_shutdown - PCI callback for shutting down * @pdev: PCI device information struct **/ @@ -11760,6 +11821,9 @@ static void i40e_shutdown(struct pci_dev *pdev) cancel_work_sync(&pf->service_task); i40e_fdir_teardown(pf); + if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) + i40e_enable_mc_magic_wake(pf); + rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); @@ -11791,6 +11855,9 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); + if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) + i40e_enable_mc_magic_wake(pf); + rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 09f09ea7a5e5..97d46058d71d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1864,14 +1864,14 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) /* a small macro to shorten up some long lines */ #define INTREG I40E_PFINT_DYN_CTLN -static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_rx_itr(struct i40e_vsi *vsi, int idx) { - return !!(vsi->rx_rings[idx]->rx_itr_setting); + return vsi->rx_rings[idx]->rx_itr_setting; } -static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_tx_itr(struct i40e_vsi *vsi, int idx) { - return !!(vsi->tx_rings[idx]->tx_itr_setting); + return vsi->tx_rings[idx]->tx_itr_setting; } /** @@ -1897,8 +1897,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); - rx_itr_setting = get_rx_itr_enabled(vsi, idx); - tx_itr_setting = get_tx_itr_enabled(vsi, idx); + rx_itr_setting = get_rx_itr(vsi, idx); + tx_itr_setting = get_tx_itr(vsi, idx); if (q_vector->itr_countdown > 0 || 
(!ITR_IS_DYNAMIC(rx_itr_setting) && @@ -2335,7 +2335,8 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, /* remove payload length from outer checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.udp->check, htonl(paylen)); + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); } /* reset pointers to inner headers */ @@ -2356,7 +2357,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index b5a59dd72a0c..89dfdbca13db 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -304,7 +304,6 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u8 *buf = (u8 *)buffer; - u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; @@ -332,12 +331,18 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if (buf_len < len) len = buf_len; /* write the full 16-byte chunks */ - for (i = 0; i < (len - 16); i += 16) - i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i); - /* write whatever's left over without overrunning the buffer */ - if (i < len) - i40e_debug(hw, mask, "\t0x%04X %*ph\n", - i, len - i, buf + i); + if (hw->debug_mask & mask) { + char prefix[20]; + + snprintf(prefix, 20, + "i40evf %02x:%02x.%x: \t0x", + hw->bus.bus_id, + hw->bus.device, + hw->bus.func); + + print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, + 16, 1, buf, len, false); + } } } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index b758846d4dc5..c91fcf43ccbc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1324,18 +1324,18 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) /* a small macro to shorten up some long lines */ #define INTREG I40E_VFINT_DYN_CTLN1 -static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_rx_itr(struct i40e_vsi *vsi, int idx) { struct i40evf_adapter *adapter = vsi->back; - return !!(adapter->rx_rings[idx].rx_itr_setting); + return adapter->rx_rings[idx].rx_itr_setting; } -static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx) +static inline int get_tx_itr(struct i40e_vsi *vsi, int idx) { struct i40evf_adapter *adapter = vsi->back; - return !!(adapter->tx_rings[idx].tx_itr_setting); + return adapter->tx_rings[idx].tx_itr_setting; } /** @@ -1361,8 +1361,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, */ rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); - rx_itr_setting = get_rx_itr_enabled(vsi, idx); - tx_itr_setting = get_tx_itr_enabled(vsi, idx); + rx_itr_setting = get_rx_itr(vsi, idx); + tx_itr_setting = get_tx_itr(vsi, idx); if (q_vector->itr_countdown > 0 || (!ITR_IS_DYNAMIC(rx_itr_setting) && @@ -1629,7 +1629,8 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, /* remove payload length from outer checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.udp->check, htonl(paylen)); + 
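
The (__force __wsum) casts are sparse annotations rather than behaviour changes: htonl() returns a __be32 while csum_replace_by_diff() takes a __wsum, and the two bitwise types cannot be mixed implicitly. The bits are identical either way:

    u32 paylen = skb->len - l4_offset;

    /* __force tells sparse the __be32 -> __wsum reinterpretation is
     * intentional; no bytes change
     */
    csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
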
csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); } /* reset pointers to inner headers */ @@ -1650,7 +1651,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len, /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 920c1cb06a92..f35dcaac5bb7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2153,6 +2153,11 @@ static int i40evf_close(struct net_device *netdev) adapter->state = __I40EVF_DOWN_PENDING; i40evf_free_traffic_irqs(adapter); + /* We explicitly don't free resources here because the hardware is + * still active and can DMA into memory. Resources are cleared in + * i40evf_virtchnl_completion() after we get confirmation from the PF + * driver that the rings have been stopped. + */ return 0; } @@ -2871,7 +2876,8 @@ static void i40evf_remove(struct pci_dev *pdev) i40evf_request_reset(adapter); msleep(50); } - + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); i40evf_misc_irq_disable(adapter); i40evf_free_misc_irq(adapter); i40evf_reset_interrupt_capability(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index e83444c34cf9..a2cc43d28888 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -91,6 +91,14 @@ #define IXGBE_RXBUFFER_4K 4096 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ +#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#if (PAGE_SIZE < 8192) +#define IXGBE_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD) +#else +#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K +#endif + /* * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, @@ -104,6 +112,9 @@ /* How many Rx Buffers do we bundle into one write to the hardware ?
*/ #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define IXGBE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + enum ixgbe_tx_flags { /* cmd_type flags */ IXGBE_TX_FLAGS_HW_VLAN = 0x01, @@ -192,7 +203,12 @@ struct ixgbe_rx_buffer { struct sk_buff *skb; dma_addr_t dma; struct page *page; - unsigned int page_offset; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; }; struct ixgbe_queue_stats { @@ -218,15 +234,20 @@ struct ixgbe_rx_queue_stats { #define IXGBE_TS_HDR_LEN 8 enum ixgbe_ring_state_t { + __IXGBE_RX_3K_BUFFER, + __IXGBE_RX_BUILD_SKB_ENABLED, + __IXGBE_RX_RSC_ENABLED, + __IXGBE_RX_CSUM_UDP_ZERO_ERR, + __IXGBE_RX_FCOE, __IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_XPS_INIT_DONE, __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, - __IXGBE_RX_RSC_ENABLED, - __IXGBE_RX_CSUM_UDP_ZERO_ERR, - __IXGBE_RX_FCOE, }; +#define ring_uses_build_skb(ring) \ + test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) + struct ixgbe_fwd_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct net_device *netdev; @@ -336,19 +357,20 @@ struct ixgbe_ring_feature { */ static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) { -#ifdef IXGBE_FCOE - if (test_bit(__IXGBE_RX_FCOE, &ring->state)) - return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K : - IXGBE_RXBUFFER_3K; + if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + return IXGBE_RXBUFFER_3K; +#if (PAGE_SIZE < 8192) + if (ring_uses_build_skb(ring)) + return IXGBE_MAX_FRAME_BUILD_SKB; #endif return IXGBE_RXBUFFER_2K; } static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) { -#ifdef IXGBE_FCOE - if (test_bit(__IXGBE_RX_FCOE, &ring->state)) - return (PAGE_SIZE < 8192) ? 1 : 0; +#if (PAGE_SIZE < 8192) + if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + return 1; #endif return 0; } @@ -539,6 +561,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) #define IXGBE_FLAG2_EEE_CAPABLE BIT(14) #define IXGBE_FLAG2_EEE_ENABLED BIT(15) +#define IXGBE_FLAG2_RX_LEGACY BIT(16) /* Tx fast path data */ int num_tx_queues; @@ -751,7 +774,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info; extern const struct ixgbe_info ixgbe_x550em_a_info; extern const struct ixgbe_info ixgbe_x550em_a_fw_info; #ifdef CONFIG_IXGBE_DCB -extern const struct dcbnl_rtnl_ops dcbnl_ops; +extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops; #endif extern char ixgbe_driver_name[]; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index b8fc3cfec831..78c52375acc6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -777,7 +777,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) return err ? 
1 : 0; } -const struct dcbnl_rtnl_ops dcbnl_ops = { +const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = { .ieee_getets = ixgbe_dcbnl_ieee_getets, .ieee_setets = ixgbe_dcbnl_ieee_setets, .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index ee28e54a4f75..a7574c7b12af 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -151,6 +151,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { }; #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN +static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) + /* currently supported speeds for 10G */ #define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \ SUPPORTED_10000baseKX4_Full | \ @@ -340,6 +347,9 @@ static int ixgbe_get_settings(struct net_device *netdev, case IXGBE_LINK_SPEED_10GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_10000); break; + case IXGBE_LINK_SPEED_5GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_5000); + break; case IXGBE_LINK_SPEED_2_5GB_FULL: ethtool_cmd_speed_set(ecmd, SPEED_2500); break; @@ -998,6 +1008,8 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN; } static void ixgbe_get_ringparam(struct net_device *netdev, @@ -1137,6 +1149,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset) return IXGBE_TEST_LEN; case ETH_SS_STATS: return IXGBE_STATS_LEN; + case ETH_SS_PRIV_FLAGS: + return IXGBE_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } @@ -1261,6 +1275,9 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, } /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, ixgbe_priv_flags_strings, + IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); } } @@ -1865,7 +1882,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, tx_ntc = tx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); - while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { + while (rx_desc->wb.upper.length) { /* check Rx buffer */ rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; @@ -1887,7 +1904,16 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, /* unmap buffer on Tx side */ tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); /* increment Rx/Tx next to clean counters */ rx_ntc++; @@ -3342,6 +3368,37 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) return 0; } +static u32 ixgbe_get_priv_flags(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) + priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + unsigned int flags2 = adapter->flags2; + + flags2 &= ~IXGBE_FLAG2_RX_LEGACY; + 
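
The legacy-rx knob uses the stock ethtool private-flags machinery: a string table whose bit positions define the flag values, plus get/set callbacks and an ETH_SS_PRIV_FLAGS string-set count. A condensed sketch for a hypothetical driver (the my_ names are placeholders):

    static const char my_priv_flags_strings[][ETH_GSTRING_LEN] = {
    #define MY_PRIV_FLAG_LEGACY_RX  BIT(0)
            "legacy-rx",
    };

    static u32 my_get_priv_flags(struct net_device *netdev)
    {
            struct my_adapter *adapter = netdev_priv(netdev);

            return adapter->use_legacy_rx ? MY_PRIV_FLAG_LEGACY_RX : 0;
    }

Userspace then drives it with "ethtool --show-priv-flags <dev>" and "ethtool --set-priv-flags <dev> legacy-rx on".
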
if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) + flags2 |= IXGBE_FLAG2_RX_LEGACY; + + if (flags2 != adapter->flags2) { + adapter->flags2 = flags2; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + } + + return 0; +} + static const struct ethtool_ops ixgbe_ethtool_ops = { .get_settings = ixgbe_get_settings, .set_settings = ixgbe_set_settings, @@ -3378,6 +3435,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_eee = ixgbe_set_eee, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, + .get_priv_flags = ixgbe_get_priv_flags, + .set_priv_flags = ixgbe_set_priv_flags, .get_ts_info = ixgbe_get_ts_info, .get_module_info = ixgbe_get_module_info, .get_module_eeprom = ixgbe_get_module_eeprom, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 86135c00d4b1..060cdce8058f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -72,7 +72,7 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "4.4.0-k" +#define DRV_VERSION "5.0.0-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2016 Intel Corporation."; @@ -945,28 +945,6 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, } } -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, - struct ixgbe_tx_buffer *tx_buffer) -{ - if (tx_buffer->skb) { - dev_kfree_skb_any(tx_buffer->skb); - if (dma_unmap_len(tx_buffer, len)) - dma_unmap_single(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } else if (dma_unmap_len(tx_buffer, len)) { - dma_unmap_page(ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - } - tx_buffer->next_to_watch = NULL; - tx_buffer->skb = NULL; - dma_unmap_len_set(tx_buffer, len, 0); - /* tx_buffer must be completely set up in the transmit path */ -} - static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -1198,7 +1176,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, DMA_TO_DEVICE); /* clear tx_buffer data */ - tx_buffer->skb = NULL; dma_unmap_len_set(tx_buffer, len, 0); /* unmap remaining buffers */ @@ -1552,6 +1529,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, } } +static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
IXGBE_SKB_PAD : 0; +} + static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *bi) { @@ -1570,8 +1552,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, } /* map page for use */ - dma = dma_map_page(rx_ring->dev, page, 0, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); /* * if mapping failed free memory back to system since @@ -1586,7 +1570,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, bi->dma = dma; bi->page = page; - bi->page_offset = 0; + bi->page_offset = ixgbe_rx_offset(rx_ring); + bi->pagecnt_bias = 1; return true; } @@ -1601,6 +1586,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; u16 i = rx_ring->next_to_use; + u16 bufsz; /* nothing to do */ if (!cleaned_count) @@ -1610,10 +1596,17 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) bi = &rx_ring->rx_buffer_info[i]; i -= rx_ring->count; + bufsz = ixgbe_rx_bufsz(rx_ring); + do { if (!ixgbe_alloc_mapped_page(rx_ring, bi)) break; + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + /* * Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. @@ -1629,8 +1622,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) i -= rx_ring->count; } - /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.upper.status_error = 0; + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; cleaned_count--; } while (cleaned_count); @@ -1832,19 +1825,19 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, { /* if the page was released unmap it, else just sync our portion */ if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; + dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); } else { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, frag->page_offset, - ixgbe_rx_bufsz(rx_ring), + skb_frag_size(frag), DMA_FROM_DEVICE); } - IXGBE_CB(skb)->dma = 0; } /** @@ -1880,7 +1873,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, } /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) + if (!skb_headlen(skb)) ixgbe_pull_tail(rx_ring, skb); #ifdef IXGBE_FCOE @@ -1915,14 +1908,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, nta++; rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - /* transfer page from old buffer to new buffer */ - *new_buff = *old_buff; - - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, - new_buff->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + /* Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. 
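
Two refill-path details above are easy to miss. With DMA_ATTR_SKIP_CPU_SYNC mappings the driver owns all syncs, so each buffer must be synced for the device before being handed back; and clearing wb.upper.length turns the length field itself into the "descriptor done" sentinel, replacing the old status_error test. Both halves of the handshake, condensed:

    /* refill: hand the buffer to HW with the length cleared */
    dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                     bi->page_offset, bufsz,
                                     DMA_FROM_DEVICE);
    rx_desc->wb.upper.length = 0;

    /* clean: a non-zero written-back length means a frame is ready */
    size = le16_to_cpu(rx_desc->wb.upper.length);
    if (!size)
            break;
    dma_rmb();      /* no descriptor field reads before the length check */
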
+ */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; } static inline bool ixgbe_page_is_reserved(struct page *page) @@ -1930,6 +1923,43 @@ static inline bool ixgbe_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } +static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote pages */ + if (unlikely(ixgbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + return false; +#else + /* The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define IXGBE_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) + if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(!pagecnt_bias)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + /** * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on @@ -1945,144 +1975,172 @@ static inline bool ixgbe_page_is_reserved(struct page *page) * The function will then update the page offset if necessary and return * true if the buffer can be reused by the adapter. **/ -static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, +static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int size) { - struct page *page = rx_buffer->page; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) - unsigned int truesize = ixgbe_rx_bufsz(rx_ring); + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); - unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - - ixgbe_rx_bufsz(rx_ring); + unsigned int truesize = ring_uses_build_skb(rx_ring) ? 
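
pagecnt_bias acts as a cache of page references: the driver takes one large reference up front with page_ref_add() and pays it out by decrementing the local bias each time a fragment is handed to the stack, avoiding an atomic operation per packet. The page may be recycled only while every reference except one is accounted for by the bias:

    /* reusable iff page_ref_count(page) == pagecnt_bias + 1 */
    if (unlikely(page_ref_count(page) - pagecnt_bias > 1))
            return false;

    /* bias drained: restock the real refcount in a single atomic op */
    if (unlikely(!pagecnt_bias)) {
            page_ref_add(page, USHRT_MAX);
            rx_buffer->pagecnt_bias = USHRT_MAX;
    }
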
+ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : + SKB_DATA_ALIGN(size); #endif - - if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; - - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is not reserved, we can reuse buffer as-is */ - if (likely(!ixgbe_page_is_reserved(page))) - return true; - - /* this page cannot be reused so discard it */ - __free_pages(page, ixgbe_rx_pg_order(rx_ring)); - return false; - } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, size, truesize); - - /* avoid re-using remote pages */ - if (unlikely(ixgbe_page_is_reserved(page))) - return false; - #if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; - - /* flip page offset to other buffer */ rx_buffer->page_offset ^= truesize; #else - /* move offset up to the next cache line */ rx_buffer->page_offset += truesize; - - if (rx_buffer->page_offset > last_offset) - return false; #endif - - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. - */ - page_ref_inc(page); - - return true; } -static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc) +static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff **skb, + const unsigned int size) { struct ixgbe_rx_buffer *rx_buffer; - struct sk_buff *skb; - struct page *page; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; - page = rx_buffer->page; - prefetchw(page); + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; - skb = rx_buffer->skb; + /* Delay unmapping of the first packet. It carries the header + * information, HW may still access the header after the writeback. 
+ * Only unmap it when EOP is reached + */ + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) { + if (!*skb) + goto skip_sync; + } else { + if (*skb) + ixgbe_dma_sync_frag(rx_ring, *skb); + } - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +skip_sync: + rx_buffer->pagecnt_bias--; - /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); -#endif + return rx_buffer; +} - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, - IXGBE_RX_HDR_SIZE); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - return NULL; +static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + if (ixgbe_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + ixgbe_reuse_rx_page(rx_ring, rx_buffer); + } else { + if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); } + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } - /* - * we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} - /* - * Delay unmapping of the first packet. It carries the - * header information, HW may still access the header - * after the writeback. 
Only unmap it when EOP is - * reached - */ - if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) - goto dma_sync; +static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + struct sk_buff *skb; - IXGBE_CB(skb)->dma = rx_buffer->dma; - } else { - if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) - ixgbe_dma_sync_frag(rx_ring, skb); + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif -dma_sync: - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; - rx_buffer->skb = NULL; - } + if (size > IXGBE_RX_HDR_SIZE) { + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + IXGBE_CB(skb)->dma = rx_buffer->dma; - /* pull page into skb */ - if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { - /* hand second half of page back to the ring */ - ixgbe_reuse_rx_page(rx_ring, rx_buffer); - } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { - /* the page has been released from the ring */ - IXGBE_CB(skb)->page_released = true; + skb_add_rx_frag(skb, 0, rx_buffer->page, + rx_buffer->page_offset, + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; } - /* clear contents of buffer_info */ - rx_buffer->page = NULL; + return skb; +} + +static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(IXGBE_SKB_PAD + size); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(va - IXGBE_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, IXGBE_SKB_PAD); + __skb_put(skb, size); + + /* record DMA address if this is the start of a chain of buffers */ + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + IXGBE_CB(skb)->dma = rx_buffer->dma; + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif return skb; } @@ -2114,7 +2172,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer; struct
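
ixgbe_build_skb() is the zero-copy counterpart of ixgbe_construct_skb(): rather than allocating an skb and copying the header in, it wraps sk_buff metadata directly around the page fragment the hardware already wrote, relying on the headroom and shared-info tailroom reserved when the buffer was sized. The core sequence:

    void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;

    /* packet bytes start at va; IXGBE_SKB_PAD headroom sits below it
     * and truesize covers the trailing skb_shared_info
     */
    skb = build_skb(va - IXGBE_SKB_PAD, truesize);
    if (unlikely(!skb))
            return NULL;

    skb_reserve(skb, IXGBE_SKB_PAD);        /* step over the headroom */
    __skb_put(skb, size);                   /* expose the DMA'd bytes */
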
sk_buff *skb; + unsigned int size; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { @@ -2123,8 +2183,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); - - if (!rx_desc->wb.upper.status_error) + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) break; /* This memory barrier is needed to keep us from reading @@ -2133,13 +2193,26 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ dma_rmb(); + rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); + /* retrieve a buffer from the ring */ - skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); + if (skb) + ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) + skb = ixgbe_build_skb(rx_ring, rx_buffer, + rx_desc, size); + else + skb = ixgbe_construct_skb(rx_ring, rx_buffer, + rx_desc, size); /* exit if we failed to retrieve a buffer */ - if (!skb) + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; break; + } + ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb); cleaned_count++; /* place incomplete frames back on ring for completion */ @@ -3197,6 +3270,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); + /* reinitialize tx_buffer_info */ + memset(ring->tx_buffer_info, 0, + sizeof(struct ixgbe_tx_buffer) * ring->count); + /* enable queue */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); @@ -3367,7 +3444,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ - srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) + srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -3668,6 +3748,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; + union ixgbe_adv_rx_desc *rx_desc; u64 rdba = ring->dma; u32 rxdctl; u8 reg_idx = ring->reg_idx; @@ -3700,8 +3781,27 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, */ rxdctl &= ~0x3FFFFF; rxdctl |= 0x080420; +#if (PAGE_SIZE < 8192) + } else { + rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | + IXGBE_RXDCTL_RLPML_EN); + + /* Limit the maximum frame size so we don't overrun the skb */ + if (ring_uses_build_skb(ring) && + !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) + rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB | + IXGBE_RXDCTL_RLPML_EN; +#endif } + /* initialize rx_buffer_info */ + memset(ring->rx_buffer_info, 0, + sizeof(struct ixgbe_rx_buffer) * ring->count); + + /* initialize Rx descriptor 0 */ + rx_desc = IXGBE_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + /* enable receive descriptor ring */ rxdctl |= IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); @@ -3838,10 +3938,30 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) */ for (i = 0; i < adapter->num_rx_queues; i++) { rx_ring = adapter->rx_ring[i]; + + clear_ring_rsc_enabled(rx_ring); + clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_ring_rsc_enabled(rx_ring); - else - 
clear_ring_rsc_enabled(rx_ring); + + if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + + clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); + if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) + continue; + + set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); + +#if (PAGE_SIZE < 8192) + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); + + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) + set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); +#endif } } @@ -4855,45 +4975,47 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) **/ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) { - struct device *dev = rx_ring->dev; - unsigned long size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buffer_info) - return; + u16 i = rx_ring->next_to_clean; + struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; - + while (i != rx_ring->next_to_alloc) { if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; if (IXGBE_CB(skb)->page_released) - dma_unmap_page(dev, - IXGBE_CB(skb)->dma, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); + dma_unmap_page_attrs(rx_ring->dev, + IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); dev_kfree_skb(skb); - rx_buffer->skb = NULL; } - if (!rx_buffer->page) - continue; + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); - dma_unmap_page(dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); - __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + IXGBE_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); - rx_buffer->page = NULL; + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } } - size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_buffer_info, 0, size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -5362,28 +5484,57 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) **/ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) { - struct ixgbe_tx_buffer *tx_buffer_info; - unsigned long size; - u16 i; + u16 i = tx_ring->next_to_clean; + struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; - /* ring already cleared, nothing to do */ - if (!tx_ring->tx_buffer_info) - return; + while (i != tx_ring->next_to_use) { + union ixgbe_adv_tx_desc *eop_desc, *tx_desc; - /* Free all the Tx ring sk_buffs */ - for (i = 0; i < tx_ring->count; i++) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - } + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); - netdev_tx_reset_queue(txring_txq(tx_ring)); + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); - size = sizeof(struct 
ixgbe_tx_buffer) * tx_ring->count; - memset(tx_ring->tx_buffer_info, 0, size); + /* check for eop_desc to determine the end of the packet */ + eop_desc = tx_buffer->next_to_watch; + tx_desc = IXGBE_TX_DESC(tx_ring, i); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } - /* Zero out the descriptor ring */ - memset(tx_ring->desc, 0, tx_ring->size); + /* reset BQL for queue */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; } @@ -5829,9 +5980,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) if (tx_ring->q_vector) ring_node = tx_ring->q_vector->numa_node; - tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); + tx_ring->tx_buffer_info = vmalloc_node(size, ring_node); if (!tx_ring->tx_buffer_info) - tx_ring->tx_buffer_info = vzalloc(size); + tx_ring->tx_buffer_info = vmalloc(size); if (!tx_ring->tx_buffer_info) goto err; @@ -5913,9 +6064,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) if (rx_ring->q_vector) ring_node = rx_ring->q_vector->numa_node; - rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); + rx_ring->rx_buffer_info = vmalloc_node(size, ring_node); if (!rx_ring->rx_buffer_info) - rx_ring->rx_buffer_info = vzalloc(size); + rx_ring->rx_buffer_info = vmalloc(size); if (!rx_ring->rx_buffer_info) goto err; @@ -7630,18 +7781,32 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, return; dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n"); + tx_buffer = &tx_ring->tx_buffer_info[i]; /* clear dma mappings for failed tx_buffer_info map */ - for (;;) { + while (tx_buffer != first) { + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + if (i--) + i += tx_ring->count; tx_buffer = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); - if (tx_buffer == first) - break; - if (i == 0) - i = tx_ring->count; - i--; } + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + dev_kfree_skb_any(first->skb); + first->skb = NULL; + tx_ring->next_to_use = i; } @@ -9687,7 +9852,7 @@ skip_sriov: #ifdef CONFIG_IXGBE_DCB if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) - netdev->dcbnl_ops = &dcbnl_ops; + netdev->dcbnl_ops = &ixgbe_dcbnl_ops; #endif #ifdef IXGBE_FCOE diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 2fcde8777a29..e55b2602f371 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -768,9 +768,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - - /* - * Clear autoneg_advertised and set new values based on input link 
+ /* Clear autoneg_advertised and set new values based on input link * speed. */ hw->phy.autoneg_advertised = 0; @@ -778,6 +776,12 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed & IXGBE_LINK_SPEED_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; + + if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index fdf71720e707..61dd4462411c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3959,7 +3959,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_do_ioctl = mvneta_ioctl, }; -const struct ethtool_ops mvneta_eth_tool_ops = { +static const struct ethtool_ops mvneta_eth_tool_ops = { .nway_reset = phy_ethtool_nway_reset, .get_link = ethtool_op_get_link, .set_coalesce = mvneta_ethtool_set_coalesce, diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index c2fd7c36f927..c48632048f71 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5971,8 +5971,10 @@ static int mvpp2_port_init(struct mvpp2_port *port) struct mvpp2_tx_queue *txq; txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); - if (!txq) - return -ENOMEM; + if (!txq) { + err = -ENOMEM; + goto err_free_percpu; + } txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); if (!txq->pcpu) { diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 6b8635378f1f..fa6d2354a0e9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -81,8 +81,9 @@ void mlx4_cq_tasklet_cb(unsigned long data) static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) { - unsigned long flags; struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; + unsigned long flags; + bool kick; spin_lock_irqsave(&tasklet_ctx->lock, flags); /* When migrating CQs between EQs will be implemented, please note @@ -92,7 +93,10 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) */ if (list_empty_careful(&cq->tasklet_ctx.list)) { atomic_inc(&cq->refcount); + kick = list_empty(&tasklet_ctx->list); list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); + if (kick) + tasklet_schedule(&tasklet_ctx->task); } spin_unlock_irqrestore(&tasklet_ctx->lock, flags); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 748e9f65c386..afe4444e5434 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1382,6 +1382,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) { unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); + u32 pkt_rate_high, pkt_rate_low; struct mlx4_en_cq *cq; unsigned long packets; unsigned long rate; @@ -1395,37 +1396,40 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) return; + pkt_rate_low = READ_ONCE(priv->pkt_rate_low); + pkt_rate_high = READ_ONCE(priv->pkt_rate_high); + for (ring = 0; ring < priv->rx_ring_num; ring++) { rx_packets = 
READ_ONCE(priv->rx_ring[ring]->packets); rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes); - rx_pkt_diff = ((unsigned long) (rx_packets - - priv->last_moder_packets[ring])); + rx_pkt_diff = rx_packets - priv->last_moder_packets[ring]; packets = rx_pkt_diff; rate = packets * HZ / period; - avg_pkt_size = packets ? ((unsigned long) (rx_bytes - - priv->last_moder_bytes[ring])) / packets : 0; + avg_pkt_size = packets ? (rx_bytes - + priv->last_moder_bytes[ring]) / packets : 0; /* Apply auto-moderation only when packet rate * exceeds a rate that it matters */ if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { - if (rate < priv->pkt_rate_low) + if (rate <= pkt_rate_low) moder_time = priv->rx_usecs_low; - else if (rate > priv->pkt_rate_high) + else if (rate >= pkt_rate_high) moder_time = priv->rx_usecs_high; else - moder_time = (rate - priv->pkt_rate_low) * + moder_time = (rate - pkt_rate_low) * (priv->rx_usecs_high - priv->rx_usecs_low) / - (priv->pkt_rate_high - priv->pkt_rate_low) + + (pkt_rate_high - pkt_rate_low) + priv->rx_usecs_low; } else { moder_time = priv->rx_usecs_low; } - if (moder_time != priv->last_moder_time[ring]) { + cq = priv->rx_cq[ring]; + if (moder_time != priv->last_moder_time[ring] || + cq->moder_cnt != priv->rx_frames) { priv->last_moder_time[ring] = moder_time; - cq = priv->rx_cq[ring]; cq->moder_time = moder_time; cq->moder_cnt = priv->rx_frames; err = mlx4_en_set_cq_moder(priv, cq); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 0509996957d9..39232b6a974f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -494,7 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_eqe *eqe; - int cqn = -1; + int cqn; int eqes_found = 0; int set_ci = 0; int port; @@ -840,13 +840,6 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eq_set_ci(eq, 1); - /* cqn is 24bit wide but is initialized such that its higher bits - * are ones too. Thus, if we got any event, cqn's high bits should be off - * and we need to schedule the tasklet. 
- */ - if (!(cqn & ~0xffffff)) - tasklet_schedule(&eq->tasklet_ctx.task); - return eqes_found; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d8ca6d1794ef..4941b692e947 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -102,7 +102,8 @@ /* Use the maximum between 16384 and a single page */ #define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) -#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER +#define MLX4_EN_ALLOC_PREFER_ORDER min_t(int, get_order(32768), \ + PAGE_ALLOC_COSTLY_ORDER) /* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU * and 4K allocations) */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index d87a82682cb5..44406a5ec15d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1240,10 +1240,14 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); + preempt_disable(); + tcf_exts_to_list(f->exts, &actions); list_for_each_entry(a, &actions, list) tcf_action_stats_update(a, bytes, packets, lastuse); + preempt_enable(); + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 6e1bb3a4a611..db52b6a53c6f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -58,11 +58,11 @@ static const char nfp_driver_name[] = "nfp"; const char nfp_driver_version[] = VERMAGIC_STRING; static const struct pci_device_id nfp_pci_device_ids[] = { - { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, }, - { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP4000, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, }, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 165e085c2a24..15cc3e77cf6a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -640,8 +640,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000; } - if (nfp->pdev->device == PCI_DEVICE_NFP4000 || - nfp->pdev->device == PCI_DEVICE_NFP6000) { + if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 || + nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) { nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); expl_groups = 4; } else { diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 74e6f9f59bdc..edecc0a27485 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -58,11 +58,6 @@ #define PCI_64BIT_BAR_COUNT 3 -/* NFP hardware vendor/device ids. 
- */ -#define PCI_DEVICE_NFP4000 0x4000 -#define PCI_DEVICE_NFP6000 0x6000 - #define NFP_CPP_NUM_TARGETS 16 struct device; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 58ba5d3f9e5f..92367a06491a 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -4237,14 +4237,15 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) return 0; } -static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int nv_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct fe_priv *np = netdev_priv(dev); - u32 speed; + u32 speed, supported, advertising; int adv; spin_lock_irq(&np->lock); - ecmd->port = PORT_MII; + cmd->base.port = PORT_MII; if (!netif_running(dev)) { /* We do not track link speed / duplex setting if the * interface is disabled. Force a link check */ @@ -4272,64 +4273,71 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) speed = -1; break; } - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; if (np->duplex) - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; } else { speed = SPEED_UNKNOWN; - ecmd->duplex = DUPLEX_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_cmd_speed_set(ecmd, speed); - ecmd->autoneg = np->autoneg; + cmd->base.speed = speed; + cmd->base.autoneg = np->autoneg; - ecmd->advertising = ADVERTISED_MII; + advertising = ADVERTISED_MII; if (np->autoneg) { - ecmd->advertising |= ADVERTISED_Autoneg; + advertising |= ADVERTISED_Autoneg; adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); if (adv & ADVERTISE_10HALF) - ecmd->advertising |= ADVERTISED_10baseT_Half; + advertising |= ADVERTISED_10baseT_Half; if (adv & ADVERTISE_10FULL) - ecmd->advertising |= ADVERTISED_10baseT_Full; + advertising |= ADVERTISED_10baseT_Full; if (adv & ADVERTISE_100HALF) - ecmd->advertising |= ADVERTISED_100baseT_Half; + advertising |= ADVERTISED_100baseT_Half; if (adv & ADVERTISE_100FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; + advertising |= ADVERTISED_100baseT_Full; if (np->gigabit == PHY_GIGABIT) { adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); if (adv & ADVERTISE_1000FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; + advertising |= ADVERTISED_1000baseT_Full; } } - ecmd->supported = (SUPPORTED_Autoneg | + supported = (SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_MII); if (np->gigabit == PHY_GIGABIT) - ecmd->supported |= SUPPORTED_1000baseT_Full; + supported |= SUPPORTED_1000baseT_Full; - ecmd->phy_address = np->phyaddr; - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.phy_address = np->phyaddr; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); /* ignore maxtxpkt, maxrxpkt for now */ spin_unlock_irq(&np->lock); return 0; } -static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int nv_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct fe_priv *np = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; + u32 advertising; - if (ecmd->port != PORT_MII) - return -EINVAL; - if (ecmd->transceiver != XCVR_EXTERNAL) + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if 
(cmd->base.port != PORT_MII) return -EINVAL; - if (ecmd->phy_address != np->phyaddr) { + if (cmd->base.phy_address != np->phyaddr) { /* TODO: support switching between multiple phys. Should be * trivial, but not enabled due to lack of test hardware. */ return -EINVAL; } - if (ecmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 mask; mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | @@ -4337,16 +4345,17 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (np->gigabit == PHY_GIGABIT) mask |= ADVERTISED_1000baseT_Full; - if ((ecmd->advertising & mask) == 0) + if ((advertising & mask) == 0) return -EINVAL; - } else if (ecmd->autoneg == AUTONEG_DISABLE) { + } else if (cmd->base.autoneg == AUTONEG_DISABLE) { /* Note: autonegotiation disable, speed 1000 intentionally * forbidden - no one should need that. */ if (speed != SPEED_10 && speed != SPEED_100) return -EINVAL; - if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + if (cmd->base.duplex != DUPLEX_HALF && + cmd->base.duplex != DUPLEX_FULL) return -EINVAL; } else { return -EINVAL; @@ -4376,7 +4385,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) netif_tx_unlock_bh(dev); } - if (ecmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { int adv, bmcr; np->autoneg = 1; @@ -4384,13 +4393,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) /* advertise only what has been requested */ adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - if (ecmd->advertising & ADVERTISED_10baseT_Half) + if (advertising & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; - if (ecmd->advertising & ADVERTISED_10baseT_Full) + if (advertising & ADVERTISED_10baseT_Full) adv |= ADVERTISE_10FULL; - if (ecmd->advertising & ADVERTISED_100baseT_Half) + if (advertising & ADVERTISED_100baseT_Half) adv |= ADVERTISE_100HALF; - if (ecmd->advertising & ADVERTISED_100baseT_Full) + if (advertising & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; @@ -4401,7 +4410,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (np->gigabit == PHY_GIGABIT) { adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); adv &= ~ADVERTISE_1000FULL; - if (ecmd->advertising & ADVERTISED_1000baseT_Full) + if (advertising & ADVERTISED_1000baseT_Full) adv |= ADVERTISE_1000FULL; mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); } @@ -4428,13 +4437,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); - if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) + if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF) adv |= ADVERTISE_10HALF; - if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) + if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL) adv |= ADVERTISE_10FULL; - if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) + if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF) adv |= ADVERTISE_100HALF; - if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) + if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL) adv |= ADVERTISE_100FULL; np->pause_flags &= 
~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ @@ -5241,8 +5250,6 @@ static const struct ethtool_ops ops = { .get_link = ethtool_op_get_link, .get_wol = nv_get_wol, .set_wol = nv_set_wol, - .get_settings = nv_get_settings, - .set_settings = nv_set_settings, .get_regs_len = nv_get_regs_len, .get_regs = nv_get_regs, .nway_reset = nv_nway_reset, @@ -5255,6 +5262,8 @@ static const struct ethtool_ops ops = { .get_sset_count = nv_get_sset_count, .self_test = nv_self_test, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = nv_get_link_ksettings, + .set_link_ksettings = nv_set_link_ksettings, }; /* The mgmt unit and driver use a semaphore to access the phy during init */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index b19be7c6c1f4..21093276d2b7 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -73,62 +73,80 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = { #define PCH_GBE_MAC_REGS_LEN (sizeof(struct pch_gbe_regs) / 4) #define PCH_GBE_REGS_LEN (PCH_GBE_MAC_REGS_LEN + PCH_GBE_PHY_REGS_LEN) /** - * pch_gbe_get_settings - Get device-specific settings + * pch_gbe_get_link_ksettings - Get device-specific settings * @netdev: Network interface device structure * @ecmd: Ethtool command * Returns: * 0: Successful. * Negative value: Failed. */ -static int pch_gbe_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int pch_gbe_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); + u32 supported, advertising; int ret; - ret = mii_ethtool_gset(&adapter->mii, ecmd); - ecmd->supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half); - ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); + ret = mii_ethtool_get_link_ksettings(&adapter->mii, ecmd); + + ethtool_convert_link_mode_to_legacy_u32(&supported, + ecmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + ecmd->link_modes.advertising); + + supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half); + advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); + + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising, + advertising); if (!netif_carrier_ok(adapter->netdev)) - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->base.speed = SPEED_UNKNOWN; return ret; } /** - * pch_gbe_set_settings - Set device-specific settings + * pch_gbe_set_link_ksettings - Set device-specific settings * @netdev: Network interface device structure * @ecmd: Ethtool command * Returns: * 0: Successful. * Negative value: Failed. 
*/ -static int pch_gbe_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int pch_gbe_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ecmd) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_hw *hw = &adapter->hw; - u32 speed = ethtool_cmd_speed(ecmd); + struct ethtool_link_ksettings copy_ecmd; + u32 speed = ecmd->base.speed; + u32 advertising; int ret; pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET); + memcpy(&copy_ecmd, ecmd, sizeof(*ecmd)); + /* when set_settings() is called with a ethtool_cmd previously * filled by get_settings() on a down link, speed is -1: */ if (speed == UINT_MAX) { speed = SPEED_1000; - ethtool_cmd_speed_set(ecmd, speed); - ecmd->duplex = DUPLEX_FULL; + copy_ecmd.base.speed = speed; + copy_ecmd.base.duplex = DUPLEX_FULL; } - ret = mii_ethtool_sset(&adapter->mii, ecmd); + ret = mii_ethtool_set_link_ksettings(&adapter->mii, &copy_ecmd); if (ret) { - netdev_err(netdev, "Error: mii_ethtool_sset\n"); + netdev_err(netdev, "Error: mii_ethtool_set_link_ksettings\n"); return ret; } hw->mac.link_speed = speed; - hw->mac.link_duplex = ecmd->duplex; - hw->phy.autoneg_advertised = ecmd->advertising; - hw->mac.autoneg = ecmd->autoneg; + hw->mac.link_duplex = copy_ecmd.base.duplex; + ethtool_convert_link_mode_to_legacy_u32( + &advertising, copy_ecmd.link_modes.advertising); + hw->phy.autoneg_advertised = advertising; + hw->mac.autoneg = copy_ecmd.base.autoneg; /* reset the link */ if (netif_running(adapter->netdev)) { @@ -487,8 +505,6 @@ static int pch_gbe_get_sset_count(struct net_device *netdev, int sset) } static const struct ethtool_ops pch_gbe_ethtool_ops = { - .get_settings = pch_gbe_get_settings, - .set_settings = pch_gbe_set_settings, .get_drvinfo = pch_gbe_get_drvinfo, .get_regs_len = pch_gbe_get_regs_len, .get_regs = pch_gbe_get_regs, @@ -503,6 +519,8 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = { .get_strings = pch_gbe_get_strings, .get_ethtool_stats = pch_gbe_get_ethtool_stats, .get_sset_count = pch_gbe_get_sset_count, + .get_link_ksettings = pch_gbe_get_link_ksettings, + .set_link_ksettings = pch_gbe_set_link_ksettings, }; void pch_gbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index baff744b560e..8b026dbf0d8d 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -1811,21 +1811,23 @@ static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } -static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int hamachi_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct hamachi_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); - mii_ethtool_gset(&np->mii_if, ecmd); + mii_ethtool_get_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return 0; } -static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +static int hamachi_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct hamachi_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); - res = mii_ethtool_sset(&np->mii_if, ecmd); + res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); spin_unlock_irq(&np->lock); return res; } @@ -1845,10 +1847,10 @@ static u32 hamachi_get_link(struct net_device *dev) static const 
struct ethtool_ops ethtool_ops = { .begin = check_if_running, .get_drvinfo = hamachi_get_drvinfo, - .get_settings = hamachi_get_settings, - .set_settings = hamachi_set_settings, .nway_reset = hamachi_nway_reset, .get_link = hamachi_get_link, + .get_link_ksettings = hamachi_get_link_ksettings, + .set_link_ksettings = hamachi_set_link_ksettings, }; static const struct ethtool_ops ethtool_ops_no_mii = { diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index aaa1e8517348..c2e24afbaeb2 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -114,4 +114,7 @@ config QED_RDMA config QED_ISCSI bool +config QED_FCOE + bool + endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index f9034467736c..3157f97dd782 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -96,69 +96,70 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) } static int -netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +netxen_nic_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct netxen_adapter *adapter = netdev_priv(dev); int check_sfp_module = 0; + u32 supported, advertising; /* read which mode */ if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - ecmd->supported = (SUPPORTED_10baseT_Half | + supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); - ecmd->advertising = (ADVERTISED_100baseT_Half | + advertising = (ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->duplex = adapter->link_duplex; - ecmd->autoneg = adapter->link_autoneg; + cmd->base.speed = adapter->link_speed; + cmd->base.duplex = adapter->link_duplex; + cmd->base.autoneg = adapter->link_autoneg; } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { u32 val; val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); if (val == NETXEN_PORT_MODE_802_3_AP) { - ecmd->supported = SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_1000baseT_Full; + supported = SUPPORTED_1000baseT_Full; + advertising = ADVERTISED_1000baseT_Full; } else { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; + supported = SUPPORTED_10000baseT_Full; + advertising = ADVERTISED_10000baseT_Full; } if (netif_running(dev) && adapter->has_link_events) { - ethtool_cmd_speed_set(ecmd, adapter->link_speed); - ecmd->autoneg = adapter->link_autoneg; - ecmd->duplex = adapter->link_duplex; + cmd->base.speed = adapter->link_speed; + cmd->base.autoneg = adapter->link_autoneg; + cmd->base.duplex = adapter->link_duplex; goto skip; } - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { u16 pcifn = adapter->ahw.pci_func; val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn)); - ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ * - P3_LINK_SPEED_VAL(pcifn, val)); + cmd->base.speed = P3_LINK_SPEED_MHZ * + P3_LINK_SPEED_VAL(pcifn, val); } else - ethtool_cmd_speed_set(ecmd, SPEED_10000); + cmd->base.speed = SPEED_10000; - ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_DISABLE; + cmd->base.duplex = 
DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_DISABLE; } else return -EIO; skip: - ecmd->phy_address = adapter->physical_port; - ecmd->transceiver = XCVR_EXTERNAL; + cmd->base.phy_address = adapter->physical_port; switch (adapter->ahw.board_type) { case NETXEN_BRDTYPE_P2_SB35_4G: @@ -167,16 +168,16 @@ skip: case NETXEN_BRDTYPE_P3_4_GB: case NETXEN_BRDTYPE_P3_4_GB_MM: - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; case NETXEN_BRDTYPE_P2_SB31_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4_LP: case NETXEN_BRDTYPE_P3_10000_BASE_T: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - ecmd->autoneg = (adapter->ahw.board_type == + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + cmd->base.autoneg = (adapter->ahw.board_type == NETXEN_BRDTYPE_P2_SB31_10G_CX4) ? (AUTONEG_DISABLE) : (adapter->link_autoneg); break; @@ -185,39 +186,39 @@ skip: case NETXEN_BRDTYPE_P3_IMEZ: case NETXEN_BRDTYPE_P3_XG_LOM: case NETXEN_BRDTYPE_P3_HMEZ: - ecmd->supported |= SUPPORTED_MII; - ecmd->advertising |= ADVERTISED_MII; - ecmd->port = PORT_MII; - ecmd->autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_MII; + advertising |= ADVERTISED_MII; + cmd->base.port = PORT_MII; + cmd->base.autoneg = AUTONEG_DISABLE; break; case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: case NETXEN_BRDTYPE_P3_10G_SFP_CT: case NETXEN_BRDTYPE_P3_10G_SFP_QT: - ecmd->advertising |= ADVERTISED_TP; - ecmd->supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + supported |= SUPPORTED_TP; check_sfp_module = netif_running(dev) && adapter->has_link_events; case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P3_10G_XFP: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + cmd->base.autoneg = AUTONEG_DISABLE; break; case NETXEN_BRDTYPE_P3_10G_TP: if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= + cmd->base.autoneg = AUTONEG_DISABLE; + supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; check_sfp_module = netif_running(dev) && adapter->has_link_events; } else { - ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); - ecmd->advertising |= + supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; } break; default: @@ -232,31 +233,37 @@ skip: case LINKEVENT_MODULE_OPTICAL_SRLR: case LINKEVENT_MODULE_OPTICAL_LRM: case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; break; case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: case LINKEVENT_MODULE_TWINAX: - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; break; default: - ecmd->port = -1; + cmd->base.port = -1; } } if (!netif_running(dev) || !adapter->ahw.linkup) { - ecmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; } + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + 
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } static int -netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +netxen_nic_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct netxen_adapter *adapter = netdev_priv(dev); - u32 speed = ethtool_cmd_speed(ecmd); + u32 speed = cmd->base.speed; int ret; if (adapter->ahw.port_type != NETXEN_NIC_GBE) @@ -265,16 +272,16 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG)) return -EOPNOTSUPP; - ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex, - ecmd->autoneg); + ret = nx_fw_cmd_set_gbe_port(adapter, speed, cmd->base.duplex, + cmd->base.autoneg); if (ret == NX_RCODE_NOT_SUPPORTED) return -EOPNOTSUPP; else if (ret) return -EIO; adapter->link_speed = speed; - adapter->link_duplex = ecmd->duplex; - adapter->link_autoneg = ecmd->autoneg; + adapter->link_duplex = cmd->base.duplex; + adapter->link_autoneg = cmd->base.autoneg; if (!netif_running(dev)) return 0; @@ -931,8 +938,6 @@ netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, } const struct ethtool_ops netxen_nic_ethtool_ops = { - .get_settings = netxen_nic_get_settings, - .set_settings = netxen_nic_set_settings, .get_drvinfo = netxen_nic_get_drvinfo, .get_regs_len = netxen_nic_get_regs_len, .get_regs = netxen_nic_get_regs, @@ -954,4 +959,6 @@ const struct ethtool_ops netxen_nic_ethtool_ops = { .get_dump_flag = netxen_get_dump_flag, .get_dump_data = netxen_get_dump_data, .set_dump = netxen_set_dump, + .get_link_ksettings = netxen_nic_get_link_ksettings, + .set_link_ksettings = netxen_nic_set_link_ksettings, }; diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 1a7300f72cab..974929dcc74e 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -7,3 +7,4 @@ qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_RDMA) += qed_roce.o qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o +qed-$(CONFIG_QED_FCOE) += qed_fcoe.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 6557f94b92ed..61a9cd5be497 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -60,6 +60,7 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_WFQ_UNIT 100 #define ISCSI_BDQ_ID(_port_id) (_port_id) +#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2) #define QED_WID_SIZE (1024) #define QED_PF_DEMS_SIZE (4) @@ -167,6 +168,7 @@ struct qed_tunn_update_params { */ enum qed_pci_personality { QED_PCI_ETH, + QED_PCI_FCOE, QED_PCI_ISCSI, QED_PCI_ETH_ROCE, QED_PCI_DEFAULT /* default in shmem */ @@ -204,6 +206,7 @@ enum QED_FEATURE { QED_VF, QED_RDMA_CNQ, QED_VF_L2_QUE, + QED_FCOE_CQ, QED_MAX_FEATURES, }; @@ -221,6 +224,7 @@ enum QED_PORT_MODE { enum qed_dev_cap { QED_DEV_CAP_ETH, + QED_DEV_CAP_FCOE, QED_DEV_CAP_ISCSI, QED_DEV_CAP_ROCE, }; @@ -255,6 +259,10 @@ struct qed_hw_info { u32 part_num[4]; unsigned char hw_mac_addr[ETH_ALEN]; + u64 node_wwn; + u64 port_wwn; + + u16 num_fcoe_conns; struct qed_igu_info *p_igu_info; @@ -410,6 +418,7 @@ struct qed_hwfn { struct qed_ooo_info *p_ooo_info; struct qed_rdma_info *p_rdma_info; struct qed_iscsi_info *p_iscsi_info; + struct qed_fcoe_info *p_fcoe_info; struct qed_pf_params pf_params; bool b_rdma_enabled_in_prs; @@ -620,11 +629,13 
@@ struct qed_dev { u8 protocol; #define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH) +#define IS_QED_FCOE_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_FCOE) /* Callbacks to protocol driver */ union { struct qed_common_cb_ops *common; struct qed_eth_cb_ops *eth; + struct qed_fcoe_cb_ops *fcoe; struct qed_iscsi_cb_ops *iscsi; } protocol_ops; void *ops_cookie; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index dcb8fc185df7..d42d03df751a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -90,12 +90,14 @@ union conn_context { struct core_conn_context core_ctx; struct eth_conn_context eth_ctx; struct iscsi_conn_context iscsi_ctx; + struct fcoe_conn_context fcoe_ctx; struct roce_conn_context roce_ctx; }; -/* TYPE-0 task context - iSCSI */ +/* TYPE-0 task context - iSCSI, FCOE */ union type0_task_context { struct iscsi_task_context iscsi_ctx; + struct fcoe_task_context fcoe_ctx; }; /* TYPE-1 task context - ROCE */ @@ -240,15 +242,22 @@ struct qed_cxt_mngr { static bool src_proto(enum protocol_type type) { return type == PROTOCOLID_ISCSI || + type == PROTOCOLID_FCOE || type == PROTOCOLID_ROCE; } static bool tm_cid_proto(enum protocol_type type) { return type == PROTOCOLID_ISCSI || + type == PROTOCOLID_FCOE || type == PROTOCOLID_ROCE; } +static bool tm_tid_proto(enum protocol_type type) +{ + return type == PROTOCOLID_FCOE; +} + /* counts the iids for the CDU/CDUC ILT client configuration */ struct qed_cdu_iids { u32 pf_cids; @@ -307,6 +316,22 @@ static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr, iids->pf_cids += p_cfg->cid_count; iids->per_vf_cids += p_cfg->cids_per_vf; } + + if (tm_tid_proto(i)) { + struct qed_tid_seg *segs = p_cfg->tid_seg; + + /* for each segment there is at most one + * protocol for which count is not 0. + */ + for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) + iids->pf_tids[j] += segs[j].count; + + /* The last array element is for the VFs. As for PF + * segments there can be only one protocol for + * which this value is not 0. 
+ */ + iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; + } } iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN); @@ -1694,9 +1719,42 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) /* @@@TBD how to enable the scan for the VFs */ } +static void qed_prs_init_common(struct qed_hwfn *p_hwfn) +{ + if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) && + p_hwfn->pf_params.fcoe_pf_params.is_target) + STORE_RT_REG(p_hwfn, + PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0); +} + +static void qed_prs_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_conn_type_cfg *p_fcoe; + struct qed_tid_seg *p_tid; + + p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE]; + + /* If FCoE is active set the MAX OX_ID (tid) in the Parser */ + if (!p_fcoe->cid_count) + return; + + p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG]; + if (p_hwfn->pf_params.fcoe_pf_params.is_target) { + STORE_RT_REG_AGG(p_hwfn, + PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET, + p_tid->count); + } else { + STORE_RT_REG_AGG(p_hwfn, + PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET, + p_tid->count); + } +} + void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) { qed_cdu_init_common(p_hwfn); + qed_prs_init_common(p_hwfn); } void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) @@ -1708,6 +1766,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) qed_ilt_init_pf(p_hwfn); qed_src_init_pf(p_hwfn); qed_tm_init_pf(p_hwfn); + qed_prs_init_pf(p_hwfn); } int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, @@ -1885,6 +1944,27 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) p_params->num_cons, 1); break; } + case QED_PCI_FCOE: + { + struct qed_fcoe_pf_params *p_params; + + p_params = &p_hwfn->pf_params.fcoe_pf_params; + + if (p_params->num_cons && p_params->num_tasks) { + qed_cxt_set_proto_cid_count(p_hwfn, + PROTOCOLID_FCOE, + p_params->num_cons, + 0); + + qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE, + QED_CXT_FCOE_TID_SEG, 0, + p_params->num_tasks, true); + } else { + DP_INFO(p_hwfn->cdev, + "Fcoe personality used without setting params!\n"); + } + break; + } case QED_PCI_ISCSI: { struct qed_iscsi_pf_params *p_params; @@ -1927,6 +2007,10 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, /* Verify the personality */ switch (p_hwfn->hw_info.personality) { + case QED_PCI_FCOE: + proto = PROTOCOLID_FCOE; + seg = QED_CXT_FCOE_TID_SEG; + break; case QED_PCI_ISCSI: proto = PROTOCOLID_ISCSI; seg = QED_CXT_ISCSI_TID_SEG; @@ -2215,15 +2299,19 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *p_cli; - struct qed_ilt_cli_blk *p_seg; struct qed_tid_seg *p_seg_info; - u32 proto, seg; - u32 total_lines; - u32 tid_size, ilt_idx; + struct qed_ilt_cli_blk *p_seg; u32 num_tids_per_block; + u32 tid_size, ilt_idx; + u32 total_lines; + u32 proto, seg; /* Verify the personality */ switch (p_hwfn->hw_info.personality) { + case QED_PCI_FCOE: + proto = PROTOCOLID_FCOE; + seg = QED_CXT_FCOE_TID_SEG; + break; case QED_PCI_ISCSI: proto = PROTOCOLID_ISCSI; seg = QED_CXT_ISCSI_TID_SEG; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 98f4973cac9d..8b010324268a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -91,6 +91,7 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, #define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI #define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE +#define QED_CXT_FCOE_TID_SEG PROTOCOLID_FCOE enum qed_cxt_elem_type { 
QED_ELEM_CXT, QED_ELEM_SRQ, @@ -204,4 +205,6 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto); #define QED_CTX_WORKING_MEM 0 #define QED_CTX_FL_MEM 1 +int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, + u32 tid, u8 ctx_type, void **task_ctx); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index dc0d2c9ad6b5..5bd36a4a8fcd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -432,7 +432,6 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn, return rc; } -#ifdef CONFIG_DCB static void qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn, struct qed_dcbx_app_prio *p_prio, @@ -749,7 +748,6 @@ qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return 0; } -#endif static int qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -864,6 +862,15 @@ static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, return rc; } +void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type) +{ + struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; + void *cookie = hwfn->cdev->ops_cookie; + + if (cookie && op->dcbx_aen) + op->dcbx_aen(cookie, &hwfn->p_dcbx_info->get, mib_type); +} + /* Read updated MIB. * Reconfigure QM and invoke PF update ramrod command if operational MIB * change is detected. @@ -890,6 +897,8 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, qed_sp_pf_update(p_hwfn); } } + qed_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type); + qed_dcbx_aen(p_hwfn, type); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index d70300fda020..0fabe97f998d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -57,7 +57,6 @@ struct qed_dcbx_app_data { u8 tc; /* Traffic Class */ }; -#ifdef CONFIG_DCB #define QED_DCBX_VERSION_DISABLED 0 #define QED_DCBX_VERSION_IEEE 1 #define QED_DCBX_VERSION_CEE 2 @@ -73,7 +72,6 @@ struct qed_dcbx_set { struct qed_dcbx_admin_params config; u32 ver_num; }; -#endif struct qed_dcbx_results { bool dcbx_enabled; @@ -97,9 +95,8 @@ struct qed_dcbx_info { struct qed_dcbx_results results; struct dcbx_mib operational; struct dcbx_mib remote; -#ifdef CONFIG_DCB struct qed_dcbx_set set; -#endif + struct qed_dcbx_get get; u8 dcbx_cap; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 33e720143b8d..5ee7f040c50a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -49,6 +49,7 @@ #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_dev_api.h" +#include "qed_fcoe.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" @@ -172,6 +173,9 @@ void qed_resc_free(struct qed_dev *cdev) #ifdef CONFIG_QED_LL2 qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info); #endif + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info); + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info); qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info); @@ -433,6 +437,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) int qed_resc_alloc(struct qed_dev *cdev) { struct qed_iscsi_info *p_iscsi_info; + struct qed_fcoe_info *p_fcoe_info; struct qed_ooo_info *p_ooo_info; #ifdef CONFIG_QED_LL2 struct qed_ll2_info *p_ll2_info; @@ -539,6 +544,14 @@ int qed_resc_alloc(struct qed_dev *cdev) p_hwfn->p_ll2_info = 
p_ll2_info; } #endif + + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { + p_fcoe_info = qed_fcoe_alloc(p_hwfn); + if (!p_fcoe_info) + goto alloc_no_mem; + p_hwfn->p_fcoe_info = p_fcoe_info; + } + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { p_iscsi_info = qed_iscsi_alloc(p_hwfn); if (!p_iscsi_info) @@ -602,6 +615,9 @@ void qed_resc_setup(struct qed_dev *cdev) if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info); #endif + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info); + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info); qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info); @@ -994,7 +1010,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* Protocl Configuration */ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0); - STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0); + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, + (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); /* Cleanup chip from previous driver if such remains exist */ @@ -1026,8 +1043,16 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* send function start command */ rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode, allow_npar_tx_switch); - if (rc) + if (rc) { DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); + return rc; + } + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2)); + qed_wr(p_hwfn, p_ptt, + PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, + 0x100); + } } return rc; } @@ -1787,8 +1812,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; + u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; struct qed_mcp_link_params *link; /* Read global nvm_cfg address */ @@ -1934,6 +1959,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) __set_bit(QED_DEV_CAP_ETH, &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) + __set_bit(QED_DEV_CAP_FCOE, + &p_hwfn->hw_info.device_capabilities); if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) __set_bit(QED_DEV_CAP_ISCSI, &p_hwfn->hw_info.device_capabilities); @@ -2671,6 +2699,177 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n"); } +int +qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, enum qed_llh_port_filter_type_t type) +{ + u32 high = 0, low = 0, en; + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return 0; + + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + high = source_port_or_eth_type; + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + case QED_LLH_FILTER_UDP_SRC_PORT: + low = source_port_or_eth_type << 16; + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + case QED_LLH_FILTER_UDP_DEST_PORT: + low = dest_port; + break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + low = (source_port_or_eth_type << 16) | dest_port; + break; + default: + DP_NOTICE(p_hwfn, + "Non valid LLH protocol 
filter type %d\n", type); + return -EINVAL; + } + /* Find a free entry and utilize it */ + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + en = qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)); + if (en) + continue; + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32), low); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), high); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32), 1 << type); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1); + break; + } + if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { + DP_NOTICE(p_hwfn, + "Failed to find an empty LLH filter to utilize\n"); + return -EINVAL; + } + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "ETH type %x is added at %d\n", + source_port_or_eth_type, i); + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "TCP src port %x is added at %d\n", + source_port_or_eth_type, i); + break; + case QED_LLH_FILTER_UDP_SRC_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "UDP src port %x is added at %d\n", + source_port_or_eth_type, i); + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "TCP dst port %x is added at %d\n", dest_port, i); + break; + case QED_LLH_FILTER_UDP_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "UDP dst port %x is added at %d\n", dest_port, i); + break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "TCP src/dst ports %x/%x are added at %d\n", + source_port_or_eth_type, dest_port, i); + break; + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "UDP src/dst ports %x/%x are added at %d\n", + source_port_or_eth_type, dest_port, i); + break; + } + return 0; +} + +void +qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, + enum qed_llh_port_filter_type_t type) +{ + u32 high = 0, low = 0; + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return; + + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + high = source_port_or_eth_type; + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + case QED_LLH_FILTER_UDP_SRC_PORT: + low = source_port_or_eth_type << 16; + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + case QED_LLH_FILTER_UDP_DEST_PORT: + low = dest_port; + break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + low = (source_port_or_eth_type << 16) | dest_port; + break; + default: + DP_NOTICE(p_hwfn, + "Non valid LLH protocol filter type %d\n", type); + return; + } + + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + if (!qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32))) + continue; + if (!qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32))) + continue; + if (!(qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32)) & BIT(type))) + continue; + if (qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32)) != low) + continue; + if (qed_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32)) != high) + continue; + + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + 
NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0); + qed_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), 0); + break; + } + + if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) + DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n"); +} + static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr, void *p_eth_qzone, size_t eth_qzone_size, u8 timeset) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 5d37ba24da40..6812003411cd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -353,6 +353,48 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn, void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 *p_filter); +enum qed_llh_port_filter_type_t { + QED_LLH_FILTER_ETHERTYPE, + QED_LLH_FILTER_TCP_SRC_PORT, + QED_LLH_FILTER_TCP_DEST_PORT, + QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT, + QED_LLH_FILTER_UDP_SRC_PORT, + QED_LLH_FILTER_UDP_DEST_PORT, + QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT +}; + +/** + * @brief qed_llh_add_protocol_filter - configures a protocol filter in llh + * + * @param p_hwfn + * @param p_ptt + * @param source_port_or_eth_type - source port or ethertype to add + * @param dest_port - destination port to add + * @param type - type of filters and comparing + */ +int +qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, + enum qed_llh_port_filter_type_t type); + +/** + * @brief qed_llh_remove_protocol_filter - remove a protocol filter in llh + * + * @param p_hwfn + * @param p_ptt + * @param source_port_or_eth_type - source port or ethertype to add + * @param dest_port - destination port to add + * @param type - type of filters and comparing + */ +void +qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 source_port_or_eth_type, + u16 dest_port, + enum qed_llh_port_filter_type_t type); + /** * *@brief Cleanup of previous driver remains prior to load * diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c new file mode 100644 index 000000000000..cbc81412174f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -0,0 +1,1014 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <asm/param.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/string.h> +#include <linux/version.h> +#include <linux/workqueue.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#define __PREVENT_DUMP_MEM_ARR__ +#define __PREVENT_PXP_GLOBAL_WIN__ +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_fcoe.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include <linux/qed/qed_fcoe_if.h> + +struct qed_fcoe_conn { + struct list_head list_entry; + bool free_on_delete; + + u16 conn_id; + u32 icid; + u32 fw_cid; + u8 layer_code; + + dma_addr_t sq_pbl_addr; + dma_addr_t sq_curr_page_addr; + dma_addr_t sq_next_page_addr; + dma_addr_t xferq_pbl_addr; + void *xferq_pbl_addr_virt_addr; + dma_addr_t xferq_addr[4]; + void *xferq_addr_virt_addr[4]; + dma_addr_t confq_pbl_addr; + void *confq_pbl_addr_virt_addr; + dma_addr_t confq_addr[2]; + void *confq_addr_virt_addr[2]; + + dma_addr_t terminate_params; + + u16 dst_mac_addr_lo; + u16 dst_mac_addr_mid; + u16 dst_mac_addr_hi; + u16 src_mac_addr_lo; + u16 src_mac_addr_mid; + u16 src_mac_addr_hi; + + u16 tx_max_fc_pay_len; + u16 e_d_tov_timer_val; + u16 rec_tov_timer_val; + u16 rx_max_fc_pay_len; + u16 vlan_tag; + u16 physical_q0; + + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; + u8 def_q_idx; +}; + +static int +qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_fcoe_pf_params *fcoe_pf_params = NULL; + struct fcoe_init_ramrod_params *p_ramrod = NULL; + struct fcoe_init_func_ramrod_data *p_data; + struct fcoe_conn_context *p_cxt = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + struct qed_cxt_info cxt_info; + u32 dummy_cid; + int rc = 0; + u16 tmp; + u8 i; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_INIT_FUNC, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_init; + p_data = &p_ramrod->init_ramrod_data; + fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params; + + p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); + tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages); + p_data->sq_num_pages_in_pbl = tmp; + + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); + if (rc) + return rc; + + cxt_info.iid = dummy_cid; + rc = qed_cxt_get_cid_info(p_hwfn, 
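A note on the queue naming in struct qed_fcoe_conn above, based on how the fields are wired up later in this file:

    /* Ownership of the per-connection rings (sketch, not code from
     * the patch):
     *
     *   sq_pbl_addr / sq_*_page_addr  - allocated by the upper-layer
     *                                   driver, passed in via
     *                                   offload_conn.
     *   xferq_pbl_addr / xferq_addr[] - allocated by qed itself in
     *                                   qed_fcoe_allocate_connection().
     *   confq_pbl_addr / confq_addr[] - also allocated by qed; note
     *                                   the offload ramrod maps these
     *                                   onto the firmware's respq_*
     *                                   fields.
     */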
&cxt_info); + if (rc) { + DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", + dummy_cid); + return rc; + } + p_cxt = cxt_info.p_cxt; + SET_FIELD(p_cxt->tstorm_ag_context.flags3, + TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); + + fcoe_pf_params->dummy_icid = (u16)dummy_cid; + + tmp = cpu_to_le16(fcoe_pf_params->num_tasks); + p_data->func_params.num_tasks = tmp; + p_data->func_params.log_page_size = fcoe_pf_params->log_page_size; + p_data->func_params.debug_mode = fcoe_pf_params->debug_mode; + + DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr, + fcoe_pf_params->glbl_q_params_addr); + + tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries); + p_data->q_params.cq_num_entries = tmp; + + tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries); + p_data->q_params.cmdq_num_entries = tmp; + + tmp = fcoe_pf_params->num_cqs; + p_data->q_params.num_queues = (u8)tmp; + + tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; + p_data->q_params.queue_relative_offset = (u8)tmp; + + for (i = 0; i < fcoe_pf_params->num_cqs; i++) { + tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id); + p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp; + } + + p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi; + p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi; + + p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id); + + DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ], + fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]); + p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] = + fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ]; + tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]; + p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp); + tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]; + p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp); + + DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA], + fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); + p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = + fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; + tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]; + p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp); + tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]; + p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp); + tmp = fcoe_pf_params->rq_buffer_size; + p_data->q_params.rq_buffer_size = cpu_to_le16(tmp); + + if (fcoe_pf_params->is_target) { + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA]) + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1); + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1); + } else { + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + } + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + return rc; +} + +static int +qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL; + struct fcoe_conn_offload_ramrod_data *p_data; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u16 pq_id = 0, tmp; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, 
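qed_sp_fcoe_func_start() consumes p_hwfn->pf_params.fcoe_pf_params wholesale, so those parameters must be populated before the INIT_FUNC ramrod is sent. A hedged sketch of the fields the function actually reads; every numeric value below is a placeholder, and glbl_q_dma stands in for a DMA address the caller prepared earlier:

    struct qed_fcoe_pf_params *p = &p_hwfn->pf_params.fcoe_pf_params;

    p->mtu = 2500;                  /* placeholder; FCoE baby-jumbo */
    p->num_tasks = 2048;            /* task (exchange) contexts */
    p->sq_num_pbl_pages = 8;
    p->num_cqs = 2;                 /* global CQs */
    p->gl_rq_pi = 0;                /* status-block PI for the RQ CQs */
    p->gl_cmd_pi = 1;               /* status-block PI for the CMDQs */
    p->glbl_q_params_addr = glbl_q_dma;
    p->is_target = 0;               /* initiator: only RQ validity set */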
&p_ent, + FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_conn_ofld; + p_data = &p_ramrod->offload_ramrod_data; + + /* Transmission PQ is the first of the PF */ + pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL); + p_conn->physical_q0 = cpu_to_le16(pq_id); + p_data->physical_q0 = cpu_to_le16(pq_id); + + p_data->conn_id = cpu_to_le16(p_conn->conn_id); + DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr); + DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr); + DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr); + DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr); + DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]); + DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]); + + DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr); + DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]); + DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]); + + p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo); + p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid); + p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi); + p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo); + p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid); + p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi); + + tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len); + p_data->tx_max_fc_pay_len = tmp; + tmp = cpu_to_le16(p_conn->e_d_tov_timer_val); + p_data->e_d_tov_timer_val = tmp; + tmp = cpu_to_le16(p_conn->rec_tov_timer_val); + p_data->rec_rr_tov_timer_val = tmp; + tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len); + p_data->rx_max_fc_pay_len = tmp; + + p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag); + p_data->s_id.addr_hi = p_conn->s_id.addr_hi; + p_data->s_id.addr_mid = p_conn->s_id.addr_mid; + p_data->s_id.addr_lo = p_conn->s_id.addr_lo; + p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3; + p_data->d_id.addr_hi = p_conn->d_id.addr_hi; + p_data->d_id.addr_mid = p_conn->d_id.addr_mid; + p_data->d_id.addr_lo = p_conn->d_id.addr_lo; + p_data->flags = p_conn->flags; + p_data->def_q_idx = p_conn->def_q_idx; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_TERMINATE_CONN, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_conn_terminate; + DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr, + p_conn->terminate_params); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u32 active_segs = 0; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + 
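The DMA_REGPAIR_LE() calls above split each 64-bit host address into the hi/lo halves of a little-endian struct regpair, the representation the firmware interface uses for DMA addresses throughout this file. Conceptually (the real macro lives with qed's chain/HSI helpers):

    dst.hi = cpu_to_le32(upper_32_bits(addr));
    dst.lo = cpu_to_le32(lower_32_bits(addr));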
init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_DESTROY_FUNC, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK); + active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG); + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn **p_out_conn) +{ + struct qed_fcoe_conn *p_conn = NULL; + void *p_addr; + u32 i; + + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + if (!list_empty(&p_hwfn->p_fcoe_info->free_list)) + p_conn = + list_first_entry(&p_hwfn->p_fcoe_info->free_list, + struct qed_fcoe_conn, list_entry); + if (p_conn) { + list_del(&p_conn->list_entry); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + *p_out_conn = p_conn; + return 0; + } + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + + p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); + if (!p_conn) + return -ENOMEM; + + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->xferq_pbl_addr, GFP_KERNEL); + if (!p_addr) + goto nomem_pbl_xferq; + p_conn->xferq_pbl_addr_virt_addr = p_addr; + + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) { + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->xferq_addr[i], GFP_KERNEL); + if (!p_addr) + goto nomem_xferq; + p_conn->xferq_addr_virt_addr[i] = p_addr; + + p_addr = p_conn->xferq_pbl_addr_virt_addr; + ((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i]; + } + + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->confq_pbl_addr, GFP_KERNEL); + if (!p_addr) + goto nomem_xferq; + p_conn->confq_pbl_addr_virt_addr = p_addr; + + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) { + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->confq_addr[i], GFP_KERNEL); + if (!p_addr) + goto nomem_confq; + p_conn->confq_addr_virt_addr[i] = p_addr; + + p_addr = p_conn->confq_pbl_addr_virt_addr; + ((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i]; + } + + p_conn->free_on_delete = true; + *p_out_conn = p_conn; + return 0; + +nomem_confq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_pbl_addr_virt_addr, + p_conn->confq_pbl_addr); + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) + if (p_conn->confq_addr_virt_addr[i]) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_addr_virt_addr[i], + p_conn->confq_addr[i]); +nomem_xferq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_pbl_addr_virt_addr, + p_conn->xferq_pbl_addr); + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) + if (p_conn->xferq_addr_virt_addr[i]) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_addr_virt_addr[i], + p_conn->xferq_addr[i]); +nomem_pbl_xferq: + kfree(p_conn); + return -ENOMEM; +} + +static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn) +{ + u32 i; + + if (!p_conn) + return; + + if (p_conn->confq_pbl_addr_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_pbl_addr_virt_addr, + p_conn->confq_pbl_addr); + + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) { + if 
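The allocation loop above is the usual qed page-buffer-list (PBL) scheme: each queue gets one page that holds nothing but the DMA addresses of its data pages, and the firmware walks that table. Stripped of the FCoE specifics (pbl_virt, page_virt, page_dma, num_pages and dev are hypothetical names):

    dma_addr_t *pbl = pbl_virt;     /* one QED_CHAIN_PAGE_SIZE page */
    int i;

    for (i = 0; i < num_pages; i++) {
            page_virt[i] = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE,
                                              &page_dma[i], GFP_KERNEL);
            if (!page_virt[i])
                    goto unwind;    /* free what was allocated so far */
            pbl[i] = page_dma[i];   /* publish the page to firmware */
    }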
(!p_conn->confq_addr_virt_addr[i]) + continue; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_addr_virt_addr[i], + p_conn->confq_addr[i]); + } + + if (p_conn->xferq_pbl_addr_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_pbl_addr_virt_addr, + p_conn->xferq_pbl_addr); + + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) { + if (!p_conn->xferq_addr_virt_addr[i]) + continue; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_addr_virt_addr[i], + p_conn->xferq_addr[i]); + } + kfree(p_conn); +} + +static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) +{ + return (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(cid, DQ_DEMS_LEGACY); +} + +static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id); + + return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id); +} + +static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id); + + return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM + + TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id); +} + +struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_fcoe_info *p_fcoe_info; + + /* Allocate LL2's set struct */ + p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL); + if (!p_fcoe_info) { + DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n"); + return NULL; + } + INIT_LIST_HEAD(&p_fcoe_info->free_list); + return p_fcoe_info; +} + +void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info) +{ + struct fcoe_task_context *p_task_ctx = NULL; + int rc; + u32 i; + + spin_lock_init(&p_fcoe_info->lock); + for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) { + rc = qed_cxt_get_task_ctx(p_hwfn, i, + QED_CTX_WORKING_MEM, + (void **)&p_task_ctx); + if (rc) + continue; + + memset(p_task_ctx, 0, sizeof(struct fcoe_task_context)); + SET_FIELD(p_task_ctx->timer_context.logical_client_0, + TIMERS_CONTEXT_VALIDLC0, 1); + SET_FIELD(p_task_ctx->timer_context.logical_client_1, + TIMERS_CONTEXT_VALIDLC1, 1); + SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, + TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); + } +} + +void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info) +{ + struct qed_fcoe_conn *p_conn = NULL; + + if (!p_fcoe_info) + return; + + while (!list_empty(&p_fcoe_info->free_list)) { + p_conn = list_first_entry(&p_fcoe_info->free_list, + struct qed_fcoe_conn, list_entry); + if (!p_conn) + break; + list_del(&p_conn->list_entry); + qed_fcoe_free_connection(p_hwfn, p_conn); + } + + kfree(p_fcoe_info); +} + +static int +qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_in_conn, + struct qed_fcoe_conn **p_out_conn) +{ + struct qed_fcoe_conn *p_conn = NULL; + int rc = 0; + u32 icid; + + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + if (rc) + return rc; + + /* Use input connection [if provided] or allocate a new one */ + if (p_in_conn) { + p_conn = p_in_conn; + } else { + rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn); + if (rc) { + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + qed_cxt_release_cid(p_hwfn, icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + 
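The three __iomem helpers above compute the BAR addresses the upper-layer driver needs at runtime: the per-connection DEMS doorbell for the SQ, and the MSTORM/TSTORM internal-RAM words where the BDQ buffer producers are published (they reach the upper driver through fill_dev_info and acquire_conn, shown further down). A hedged sketch of the consumer side; bdq_primary_prod_addr, sq_db_addr and the values written are illustrative, and the exact SQ doorbell payload is dictated by the FCoE HSI and elided here:

    /* After replenishing RQ buffers, publish the new producer: */
    writew(rq_prod_idx, bdq_primary_prod_addr);

    /* After posting SQEs, ring the connection's SQ doorbell: */
    writel(sq_db_word, sq_db_addr);         /* format per FCoE HSI */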
return rc; + } + } + + p_conn->icid = icid; + p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; + *p_out_conn = p_conn; + + return rc; +} + +static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn) +{ + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list); + qed_cxt_release_cid(p_hwfn, p_conn->icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); +} + +static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_fcoe_stats *p_stats) +{ + struct fcoe_rx_stat tstats; + u32 tstats_addr; + + memset(&tstats, 0, sizeof(tstats)); + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); + + p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt); + p_stats->fcoe_rx_data_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt); + p_stats->fcoe_rx_xfer_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt); + p_stats->fcoe_rx_other_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt); + + p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt); + p_stats->fcoe_silent_drop_pkt_rq_full_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt); + p_stats->fcoe_silent_drop_pkt_crc_error_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt); + p_stats->fcoe_silent_drop_pkt_task_invalid_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt); + p_stats->fcoe_silent_drop_total_pkt_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt); +} + +static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_fcoe_stats *p_stats) +{ + struct fcoe_tx_stat pstats; + u32 pstats_addr; + + memset(&pstats, 0, sizeof(pstats)); + pstats_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); + + p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt); + p_stats->fcoe_tx_data_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt); + p_stats->fcoe_tx_xfer_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt); + p_stats->fcoe_tx_other_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt); +} + +static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn, + struct qed_fcoe_stats *p_stats) +{ + struct qed_ptt *p_ptt; + + memset(p_stats, 0, sizeof(*p_stats)); + + p_ptt = qed_ptt_acquire(p_hwfn); + + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + return -EINVAL; + } + + _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats); + _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +struct qed_hash_fcoe_con { + struct hlist_node node; + struct qed_fcoe_conn *con; +}; + +static int qed_fill_fcoe_dev_info(struct qed_dev *cdev, + struct qed_dev_fcoe_info *info) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + int rc; + + memset(info, 0, sizeof(*info)); + rc = qed_fill_dev_info(cdev, &info->common); + + info->primary_dbq_rq_addr = + qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ); + info->secondary_bdq_rq_addr = + qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + + return rc; +} + +static void qed_register_fcoe_ops(struct qed_dev *cdev, + struct qed_fcoe_cb_ops *ops, void *cookie) +{ + cdev->protocol_ops.fcoe = ops; + cdev->ops_cookie = cookie; +} + +static struct qed_hash_fcoe_con 
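The statistics readers above copy storm-RAM counters out with qed_memcpy_from() and then reassemble 64-bit values from register pairs; HILO_64_REGPAIR() is essentially the following, with endian-conversion details elided for clarity:

    u64 val = ((u64)p.hi << 32) | p.lo;     /* p is a struct regpair */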
*qed_fcoe_get_hash(struct qed_dev *cdev, + u32 handle) +{ + struct qed_hash_fcoe_con *hash_con = NULL; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) + return NULL; + + hash_for_each_possible(cdev->connections, hash_con, node, handle) { + if (hash_con->con->icid == handle) + break; + } + + if (!hash_con || (hash_con->con->icid != handle)) + return NULL; + + return hash_con; +} + +static int qed_fcoe_stop(struct qed_dev *cdev) +{ + int rc; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { + DP_NOTICE(cdev, "fcoe already stopped\n"); + return 0; + } + + if (!hash_empty(cdev->connections)) { + DP_NOTICE(cdev, + "Can't stop fcoe - not all connections were returned\n"); + return -EINVAL; + } + + /* Stop the fcoe */ + rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL); + cdev->flags &= ~QED_FLAG_STORAGE_STARTED; + + return rc; +} + +static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks) +{ + int rc; + + if (cdev->flags & QED_FLAG_STORAGE_STARTED) { + DP_NOTICE(cdev, "fcoe already started;\n"); + return 0; + } + + rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) { + DP_NOTICE(cdev, "Failed to start fcoe\n"); + return rc; + } + + cdev->flags |= QED_FLAG_STORAGE_STARTED; + hash_init(cdev->connections); + + if (tasks) { + struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info), + GFP_ATOMIC); + + if (!tid_info) { + DP_NOTICE(cdev, + "Failed to allocate tasks information\n"); + qed_fcoe_stop(cdev); + return -ENOMEM; + } + + rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info); + if (rc) { + DP_NOTICE(cdev, "Failed to gather task information\n"); + qed_fcoe_stop(cdev); + kfree(tid_info); + return rc; + } + + /* Fill task information */ + tasks->size = tid_info->tid_size; + tasks->num_tids_per_block = tid_info->num_tids_per_block; + memcpy(tasks->blocks, tid_info->blocks, + MAX_TID_BLOCKS_FCOE * sizeof(u8 *)); + + kfree(tid_info); + } + + return 0; +} + +static int qed_fcoe_acquire_conn(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell) +{ + struct qed_hash_fcoe_con *hash_con; + int rc; + + /* Allocate a hashed connection */ + hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to allocate hashed connection\n"); + return -ENOMEM; + } + + /* Acquire the connection */ + rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL, + &hash_con->con); + if (rc) { + DP_NOTICE(cdev, "Failed to acquire Connection\n"); + kfree(hash_con); + return rc; + } + + /* Added the connection to hash table */ + *handle = hash_con->con->icid; + *fw_cid = hash_con->con->fw_cid; + hash_add(cdev->connections, &hash_con->node, *handle); + + if (p_doorbell) + *p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev), + *handle); + + return 0; +} + +static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_fcoe_con *hash_con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + hlist_del(&hash_con->node); + qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con); + kfree(hash_con); + + return 0; +} + +static int qed_fcoe_offload_conn(struct qed_dev *cdev, + u32 handle, + struct qed_fcoe_params_offload *conn_info) +{ + struct qed_hash_fcoe_con *hash_con; + struct qed_fcoe_conn *con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection 
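qed_fcoe_acquire_conn() above hands the upper-layer driver an opaque handle (the ICID), the fw_cid to embed in work items, and the mapped SQ doorbell. Through the exported ops the expected lifecycle is as follows; error handling is elided, and conn_params and terminate_dma are hypothetical caller-owned objects:

    u32 handle, fw_cid;
    void __iomem *db;

    ops->acquire_conn(cdev, &handle, &fw_cid, &db);
    ops->offload_conn(cdev, handle, &conn_params);  /* struct qed_fcoe_params_offload */
    /* ... I/O runs over the offloaded connection ... */
    ops->destroy_conn(cdev, handle, terminate_dma);
    ops->release_conn(cdev, handle);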
for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + + con->sq_pbl_addr = conn_info->sq_pbl_addr; + con->sq_curr_page_addr = conn_info->sq_curr_page_addr; + con->sq_next_page_addr = conn_info->sq_next_page_addr; + con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len; + con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val; + con->rec_tov_timer_val = conn_info->rec_tov_timer_val; + con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len; + con->vlan_tag = conn_info->vlan_tag; + con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3; + con->flags = conn_info->flags; + con->def_q_idx = conn_info->def_q_idx; + + con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) | + conn_info->src_mac[4]; + con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) | + conn_info->src_mac[2]; + con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) | + conn_info->src_mac[0]; + con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) | + conn_info->dst_mac[4]; + con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) | + conn_info->dst_mac[2]; + con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) | + conn_info->dst_mac[0]; + + con->s_id.addr_hi = conn_info->s_id.addr_hi; + con->s_id.addr_mid = conn_info->s_id.addr_mid; + con->s_id.addr_lo = conn_info->s_id.addr_lo; + con->d_id.addr_hi = conn_info->d_id.addr_hi; + con->d_id.addr_mid = conn_info->d_id.addr_mid; + con->d_id.addr_lo = conn_info->d_id.addr_lo; + + return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_fcoe_destroy_conn(struct qed_dev *cdev, + u32 handle, dma_addr_t terminate_params) +{ + struct qed_hash_fcoe_con *hash_con; + struct qed_fcoe_conn *con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + con->terminate_params = terminate_params; + + return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats) +{ + return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats); +} + +void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats) +{ + struct qed_fcoe_stats proto_stats; + + /* Retrieve FW statistics */ + memset(&proto_stats, 0, sizeof(proto_stats)); + if (qed_fcoe_stats(cdev, &proto_stats)) { + DP_VERBOSE(cdev, QED_MSG_STORAGE, + "Failed to collect FCoE statistics\n"); + return; + } + + /* Translate FW statistics into struct */ + stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt + + proto_stats.fcoe_rx_xfer_pkt_cnt + + proto_stats.fcoe_rx_other_pkt_cnt; + stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt + + proto_stats.fcoe_tx_xfer_pkt_cnt + + proto_stats.fcoe_tx_other_pkt_cnt; + stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt; + + /* Request protocol driver to fill-in the rest */ + if (cdev->protocol_ops.fcoe && cdev->ops_cookie) { + struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe; + void *cookie = cdev->ops_cookie; + + if (ops->get_login_failures) + stats->login_failure = ops->get_login_failures(cookie); + } +} + +static const struct qed_fcoe_ops qed_fcoe_ops_pass = { + .common = &qed_common_ops_pass, + .ll2 = &qed_ll2_ops_pass, + .fill_dev_info = &qed_fill_fcoe_dev_info, + .start = &qed_fcoe_start, + .stop = &qed_fcoe_stop, + .register_ops = 
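The open-coded MAC handling in qed_fcoe_offload_conn() above packs each 6-byte address into three 16-bit words, with the first wire byte in the low byte of the low word. Written as a hypothetical helper:

    /* Equivalent to the open-coded packing above */
    static void fcoe_mac_to_words(const u8 *mac, u16 *lo, u16 *mid, u16 *hi)
    {
            *lo  = (mac[1] << 8) | mac[0];
            *mid = (mac[3] << 8) | mac[2];
            *hi  = (mac[5] << 8) | mac[4];
    }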
&qed_register_fcoe_ops, + .acquire_conn = &qed_fcoe_acquire_conn, + .release_conn = &qed_fcoe_release_conn, + .offload_conn = &qed_fcoe_offload_conn, + .destroy_conn = &qed_fcoe_destroy_conn, + .get_stats = &qed_fcoe_stats, +}; + +const struct qed_fcoe_ops *qed_get_fcoe_ops(void) +{ + return &qed_fcoe_ops_pass; +} +EXPORT_SYMBOL(qed_get_fcoe_ops); + +void qed_put_fcoe_ops(void) +{ +} +EXPORT_SYMBOL(qed_put_fcoe_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h new file mode 100644 index 000000000000..472af34a171d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -0,0 +1,87 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _QED_FCOE_H +#define _QED_FCOE_H +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_fcoe_if.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_mcp.h" +#include "qed_sp.h" + +struct qed_fcoe_info { + spinlock_t lock; /* Connection resources. 
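qed_get_fcoe_ops()/qed_put_fcoe_ops() form the module boundary toward the FCoE upper-layer driver (the separate qedf driver is the expected consumer). Init-time binding would look roughly like this; my_fcoe_cb_ops and drv_ctx are hypothetical:

    static const struct qed_fcoe_ops *qed_ops;

    qed_ops = qed_get_fcoe_ops();
    if (!qed_ops)
            return -EINVAL;
    qed_ops->register_ops(cdev, &my_fcoe_cb_ops, drv_ctx);

    /* ... and on module exit: */
    qed_put_fcoe_ops();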
*/ + struct list_head free_list; +}; + +#if IS_ENABLED(CONFIG_QED_FCOE) +struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn); + +void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info); + +void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info); +void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats); +#else /* CONFIG_QED_FCOE */ +static inline struct qed_fcoe_info * +qed_fcoe_alloc(struct qed_hwfn *p_hwfn) +{ + return NULL; +} + +static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn, + struct qed_fcoe_info *p_fcoe_info) +{ +} + +static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn, + struct qed_fcoe_info *p_fcoe_info) +{ +} + +static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats) +{ +} +#endif /* CONFIG_QED_FCOE */ + +#ifdef CONFIG_QED_LL2 +extern const struct qed_common_ops qed_common_ops_pass; +extern const struct qed_ll2_ops qed_ll2_ops_pass; +#endif + +#endif /* _QED_FCOE_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 5d31189288e8..37c2bfb663bb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -43,10 +43,12 @@ #include <linux/qed/common_hsi.h> #include <linux/qed/storage_common.h> #include <linux/qed/tcp_common.h> +#include <linux/qed/fcoe_common.h> #include <linux/qed/eth_common.h> #include <linux/qed/iscsi_common.h> #include <linux/qed/rdma_common.h> #include <linux/qed/roce_common.h> +#include <linux/qed/qed_fcoe_if.h> struct qed_hwfn; struct qed_ptt; @@ -937,7 +939,7 @@ struct mstorm_vf_zone { enum personality_type { BAD_PERSONALITY_TYP, PERSONALITY_ISCSI, - PERSONALITY_RESERVED2, + PERSONALITY_FCOE, PERSONALITY_RDMA_AND_ETH, PERSONALITY_RESERVED3, PERSONALITY_CORE, @@ -3473,6 +3475,10 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[43].base + ((pf_id) * IRO[43].m1)) +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ + (IRO[44].base + ((pf_id) * IRO[44].m1)) static const struct iro iro_arr[47] = { {0x0, 0x0, 0x0, 0x0, 0x8}, @@ -7407,6 +7413,769 @@ struct ystorm_roce_resp_conn_ag_ctx { __le32 reg3; }; +struct ystorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 cos; + u8 conf_version; + u8 eth_hdr_size; + __le16 stat_ram_addr; + __le16 mtu; + __le16 max_fc_payload_len; + __le16 tx_max_fc_pay_len; + u8 fcp_cmd_size; + u8 fcp_rsp_size; + __le16 mss; + struct regpair reserved; + u8 protection_info_flags; +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2 + u8 dst_protection_per_mss; + u8 src_protection_per_mss; + u8 ptu_log_page_size; + u8 flags; +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2 + u8 fcp_xfer_size; + u8 reserved3[2]; +}; + +struct fcoe_vlan_fields { + 
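The stub block above is the standard kernel compile-out pattern: with CONFIG_QED_FCOE disabled, the inline no-ops let common qed code invoke the FCoE hooks unconditionally instead of scattering #ifdefs across call sites. A hypothetical call site then compiles identically either way:

    p_hwfn->p_fcoe_info = qed_fcoe_alloc(p_hwfn);   /* NULL when off */
    /* ... */
    qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info);     /* no-op when off */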
__le16 fields; +#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF +#define FCOE_VLAN_FIELDS_VID_SHIFT 0 +#define FCOE_VLAN_FIELDS_CLI_MASK 0x1 +#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 +#define FCOE_VLAN_FIELDS_PRI_MASK 0x7 +#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 +}; + +union fcoe_vlan_field_union { + struct fcoe_vlan_fields fields; + __le16 val; +}; + +union fcoe_vlan_vif_field_union { + union fcoe_vlan_field_union vlan; + __le16 vif; +}; + +struct pstorm_fcoe_eth_context_section { + u8 remote_addr_3; + u8 remote_addr_2; + u8 remote_addr_1; + u8 remote_addr_0; + u8 local_addr_1; + u8 local_addr_0; + u8 remote_addr_5; + u8 remote_addr_4; + u8 local_addr_5; + u8 local_addr_4; + u8 local_addr_3; + u8 local_addr_2; + union fcoe_vlan_vif_field_union vif_outer_vlan; + __le16 vif_outer_eth_type; + union fcoe_vlan_vif_field_union inner_vlan; + __le16 inner_eth_type; +}; + +struct pstorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 cos; + u8 conf_version; + u8 rsrv; + __le16 stat_ram_addr; + __le16 mss; + struct regpair abts_cleanup_addr; + struct pstorm_fcoe_eth_context_section eth; + u8 sid_2; + u8 sid_1; + u8 sid_0; + u8 flags; +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3 +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0xF +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 4 + u8 did_2; + u8 did_1; + u8 did_0; + u8 src_mac_index; + __le16 rec_rr_tov_val; + u8 q_relative_offset; + u8 reserved1; +}; + +struct xstorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 src_mac_index; + u8 conf_version; + u8 cached_wqes_avail; + __le16 stat_ram_addr; + u8 flags; +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3 +#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5 + u8 cached_wqes_offset; + u8 reserved2; + u8 eth_hdr_size; + u8 seq_id; + u8 max_conc_seqs; + __le16 num_pages_in_pbl; + __le16 reserved; + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 mtu; + __le16 tx_max_fc_pay_len; + __le16 max_fc_payload_len; + __le16 min_frame_size; + __le16 sq_pbl_next_index; + __le16 respq_pbl_next_index; + u8 fcp_cmd_byte_credit; + u8 fcp_rsp_byte_credit; + __le16 protection_info; +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2 +#define 
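The MASK/SHIFT define pairs that dominate these HSI structures are meant to be driven through qed's existing SET_FIELD()/GET_FIELD() accessors (the same ones qed_fcoe.c uses on the task and connection contexts above), never through hand-rolled shifts. For the VLAN word, for example (variable names are illustrative):

    union fcoe_vlan_field_union v;
    u16 tmp = 0;

    SET_FIELD(tmp, FCOE_VLAN_FIELDS_VID, 100);      /* VLAN id */
    SET_FIELD(tmp, FCOE_VLAN_FIELDS_PRI, 3);        /* 802.1p priority */
    v.val = cpu_to_le16(tmp);

    pri = GET_FIELD(tmp, FCOE_VLAN_FIELDS_PRI);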
XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8 + __le16 xferq_pbl_next_index; + __le16 page_size; + u8 mid_seq; + u8 fcp_xfer_byte_credit; + u8 reserved1[2]; + struct fcoe_wqe cached_wqes[16]; +}; + +struct xstorm_fcoe_conn_ag_ctx { + u8 reserved0; + u8 fcoe_state; + u8 flags0; +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 + u8 flags2; +#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 +#define 
XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 + u8 flags7; +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define 
XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 + u8 flags12; +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 sq_cons; + __le16 sq_prod; + __le16 xferq_prod; + __le16 xferq_cons; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 remain_io; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 respq_prod; + __le16 respq_cons; + __le16 word9; + 
__le16 word10; + __le32 reg7; + __le32 reg8; +}; + +struct ustorm_fcoe_conn_st_ctx { + struct regpair respq_pbl_addr; + __le16 num_pages_in_pbl; + u8 ptu_log_page_size; + u8 log_page_size; + __le16 respq_prod; + u8 reserved[2]; +}; + +struct tstorm_fcoe_conn_ag_ctx { + u8 reserved0; + u8 fcoe_state; + u8 flags0; +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 + u8 flags1; +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define 
TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; +}; + +struct ustorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct tstorm_fcoe_conn_st_ctx { + __le16 stat_ram_addr; + __le16 rx_max_fc_payload_len; + __le16 e_d_tov_val; + u8 flags; +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1 +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 
0x3F +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2 + u8 timers_cleanup_invocation_cnt; + __le32 reserved1[2]; + __le32 dst_mac_address_bytes0to3; + __le16 dst_mac_address_bytes4to5; + __le16 ramrod_echo; + u8 flags1; +#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3 +#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2 + u8 q_relative_offset; + u8 bdq_resource_id; + u8 reserved0[5]; +}; + +struct mstorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct fcoe_mstorm_fcoe_conn_st_ctx_fp { + __le16 xfer_prod; + __le16 reserved1; + u8 protection_info; +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_MASK 0x1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_SHIFT 1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_MASK 0x3F +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_SHIFT 2 + u8 q_relative_offset; + u8 reserved2[2]; +}; + +struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp { + __le16 conn_id; + __le16 stat_ram_addr; + __le16 num_pages_in_pbl; + u8 ptu_log_page_size; + u8 log_page_size; + __le16 unsolicited_cq_count; + __le16 cmdq_count; + u8 bdq_resource_id; + u8 reserved0[3]; + struct regpair xferq_pbl_addr; + struct regpair reserved1; + struct regpair reserved2[3]; +}; + +struct mstorm_fcoe_conn_st_ctx { + struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp; + struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp; +}; + +struct fcoe_conn_context { + struct ystorm_fcoe_conn_st_ctx ystorm_st_context; + struct pstorm_fcoe_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_fcoe_conn_st_ctx xstorm_st_context; + struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context; + struct regpair xstorm_ag_padding[6]; + struct ustorm_fcoe_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context; + struct tstorm_fcoe_conn_st_ctx tstorm_st_context; + struct 
mstorm_fcoe_conn_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_conn_st_ctx mstorm_st_context; +}; + +struct fcoe_conn_offload_ramrod_params { + struct fcoe_conn_offload_ramrod_data offload_ramrod_data; +}; + +struct fcoe_conn_terminate_ramrod_params { + struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data; +}; + +enum fcoe_event_type { + FCOE_EVENT_INIT_FUNC, + FCOE_EVENT_DESTROY_FUNC, + FCOE_EVENT_STAT_FUNC, + FCOE_EVENT_OFFLOAD_CONN, + FCOE_EVENT_TERMINATE_CONN, + FCOE_EVENT_ERROR, + MAX_FCOE_EVENT_TYPE +}; + +struct fcoe_init_ramrod_params { + struct fcoe_init_func_ramrod_data init_ramrod_data; +}; + +enum fcoe_ramrod_cmd_id { + FCOE_RAMROD_CMD_ID_INIT_FUNC, + FCOE_RAMROD_CMD_ID_DESTROY_FUNC, + FCOE_RAMROD_CMD_ID_STAT_FUNC, + FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, + FCOE_RAMROD_CMD_ID_TERMINATE_CONN, + MAX_FCOE_RAMROD_CMD_ID +}; + +struct fcoe_stat_ramrod_params { + struct fcoe_stat_ramrod_data stat_ramrod_data; +}; + +struct ystorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + struct ystorm_iscsi_conn_st_ctx { __le32 reserved[4]; }; @@ -8435,6 +9204,7 @@ struct public_func { #define FUNC_MF_CFG_PROTOCOL_SHIFT 4 #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 +#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 #define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 #define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 @@ -8529,6 +9299,13 @@ struct lan_stats_stc { u32 rserved; }; +struct fcoe_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + struct ocbb_data_stc { u32 ocbb_host_addr; u32 ocsd_host_addr; @@ -8602,6 +9379,7 @@ union drv_union_data { struct drv_version_stc drv_version; struct lan_stats_stc lan_stats; + struct fcoe_stats_stc fcoe_stats; struct ocbb_data_stc ocbb_info; struct temperature_status_stc temp_info; struct resource_info resource; @@ -8905,6 +9683,7 @@ struct nvm_cfg1_glob { u32 misc_sig; u32 device_capabilities; #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 #define 
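An aside on the convention running through these storm-context structures: every 1- and 2-bit field in the flags0..flags3 bytes is described by a paired *_MASK/*_SHIFT define, and the qed driver reads and writes them through generic SET_FIELD()/GET_FIELD() helpers. Below is a minimal, self-contained userspace model of that idiom; the macro bodies are reconstructed for illustration, not copied from qed_hsi.h.

    #include <stdint.h>
    #include <stdio.h>

    /* Same convention as above: the mask is right-aligned and the
     * shift positions the field inside the flags byte. */
    #define USTORM_FCOE_CONN_AG_CTX_CF1_MASK   0x3
    #define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT  4

    #define SET_FIELD(value, name, field)                              \
        ((value) = ((value) & ~((name##_MASK) << (name##_SHIFT))) |    \
                   (((field) & (name##_MASK)) << (name##_SHIFT)))

    #define GET_FIELD(value, name)                                     \
        (((value) >> (name##_SHIFT)) & (name##_MASK))

    int main(void)
    {
        uint8_t flags1 = 0;

        SET_FIELD(flags1, USTORM_FCOE_CONN_AG_CTX_CF1, 2);
        printf("flags1=0x%02x CF1=%u\n", flags1,
               (unsigned int)GET_FIELD(flags1, USTORM_FCOE_CONN_AG_CTX_CF1));
        return 0;
    }

Keeping mask and shift as separate defines is what lets one pair of generic macros service every field in every context structure.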
NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 u32 power_dissipated; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 1f606516b6aa..899cad7f97ea 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -841,6 +841,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, if (pq_id > p_hwfn->qm_info.num_pf_rls) pq_id = p_hwfn->qm_info.offload_pq; break; + case PROTOCOLID_FCOE: + pq_id = p_hwfn->qm_info.offload_pq; + break; default: pq_id = 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 02c5d47cfc6d..9a0b9af10a57 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1130,6 +1130,9 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->qm_pq_id = cpu_to_le16(pq_id); switch (conn_type) { + case QED_LL2_TYPE_FCOE: + p_ramrod->conn_type = PROTOCOLID_FCOE; + break; case QED_LL2_TYPE_ISCSI: case QED_LL2_TYPE_ISCSI_OOO: p_ramrod->conn_type = PROTOCOLID_ISCSI; @@ -1458,6 +1461,15 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); + if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { + qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8906, 0, + QED_LLH_FILTER_ETHERTYPE); + qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8914, 0, + QED_LLH_FILTER_ETHERTYPE); + } + return rc; } @@ -1831,6 +1843,15 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { + qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8906, 0, + QED_LLH_FILTER_ETHERTYPE); + qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt, + 0x8914, 0, + QED_LLH_FILTER_ETHERTYPE); + } + return rc; } @@ -2039,6 +2060,10 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) } switch (QED_LEADING_HWFN(cdev)->hw_info.personality) { + case QED_PCI_FCOE: + conn_type = QED_LL2_TYPE_FCOE; + gsi_enable = 0; + break; case QED_PCI_ISCSI: conn_type = QED_LL2_TYPE_ISCSI; gsi_enable = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index db3e4fc78e09..31a409033c41 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -54,7 +54,7 @@ enum qed_ll2_roce_flavor_type { }; enum qed_ll2_conn_type { - QED_LL2_TYPE_RESERVED, + QED_LL2_TYPE_FCOE, QED_LL2_TYPE_ISCSI, QED_LL2_TYPE_TEST, QED_LL2_TYPE_ISCSI_OOO, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 592e104687a7..942c03ca3b59 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -53,9 +53,11 @@ #include "qed_sp.h" #include "qed_dev_api.h" #include "qed_ll2.h" +#include "qed_fcoe.h" #include "qed_mcp.h" #include "qed_hw.h" #include "qed_selftest.h" +#include "qed_debug.h" #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) @@ -1603,6 +1605,8 @@ const struct qed_common_ops qed_common_ops_pass = { .sb_release = &qed_sb_release, .simd_handler_config = &qed_simd_handler_config, .simd_handler_clean = &qed_simd_handler_clean, + .dbg_grc = &qed_dbg_grc, + .dbg_grc_size = &qed_dbg_grc_size, .can_link_change = &qed_can_link_change, .set_link 
= &qed_set_link, .get_link = &qed_get_current_link, @@ -1636,6 +1640,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev, stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts; stats->lan_stats.fcs_err = -1; break; + case QED_MCP_FCOE_STATS: + qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); + break; default: DP_ERR(cdev, "Invalid protocol type = %d\n", type); return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index c8a877594032..7624a38cbd39 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1130,6 +1130,9 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, case FUNC_MF_CFG_PROTOCOL_ISCSI: *p_proto = QED_PCI_ISCSI; break; + case FUNC_MF_CFG_PROTOCOL_FCOE: + *p_proto = QED_PCI_FCOE; + break; case FUNC_MF_CFG_PROTOCOL_ROCE: DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); /* Fallthrough */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 363dce0f16b1..0792224ac219 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -37,6 +37,7 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/qed/qed_fcoe_if.h> #include "qed_hsi.h" struct qed_mcp_link_speed_params { diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 3b7edf6ff234..d59d9df60cd2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -110,6 +110,8 @@ 0x1e80000UL #define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ 0x5011f4UL +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE \ + 0x1f0164UL #define PRS_REG_SEARCH_TCP \ 0x1f0400UL #define PRS_REG_SEARCH_UDP \ @@ -120,6 +122,12 @@ 0x1f040cUL #define PRS_REG_SEARCH_OPENFLOW \ 0x1f0434UL +#define PRS_REG_SEARCH_TAG1 \ + 0x1f0444UL +#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \ + 0x1f0a0cUL +#define PRS_REG_SEARCH_TCP_FIRST_FRAG \ + 0x1f0410UL #define TM_REG_PF_ENABLE_CONN \ 0x2c043cUL #define TM_REG_PF_ENABLE_TASK \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 043882959606..30393ffaa8e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -109,6 +109,10 @@ union ramrod_data { struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; struct rdma_srq_modify_ramrod_data rdma_modify_srq; struct roce_init_func_ramrod_data roce_init_func; + struct fcoe_init_ramrod_params fcoe_init; + struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld; + struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate; + struct fcoe_stat_ramrod_params fcoe_stat; struct iscsi_slow_path_hdr iscsi_empty; struct iscsi_init_ramrod_params iscsi_init; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 097a72987572..6fb80f9ef446 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -386,6 +386,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, case QED_PCI_ETH: p_ramrod->personality = PERSONALITY_ETH; break; + case QED_PCI_FCOE: + p_ramrod->personality = PERSONALITY_FCOE; + break; case QED_PCI_ISCSI: p_ramrod->personality = PERSONALITY_ISCSI; break; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index ea38236f1ced..2991179c2fd0 100644 --- 
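For reference on the magic numbers in the qed_ll2.c hunks above: 0x8906 and 0x8914 are the FCoE and FIP ethertypes, installed as LLH protocol filters when an FCoE LL2 connection is established and removed again on termination. Both are available symbolically from the UAPI header, as this trivial check shows:

    #include <stdio.h>
    #include <linux/if_ether.h>

    int main(void)
    {
        /* The two ethertypes steered to the FCoE personality above */
        printf("ETH_P_FCOE = 0x%04x\n", ETH_P_FCOE);  /* FCoE data frames */
        printf("ETH_P_FIP  = 0x%04x\n", ETH_P_FIP);   /* FCoE init. protocol */
        return 0;
    }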
a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -1707,23 +1707,30 @@ static int ql_get_full_dup(struct ql3_adapter *qdev) return status; } -static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +static int ql_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct ql3_adapter *qdev = netdev_priv(ndev); + u32 supported, advertising; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = ql_supported_modes(qdev); + supported = ql_supported_modes(qdev); if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; } else { - ecmd->port = PORT_TP; - ecmd->phy_address = qdev->PHYAddr; + cmd->base.port = PORT_TP; + cmd->base.phy_address = qdev->PHYAddr; } - ecmd->advertising = ql_supported_modes(qdev); - ecmd->autoneg = ql_get_auto_cfg_status(qdev); - ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); - ecmd->duplex = ql_get_full_dup(qdev); + advertising = ql_supported_modes(qdev); + cmd->base.autoneg = ql_get_auto_cfg_status(qdev); + cmd->base.speed = ql_get_speed(qdev); + cmd->base.duplex = ql_get_full_dup(qdev); + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + return 0; } @@ -1769,12 +1776,12 @@ static void ql_get_pauseparam(struct net_device *ndev, } static const struct ethtool_ops ql3xxx_ethtool_ops = { - .get_settings = ql_get_settings, .get_drvinfo = ql_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = ql_get_msglevel, .set_msglevel = ql_set_msglevel, .get_pauseparam = ql_get_pauseparam, + .get_link_ksettings = ql_get_link_ksettings, }; static int ql_populate_free_queue(struct ql3_adapter *qdev) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index daf05155b732..d344e9d43832 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -573,8 +573,10 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL); - if (ptr == NULL) - return -ENOMEM; + if (ptr == NULL) { + err = -ENOMEM; + goto err_out_free; + } tx_ring->hw_consumer = ptr; /* cmd desc ring */ diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index b29a819f721d..92e1c6d8b293 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -134,6 +134,22 @@ static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid); static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading); +static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id) +{ + WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID); + return filter_id & (HUNT_FILTER_TBL_ROWS - 1); +} + +static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id) +{ + return filter_id / (HUNT_FILTER_TBL_ROWS * 2); +} + +static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx) +{ + return pri * HUNT_FILTER_TBL_ROWS * 2 + idx; +} + static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) { efx_dword_t reg; @@ -4194,7 +4210,7 @@ found: /* If successful, return the inserted filter ID */ if (rc == 0) - rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; + rc = efx_ef10_make_filter_id(match_pri, ins_index); wake_up_all(&table->waitq); out_unlock: @@ 
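The qla3xxx hunk just above is the standard mechanical conversion from .get_settings (struct ethtool_cmd with u32 mode masks) to .get_link_ksettings (struct ethtool_link_ksettings with long-format link-mode bitmaps); the two ethtool_convert_legacy_u32_to_link_mode() calls at the end widen the legacy masks into the new bitmaps. A toy model of that widening follows; the bitmap width and the sample mask are arbitrary demo values, not the kernel's:

    #include <stdio.h>
    #include <stdint.h>

    #define MODE_BITS 96    /* link-mode bitmaps outgrew 32 bits */
    typedef uint32_t mode_bitmap[(MODE_BITS + 31) / 32];

    /* Model of ethtool_convert_legacy_u32_to_link_mode(): the legacy
     * mask simply occupies the low 32 bits of the wider bitmap. */
    static void legacy_u32_to_link_mode(mode_bitmap dst, uint32_t legacy)
    {
        size_t i;

        for (i = 1; i < sizeof(mode_bitmap) / sizeof(dst[0]); i++)
            dst[i] = 0;
        dst[0] = legacy;
    }

    int main(void)
    {
        mode_bitmap supported;

        legacy_u32_to_link_mode(supported, 0x2f); /* arbitrary legacy mask */
        printf("word0=0x%08x word1=0x%08x\n", supported[0], supported[1]);
        return 0;
    }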
-4217,7 +4233,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, unsigned int priority_mask, u32 filter_id, bool by_index) { - unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; + unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); struct efx_ef10_filter_table *table = efx->filter_state; MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_HANDLE_OFST + @@ -4244,7 +4260,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, if (!spec || (!by_index && efx_ef10_filter_pri(table, spec) != - filter_id / HUNT_FILTER_TBL_ROWS)) { + efx_ef10_filter_get_unsafe_pri(filter_id))) { rc = -ENOENT; goto out_unlock; } @@ -4293,13 +4309,18 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, table->entry[filter_idx].handle); - rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, - inbuf, sizeof(inbuf), NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, + inbuf, sizeof(inbuf), NULL, 0, NULL); spin_lock_bh(&efx->filter_lock); - if (rc == 0) { + if ((rc == 0) || (rc == -ENOENT)) { + /* Filter removed OK or didn't actually exist */ kfree(spec); efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); + } else { + efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, + MC_CMD_FILTER_OP_IN_LEN, + NULL, 0, rc); } } @@ -4319,11 +4340,6 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx, filter_id, false); } -static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) -{ - return filter_id % HUNT_FILTER_TBL_ROWS; -} - static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id) @@ -4337,7 +4353,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id, struct efx_filter_spec *spec) { - unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; + unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); struct efx_ef10_filter_table *table = efx->filter_state; const struct efx_filter_spec *saved_spec; int rc; @@ -4346,7 +4362,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); if (saved_spec && saved_spec->priority == priority && efx_ef10_filter_pri(table, saved_spec) == - filter_id / HUNT_FILTER_TBL_ROWS) { + efx_ef10_filter_get_unsafe_pri(filter_id)) { *spec = *saved_spec; rc = 0; } else { @@ -4398,7 +4414,7 @@ static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; - return table->rx_match_count * HUNT_FILTER_TBL_ROWS; + return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2; } static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, @@ -4418,8 +4434,9 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, count = -EMSGSIZE; break; } - buf[count++] = (efx_ef10_filter_pri(table, spec) * - HUNT_FILTER_TBL_ROWS + + buf[count++] = + efx_ef10_make_filter_id( + efx_ef10_filter_pri(table, spec), filter_idx); } } @@ -4971,7 +4988,7 @@ static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) unsigned int filter_idx; if (*id != EFX_EF10_FILTER_ID_INVALID) { - filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id); + filter_idx = efx_ef10_filter_get_unsafe_id(*id); if (!table->entry[filter_idx].spec) netif_dbg(efx, drv, efx->net_dev, "marked null spec old %04x:%04x\n", *id, @@ -5106,7 +5123,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, rc = 
EFX_EF10_FILTER_ID_INVALID; } } - ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc); + ids[i] = efx_ef10_filter_get_unsafe_id(rc); } if (multicast && rollback) { @@ -5130,7 +5147,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, return rc; } else { vlan->default_filters[EFX_EF10_BCAST] = - efx_ef10_filter_get_unsafe_id(efx, rc); + efx_ef10_filter_get_unsafe_id(rc); } } @@ -5225,7 +5242,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, id = &vlan->default_filters[map[encap_type]]; EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); - *id = efx_ef10_filter_get_unsafe_id(efx, rc); + *id = efx_ef10_filter_get_unsafe_id(rc); if (!nic_data->workaround_26807 && !encap_type) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, @@ -5250,7 +5267,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, vlan->default_filters[EFX_EF10_BCAST] != EFX_EF10_FILTER_ID_INVALID); vlan->default_filters[EFX_EF10_BCAST] = - efx_ef10_filter_get_unsafe_id(efx, rc); + efx_ef10_filter_get_unsafe_id(rc); } } rc = 0; @@ -5372,7 +5389,7 @@ restore_filters: if (rc2) goto reset_nic; - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return rc; @@ -5658,7 +5675,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) if (was_enabled) efx_net_open(efx->net_dev); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); #ifdef CONFIG_SFC_SRIOV if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { @@ -6110,7 +6127,7 @@ static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) if (!(nic_data->datapath_caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return 0; } @@ -6182,7 +6199,7 @@ static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) * trigger a re-attach. Since there won't be an MC reset, we * have to do the attach ourselves. 
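The ef10 helpers introduced above centralize the filter-ID encoding: an ID is now built as pri * HUNT_FILTER_TBL_ROWS * 2 + idx, the table index is recovered with a mask instead of the old modulo, and the priority by dividing by ROWS * 2 (the doubling widens the ID space, which is also why get_rx_id_limit grows). A standalone round-trip sketch, assuming the driver's HUNT_FILTER_TBL_ROWS value of 8192; the mask decode works because that is a power of two:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HUNT_FILTER_TBL_ROWS 8192   /* power of two, as in the driver */

    static uint32_t make_filter_id(unsigned int pri, uint16_t idx)
    {
        return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
    }

    static uint32_t get_unsafe_id(uint32_t filter_id)
    {
        return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
    }

    static unsigned int get_unsafe_pri(uint32_t filter_id)
    {
        return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
    }

    int main(void)
    {
        uint32_t id = make_filter_id(3, 1234);

        assert(get_unsafe_id(id) == 1234);
        assert(get_unsafe_pri(id) == 3);
        printf("id=%u -> pri=%u idx=%u\n",
               id, get_unsafe_pri(id), get_unsafe_id(id));
        return 0;
    }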
*/ - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); } return rc; diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index ed4b14283461..b7e4345c990d 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -549,7 +549,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) vf->efx->type->filter_table_probe(vf->efx); up_write(&vf->efx->filter_sem); efx_net_open(vf->efx->net_dev); - netif_device_attach(vf->efx->net_dev); + efx_device_attach_if_not_resetting(vf->efx); } return 0; @@ -667,7 +667,7 @@ restore_filters: if (rc2) goto reset_nic; - netif_device_attach(vf->efx->net_dev); + efx_device_attach_if_not_resetting(vf->efx); } return rc; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 8c4c273643dc..334bcc6df6b2 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -876,7 +876,7 @@ out: efx_schedule_reset(efx, RESET_TYPE_DISABLE); } else { efx_start_all(efx); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); } return rc; @@ -2182,6 +2182,8 @@ int efx_net_open(struct net_device *net_dev) efx_link_status_changed(efx); efx_start_all(efx); + if (efx->state == STATE_DISABLED || efx->reset_pending) + netif_device_detach(efx->net_dev); efx_selftest_async_start(efx); return 0; } @@ -2248,7 +2250,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) mutex_unlock(&efx->mac_lock); efx_start_all(efx); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return 0; } @@ -2744,7 +2746,7 @@ out: efx->state = STATE_DISABLED; } else { netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); } return rc; } @@ -3417,7 +3419,7 @@ static int efx_pm_thaw(struct device *dev) efx_start_all(efx); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); efx->state = STATE_READY; diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 342ae16e1f2d..ee14662415c5 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -276,6 +276,12 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) netif_tx_unlock_bh(dev); } +static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx) +{ + if ((efx->state != STATE_DISABLED) && !efx->reset_pending) + netif_device_attach(efx->net_dev); +} + static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem) { if (WARN_ON(down_read_trylock(sem))) { diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index cd38b44ae23a..dab286a337a6 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -768,7 +768,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, __efx_reconfigure_port(efx); mutex_unlock(&efx->mac_lock); - netif_device_attach(efx->net_dev); + efx_device_attach_if_not_resetting(efx); return rc_test; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index fa6e9704c077..e5db6ac36235 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -302,6 +302,122 @@ static const struct rk_gmac_ops rk3288_ops = { .set_rmii_speed = rk3288_set_rmii_speed, }; +#define RK3328_GRF_MAC_CON0 0x0900 +#define RK3328_GRF_MAC_CON1 0x0904 + +/* 
RK3328_GRF_MAC_CON0 */ +#define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) +#define RK3328_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) + +/* RK3328_GRF_MAC_CON1 */ +#define RK3328_GMAC_PHY_INTF_SEL_RGMII \ + (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6)) +#define RK3328_GMAC_PHY_INTF_SEL_RMII \ + (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6)) +#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3) +#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) +#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define RK3328_GMAC_SPEED_100M GRF_BIT(2) +#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7) +#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7) +#define RK3328_GMAC_CLK_125M (GRF_CLR_BIT(11) | GRF_CLR_BIT(12)) +#define RK3328_GMAC_CLK_25M (GRF_BIT(11) | GRF_BIT(12)) +#define RK3328_GMAC_CLK_2_5M (GRF_CLR_BIT(11) | GRF_BIT(12)) +#define RK3328_GMAC_RMII_MODE GRF_BIT(9) +#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9) +#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0) +#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0) +#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) +#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1) + +static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv, + int tx_delay, int rx_delay) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_PHY_INTF_SEL_RGMII | + RK3328_GMAC_RMII_MODE_CLR | + RK3328_GMAC_RXCLK_DLY_ENABLE | + RK3328_GMAC_TXCLK_DLY_ENABLE); + + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON0, + RK3328_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3328_GMAC_CLK_TX_DL_CFG(tx_delay)); +} + +static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_PHY_INTF_SEL_RMII | + RK3328_GMAC_RMII_MODE); + + /* set MAC to RMII mode */ + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, GRF_BIT(11)); +} + +static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_CLK_2_5M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_CLK_25M); + else if (speed == 1000) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_CLK_125M); + else + dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); +} + +static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_RMII_CLK_2_5M | + RK3328_GMAC_SPEED_10M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + RK3328_GMAC_RMII_CLK_25M | + RK3328_GMAC_SPEED_100M); + else + dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); +} + +static const struct rk_gmac_ops rk3328_ops = { + .set_to_rgmii = rk3328_set_to_rgmii, + .set_to_rmii = rk3328_set_to_rmii, + .set_rgmii_speed = rk3328_set_rgmii_speed, + .set_rmii_speed = rk3328_set_rmii_speed, +}; + #define RK3366_GRF_SOC_CON6 0x0418 #define RK3366_GRF_SOC_CON7 0x041c @@ -1006,6 +1122,7 @@ static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); static const struct of_device_id rk_gmac_dwmac_match[] = { { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, + { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops }, { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops }, diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e86f226b258e..9f3d9c67e3fe 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -3032,7 +3032,7 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_dma_ret; } - ale_params.dev = &ndev->dev; + ale_params.dev = &pdev->dev; ale_params.ale_ageout = ale_ageout; ale_params.ale_entries = data->ale_entries; ale_params.ale_ports = data->slaves; @@ -3207,7 +3207,7 @@ static int cpsw_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); - struct cpsw_common *cpsw = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); /* Select default pin state */ pinctrl_pm_select_default_state(dev); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index e3070fd88bce..69e31ceccfae 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -100,6 +100,14 @@ /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. 
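Stepping back to the dwmac-rk hunk above: Rockchip GRF registers carry a write-enable mask in their upper 16 bits, so a single write can update selected bits without a read-modify-write cycle. The RK3328_* macros are composed from the HIWORD_UPDATE()/GRF_BIT()/GRF_CLR_BIT() helpers that dwmac-rk.c defines near the top of the file; here is a self-contained model of them applied to the RX delay field:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(nr)                 (1U << (nr))
    #define HIWORD_UPDATE(val, mask, shift) \
        (((val) << (shift)) | ((mask) << ((shift) + 16)))
    #define GRF_BIT(nr)             (BIT(nr) | BIT((nr) + 16))
    #define GRF_CLR_BIT(nr)         BIT((nr) + 16)

    #define RK3328_GMAC_CLK_RX_DL_CFG(val)  HIWORD_UPDATE(val, 0x7F, 7)

    int main(void)
    {
        /* Delay 0x10: data in bits 7..13, write-enables in bits 23..29 */
        uint32_t w = RK3328_GMAC_CLK_RX_DL_CFG(0x10);

        printf("value bits  = 0x%04x\n", w & 0xffff);  /* 0x0800 */
        printf("enable bits = 0x%04x\n", w >> 16);     /* 0x3f80 */
        return 0;
    }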
*/ #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) +#ifdef __BIG_ENDIAN +#define xemaclite_readl ioread32be +#define xemaclite_writel iowrite32be +#else +#define xemaclite_readl ioread32 +#define xemaclite_writel iowrite32 +#endif + /** * struct net_local - Our private per device data * @ndev: instance of the network device @@ -156,15 +164,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) u32 reg_data; /* Enable the Tx interrupts for the first Buffer */ - reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); - __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, - drvdata->base_addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET); + xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK, + drvdata->base_addr + XEL_TSR_OFFSET); /* Enable the Rx interrupts for the first buffer */ - __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); + xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); /* Enable the Global Interrupt Enable */ - __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); + xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); } /** @@ -179,17 +187,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) u32 reg_data; /* Disable the Global Interrupt Enable */ - __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); + xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); /* Disable the Tx interrupts for the first buffer */ - reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); - __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), - drvdata->base_addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET); + xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), + drvdata->base_addr + XEL_TSR_OFFSET); /* Disable the Rx interrupts for the first buffer */ - reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); - __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), - drvdata->base_addr + XEL_RSR_OFFSET); + reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET); + xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), + drvdata->base_addr + XEL_RSR_OFFSET); } /** @@ -321,7 +329,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, byte_count = ETH_FRAME_LEN; /* Check if the expected buffer is available */ - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { @@ -334,7 +342,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) != 0) @@ -345,16 +353,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, /* Write the frame to the buffer */ xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); - __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK), - addr + XEL_TPLR_OFFSET); + xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK), + addr + XEL_TPLR_OFFSET); /* Update the Tx Status Register to indicate that there is a * frame to send. 
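On the xilinx_emaclite conversion above: the __raw_readl()/__raw_writel() accessors neither order I/O nor define a byte order, while ioread32()/iowrite32() add the required barriers and the "be" variants read big-endian registers, so the #ifdef __BIG_ENDIAN block keeps accesses native-endian on either kind of kernel. A userspace model of the two read flavours (the model_* names are invented; only the byte-order behaviour is illustrated):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t model_ioread32(const void *addr)   /* LE register */
    {
        uint32_t raw;
        memcpy(&raw, addr, 4);
        return le32toh(raw);
    }

    static uint32_t model_ioread32be(const void *addr) /* BE register */
    {
        uint32_t raw;
        memcpy(&raw, addr, 4);
        return be32toh(raw);
    }

    int main(void)
    {
        /* Register value 0x11223344 as a big-endian device stores it */
        const uint8_t reg[4] = { 0x11, 0x22, 0x33, 0x44 };

        printf("ioread32be: 0x%08x\n", model_ioread32be(reg)); /* 0x11223344 */
        printf("ioread32  : 0x%08x\n", model_ioread32(reg));   /* 0x44332211 */
        return 0;
    }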
Set the XEL_TSR_XMIT_ACTIVE_MASK flag which * is used by the interrupt handler to check whether a frame * has been transmitted */ - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); - __raw_writel(reg_data, addr + XEL_TSR_OFFSET); + xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET); return 0; } @@ -369,7 +377,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, * * Return: Total number of bytes received */ -static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) +static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) { void __iomem *addr; u16 length, proto_type; @@ -379,7 +387,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); /* Verify which buffer has valid data */ - reg_data = __raw_readl(addr + XEL_RSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { if (drvdata->rx_ping_pong != 0) @@ -396,27 +404,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) return 0; /* No data was available */ /* Verify that buffer has valid data */ - reg_data = __raw_readl(addr + XEL_RSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) != XEL_RSR_RECV_DONE_MASK) return 0; /* No data was available */ } /* Get the protocol type of the ethernet frame that arrived */ - proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET + + proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); /* Check if received ethernet frame is a raw ethernet frame * or an IP packet or an ARP packet */ - if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + if (proto_type > ETH_DATA_LEN) { if (proto_type == ETH_P_IP) { - length = ((ntohl(__raw_readl(addr + + length = ((ntohl(xemaclite_readl(addr + XEL_HEADER_IP_LENGTH_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); + length = min_t(u16, length, ETH_DATA_LEN); length += ETH_HLEN + ETH_FCS_LEN; } else if (proto_type == ETH_P_ARP) @@ -429,14 +438,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) /* Use the length in the frame, plus the header and trailer */ length = proto_type + ETH_HLEN + ETH_FCS_LEN; + if (WARN_ON(length > maxlen)) + length = maxlen; + /* Read from the EmacLite device */ xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), data, length); /* Acknowledge the frame */ - reg_data = __raw_readl(addr + XEL_RSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); reg_data &= ~XEL_RSR_RECV_DONE_MASK; - __raw_writel(reg_data, addr + XEL_RSR_OFFSET); + xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET); return length; } @@ -463,14 +475,14 @@ static void xemaclite_update_address(struct net_local *drvdata, xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); - __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); + xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); /* Update the MAC address in the EmacLite */ - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); - __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); + xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); /* Wait for EmacLite to finish with the MAC address update */ - while 
((__raw_readl(addr + XEL_TSR_OFFSET) & + while ((xemaclite_readl(addr + XEL_TSR_OFFSET) & XEL_TSR_PROG_MAC_ADDR) != 0) ; } @@ -603,7 +615,7 @@ static void xemaclite_rx_handler(struct net_device *dev) skb_reserve(skb, 2); - len = xemaclite_recv_data(lp, (u8 *) skb->data); + len = xemaclite_recv_data(lp, (u8 *) skb->data, len); if (!len) { dev->stats.rx_errors++; @@ -640,32 +652,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) u32 tx_status; /* Check if there is Rx Data available */ - if ((__raw_readl(base_addr + XEL_RSR_OFFSET) & + if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) || - (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) + (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK)) xemaclite_rx_handler(dev); /* Check if the Transmission for the first buffer is completed */ - tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET); + tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; - __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET); + xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET); tx_complete = true; } /* Check if the Transmission for the second buffer is completed */ - tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); + tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; - __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); + xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + + XEL_TSR_OFFSET); tx_complete = true; } @@ -698,7 +710,7 @@ static int xemaclite_mdio_wait(struct net_local *lp) /* wait for the MDIO interface to not be busy or timeout after some time. */ - while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & + while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & XEL_MDIOCTRL_MDIOSTS_MASK) { if (time_before_eq(end, jiffies)) { WARN_ON(1); @@ -734,17 +746,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) * MDIO Address register. Set the Status bit in the MDIO Control * register to start a MDIO read transaction. */ - ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); - __raw_writel(XEL_MDIOADDR_OP_MASK | - ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), - lp->base_addr + XEL_MDIOADDR_OFFSET); - __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, - lp->base_addr + XEL_MDIOCTRL_OFFSET); + ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); + xemaclite_writel(XEL_MDIOADDR_OP_MASK | + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), + lp->base_addr + XEL_MDIOADDR_OFFSET); + xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); if (xemaclite_mdio_wait(lp)) return -ETIMEDOUT; - rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); + rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET); dev_dbg(&lp->ndev->dev, "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", @@ -781,13 +793,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, * Data register. Finally, set the Status bit in the MDIO Control * register to start a MDIO write transaction. 
*/ - ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); - __raw_writel(~XEL_MDIOADDR_OP_MASK & - ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), - lp->base_addr + XEL_MDIOADDR_OFFSET); - __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); - __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, - lp->base_addr + XEL_MDIOCTRL_OFFSET); + ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); + xemaclite_writel(~XEL_MDIOADDR_OP_MASK & + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), + lp->base_addr + XEL_MDIOADDR_OFFSET); + xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); + xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); return 0; } @@ -834,8 +846,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) /* Enable the MDIO bus by asserting the enable bit in MDIO Control * register. */ - __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, - lp->base_addr + XEL_MDIOCTRL_OFFSET); + xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); bus = mdiobus_alloc(); if (!bus) { @@ -1126,8 +1138,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) } /* Clear the Tx CSR's in case this is a restart */ - __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); - __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); + xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET); + xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); /* Set the MAC address in the EmacLite device */ xemaclite_update_address(lp, ndev->dev_addr); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 425285f36595..2d3cdb026a99 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -920,7 +920,7 @@ static void netvsc_get_stats64(struct net_device *net, } t->tx_dropped = net->stats.tx_dropped; - t->tx_errors = net->stats.tx_dropped; + t->tx_errors = net->stats.tx_errors; t->rx_dropped = net->stats.rx_dropped; t->rx_errors = net->stats.rx_errors; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7cc1b7dcfe05..d6f7838455dd 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -580,7 +580,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) return 0; case SIOCSHWTSTAMP: - if (phydev->drv->hwtstamp) + if (phydev->drv && phydev->drv->hwtstamp) return phydev->drv->hwtstamp(phydev, ifr); /* fall through */ @@ -603,6 +603,9 @@ int phy_start_aneg(struct phy_device *phydev) { int err; + if (!phydev->drv) + return -EIO; + mutex_lock(&phydev->lock); if (AUTONEG_DISABLE == phydev->autoneg) @@ -975,7 +978,7 @@ void phy_state_machine(struct work_struct *work) old_state = phydev->state; - if (phydev->drv->link_change_notify) + if (phydev->drv && phydev->drv->link_change_notify) phydev->drv->link_change_notify(phydev); switch (phydev->state) { @@ -1286,6 +1289,9 @@ EXPORT_SYMBOL(phy_write_mmd_indirect); */ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) { + if (!phydev->drv) + return -EIO; + /* According to 802.3az,the EEE is supported only in full duplex-mode. * Also EEE feature is active when core is operating with MII, GMII * or RGMII (all kinds). 
Internal PHYs are also allowed to proceed and @@ -1363,6 +1369,9 @@ EXPORT_SYMBOL(phy_init_eee); */ int phy_get_eee_err(struct phy_device *phydev) { + if (!phydev->drv) + return -EIO; + return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS); } EXPORT_SYMBOL(phy_get_eee_err); @@ -1379,6 +1388,9 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data) { int val; + if (!phydev->drv) + return -EIO; + /* Get Supported EEE */ val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS); if (val < 0) @@ -1412,6 +1424,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) { int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); + if (!phydev->drv) + return -EIO; + /* Mask prohibited EEE modes */ val &= ~phydev->eee_broken_modes; @@ -1423,7 +1438,7 @@ EXPORT_SYMBOL(phy_ethtool_set_eee); int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { - if (phydev->drv->set_wol) + if (phydev->drv && phydev->drv->set_wol) return phydev->drv->set_wol(phydev, wol); return -EOPNOTSUPP; @@ -1432,7 +1447,7 @@ EXPORT_SYMBOL(phy_ethtool_set_wol); void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { - if (phydev->drv->get_wol) + if (phydev->drv && phydev->drv->get_wol) phydev->drv->get_wol(phydev, wol); } EXPORT_SYMBOL(phy_ethtool_get_wol); @@ -1468,6 +1483,9 @@ int phy_ethtool_nway_reset(struct net_device *ndev) if (!phydev) return -ENODEV; + if (!phydev->drv) + return -EIO; + return genphy_restart_aneg(phydev); } EXPORT_SYMBOL(phy_ethtool_nway_reset); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8c8e15b8739d..daec6555f3b1 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1094,7 +1094,7 @@ int phy_suspend(struct phy_device *phydev) if (wol.wolopts) return -EBUSY; - if (phydrv->suspend) + if (phydev->drv && phydrv->suspend) ret = phydrv->suspend(phydev); if (ret) @@ -1111,7 +1111,7 @@ int phy_resume(struct phy_device *phydev) struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); int ret = 0; - if (phydrv->resume) + if (phydev->drv && phydrv->resume) ret = phydrv->resume(phydev); if (ret) @@ -1784,11 +1784,13 @@ static int phy_remove(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); + cancel_delayed_work_sync(&phydev->state_queue); + mutex_lock(&phydev->lock); phydev->state = PHY_DOWN; mutex_unlock(&phydev->lock); - if (phydev->drv->remove) + if (phydev->drv && phydev->drv->remove) phydev->drv->remove(phydev); phydev->drv = NULL; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 11e28530c83c..05a83dbc910d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -719,13 +719,13 @@ xdp_xmit: return NULL; } -static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len) +static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + void *buf, unsigned int len) { struct net_device *dev = vi->dev; - struct virtnet_stats *stats = this_cpu_ptr(vi->stats); struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; + int ret; if (unlikely(len < vi->hdr_len + ETH_HLEN)) { pr_debug("%s: short packet %i\n", dev->name, len); @@ -739,7 +739,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, } else { dev_kfree_skb(buf); } - return; + return 0; } if (vi->mergeable_rx_bufs) @@ -750,14 +750,11 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, skb = 
receive_small(dev, vi, rq, buf, len); if (unlikely(!skb)) - return; + return 0; hdr = skb_vnet_hdr(skb); - u64_stats_update_begin(&stats->rx_syncp); - stats->rx_bytes += skb->len; - stats->rx_packets++; - u64_stats_update_end(&stats->rx_syncp); + ret = skb->len; if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -775,11 +772,12 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, ntohs(skb->protocol), skb->len, skb->pkt_type); napi_gro_receive(&rq->napi, skb); - return; + return ret; frame_err: dev->stats.rx_frame_errors++; dev_kfree_skb(skb); + return 0; } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) @@ -994,12 +992,13 @@ static void refill_work(struct work_struct *work) static int virtnet_receive(struct receive_queue *rq, int budget) { struct virtnet_info *vi = rq->vq->vdev->priv; - unsigned int len, received = 0; + unsigned int len, received = 0, bytes = 0; void *buf; + struct virtnet_stats *stats = this_cpu_ptr(vi->stats); while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - receive_buf(vi, rq, buf, len); + bytes += receive_buf(vi, rq, buf, len); received++; } @@ -1008,6 +1007,11 @@ static int virtnet_receive(struct receive_queue *rq, int budget) schedule_delayed_work(&vi->refill, 0); } + u64_stats_update_begin(&stats->rx_syncp); + stats->rx_bytes += bytes; + stats->rx_packets += received; + u64_stats_update_end(&stats->rx_syncp); + return received; } @@ -1056,17 +1060,28 @@ static void free_old_xmit_skbs(struct send_queue *sq) unsigned int len; struct virtnet_info *vi = sq->vq->vdev->priv; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); + unsigned int packets = 0; + unsigned int bytes = 0; while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { pr_debug("Sent skb %p\n", skb); - u64_stats_update_begin(&stats->tx_syncp); - stats->tx_bytes += skb->len; - stats->tx_packets++; - u64_stats_update_end(&stats->tx_syncp); + bytes += skb->len; + packets++; dev_kfree_skb_any(skb); } + + /* Avoid overhead when no packets have been processed + * happens when called speculatively from start_xmit. 
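The virtio_net rework here is a stats-batching change: receive_buf() now returns the byte count instead of touching the per-cpu counters for every packet, virtnet_receive() publishes the totals once per NAPI poll, and the free_old_xmit_skbs() path adds an early return so the speculative call from start_xmit stays cheap. A minimal sketch of batch-then-publish, with a plain struct standing in for the u64_stats seqcount section:

    #include <stdint.h>
    #include <stdio.h>

    struct stats_model {
        uint64_t rx_bytes;
        uint64_t rx_packets;
    };

    /* Stand-in for receive_buf(): returns the length, or 0 on drop */
    static int receive_one(int len)
    {
        return len >= 14 ? len : 0;     /* drop runts, as the driver does */
    }

    static int receive_budget(struct stats_model *stats,
                              const int *lens, int n, int budget)
    {
        uint64_t bytes = 0;
        int received = 0;

        while (received < budget && received < n)
            bytes += receive_one(lens[received++]);

        /* one stats-update section instead of one per packet */
        stats->rx_bytes += bytes;
        stats->rx_packets += received;
        return received;
    }

    int main(void)
    {
        struct stats_model s = { 0, 0 };
        int lens[] = { 60, 1500, 4, 800 };

        receive_budget(&s, lens, 4, 64);
        printf("packets=%llu bytes=%llu\n",
               (unsigned long long)s.rx_packets,
               (unsigned long long)s.rx_bytes);
        return 0;
    }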
+ */ + if (!packets) + return; + + u64_stats_update_begin(&stats->tx_syncp); + stats->tx_bytes += bytes; + stats->tx_packets += packets; + u64_stats_update_end(&stats->tx_syncp); } static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 4e27c5b09600..c5db8f8563c1 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2489,7 +2489,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, info->key.u.ipv4.dst, - &info->key.u.ipv4.src, dport, sport, NULL, info); + &info->key.u.ipv4.src, dport, sport, + &info->dst_cache, info); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); @@ -2500,7 +2501,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, info->key.label, &info->key.u.ipv6.dst, - &info->key.u.ipv6.src, dport, sport, NULL, info); + &info->key.u.ipv6.src, dport, sport, + &info->dst_cache, info); if (IS_ERR(ndst)) return PTR_ERR(ndst); dst_release(ndst); diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index d869533d2e79..a5045b5279d7 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -1175,3 +1175,4 @@ static struct platform_driver ucc_hdlc_driver = { }; module_platform_driver(ucc_hdlc_driver); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index c27e7ea38a65..59729aa8cd82 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -351,7 +351,7 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar, char *buf, size_t buf_len) { - unsigned int len = 0; + size_t len = 0; int i; for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { @@ -456,7 +456,10 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, dir = "."; snprintf(filename, sizeof(filename), "%s/%s", dir, file); - ret = request_firmware(&fw, filename, ar->dev); + ret = request_firmware_direct(&fw, filename, ar->dev); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", + filename, ret); + if (ret) return ERR_PTR(ret); @@ -698,7 +701,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 || (board_id == 0)) { - ath10k_warn(ar, "board id is not exist in otp, ignore it\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "board id does not exist in otp, ignore it\n"); return -EOPNOTSUPP; } @@ -1122,7 +1126,7 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name, size_t name_len) { /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ - char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH]; + char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; if (ar->id.bmi_ids_valid) { scnprintf(name, name_len, @@ -1168,7 +1172,8 @@ static int ath10k_core_fetch_board_file(struct ath10k *ar) ar->bd_api = 1; ret = ath10k_core_fetch_board_data_api_1(ar); if (ret) { - ath10k_err(ar, "failed to fetch board data\n"); + ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n", + ar->hw_params.fw.dir); return ret; } @@ -1189,12 +1194,8 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, /* first fetch the firmware file (firmware-*.bin) */ fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); - if (IS_ERR(fw_file->firmware)) { - ath10k_err(ar, "could not fetch firmware file 
'%s/%s': %ld\n", - ar->hw_params.fw.dir, name, - PTR_ERR(fw_file->firmware)); + if (IS_ERR(fw_file->firmware)) return PTR_ERR(fw_file->firmware); - } data = fw_file->firmware->data; len = fw_file->firmware->size; @@ -1358,44 +1359,39 @@ err: return ret; } +static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name, + size_t fw_name_len, int fw_api) +{ + scnprintf(fw_name, fw_name_len, "%s-%d.bin", ATH10K_FW_FILE_BASE, fw_api); +} + static int ath10k_core_fetch_firmware_files(struct ath10k *ar) { - int ret; + int ret, i; + char fw_name[100]; /* calibration file is optional, don't check for any errors */ ath10k_fetch_cal_file(ar); - ar->fw_api = 5; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); - - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE, - &ar->normal_mode_fw.fw_file); - if (ret == 0) - goto success; - - ar->fw_api = 4; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + for (i = ATH10K_FW_API_MAX; i >= ATH10K_FW_API_MIN; i--) { + ar->fw_api = i; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", + ar->fw_api); - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE, - &ar->normal_mode_fw.fw_file); - if (ret == 0) - goto success; - - ar->fw_api = 3; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + ath10k_core_get_fw_name(ar, fw_name, sizeof(fw_name), ar->fw_api); + ret = ath10k_core_fetch_firmware_api_n(ar, fw_name, + &ar->normal_mode_fw.fw_file); + if (!ret) + goto success; + } - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE, - &ar->normal_mode_fw.fw_file); - if (ret == 0) - goto success; + /* we end up here if we couldn't fetch any firmware */ - ar->fw_api = 2; - ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + ath10k_err(ar, "Failed to find firmware-N.bin (N between %d and %d) from %s: %d", + ATH10K_FW_API_MIN, ATH10K_FW_API_MAX, ar->hw_params.fw.dir, + ret); - ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE, - &ar->normal_mode_fw.fw_file); - if (ret) - return ret; + return ret; success: ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index d5ff0f4ef5ce..fb0ade3adb07 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -237,7 +237,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file, { struct ath10k *ar = file->private_data; char *buf; - unsigned int len = 0, buf_len = 4096; + size_t len = 0, buf_len = 4096; const char *name; ssize_t ret_cnt; bool enabled; @@ -529,7 +529,7 @@ static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; - unsigned int len = strlen(buf); + size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -547,17 +547,16 @@ static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - int ret, len, buf_len; + int ret; + size_t len = 0, buf_len = 500; char *buf; - buf_len = 500; buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; spin_lock_bh(&ar->data_lock); - len = 0; len += scnprintf(buf + len, buf_len - len, "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter); len += scnprintf(buf + len, buf_len - len, @@ -696,7 +695,7 @@ static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) 
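The ath10k change above replaces the unrolled API-5/4/3/2 firmware attempts with a countdown loop that formats firmware-N.bin names from ATH10K_FW_API_MAX down to ATH10K_FW_API_MIN. The same shape in a standalone sketch; the stubbed fetch and its single "available" file are invented for the demo:

    #include <stdio.h>
    #include <string.h>

    #define FW_API_MAX 5
    #define FW_API_MIN 2

    /* Demo stub: pretend only firmware-3.bin exists on disk */
    static int fetch_fw_file(const char *name)
    {
        return strcmp(name, "firmware-3.bin") == 0 ? 0 : -2; /* -ENOENT */
    }

    int main(void)
    {
        char fw_name[100];
        int ret = -2, i;

        for (i = FW_API_MAX; i >= FW_API_MIN; i--) {
            snprintf(fw_name, sizeof(fw_name), "firmware-%d.bin", i);
            ret = fetch_fw_file(fw_name);
            printf("trying fw api %d (%s): %d\n", i, fw_name, ret);
            if (!ret)
                break;
        }
        if (ret)
            printf("no firmware-N.bin (N between %d and %d) found\n",
                   FW_API_MIN, FW_API_MAX);
        else
            printf("using %s\n", fw_name);
        return 0;
    }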
{ struct ath10k *ar = file->private_data; - unsigned int len; + size_t len; char buf[50]; len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id); @@ -733,8 +732,8 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar, struct ath10k_ce_crash_hdr *ce_hdr; struct ath10k_dump_file_data *dump_data; struct ath10k_tlv_dump_data *dump_tlv; - int hdr_len = sizeof(*dump_data); - unsigned int len, sofar = 0; + size_t hdr_len = sizeof(*dump_data); + size_t len, sofar = 0; unsigned char *buf; len = hdr_len; @@ -900,7 +899,7 @@ static ssize_t ath10k_reg_addr_read(struct file *file, { struct ath10k *ar = file->private_data; u8 buf[32]; - unsigned int len = 0; + size_t len = 0; u32 reg_addr; mutex_lock(&ar->conf_mutex); @@ -948,7 +947,7 @@ static ssize_t ath10k_reg_value_read(struct file *file, { struct ath10k *ar = file->private_data; u8 buf[48]; - unsigned int len; + size_t len; u32 reg_addr, reg_val; int ret; @@ -1171,7 +1170,7 @@ static ssize_t ath10k_read_htt_stats_mask(struct file *file, { struct ath10k *ar = file->private_data; char buf[32]; - unsigned int len; + size_t len; len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask); @@ -1225,7 +1224,7 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file, struct ath10k *ar = file->private_data; char buf[64]; u8 amsdu, ampdu; - unsigned int len; + size_t len; mutex_lock(&ar->conf_mutex); @@ -1285,7 +1284,7 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - unsigned int len; + size_t len; char buf[96]; len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n", @@ -1611,11 +1610,10 @@ static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - int len = 0; + size_t len; char buf[32]; - len = scnprintf(buf, sizeof(buf) - len, "%d\n", - ar->ani_enabled); + len = scnprintf(buf, sizeof(buf), "%d\n", ar->ani_enabled); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -1640,11 +1638,10 @@ static ssize_t ath10k_read_nf_cal_period(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - unsigned int len; + size_t len; char buf[32]; - len = scnprintf(buf, sizeof(buf), "%d\n", - ar->debug.nf_cal_period); + len = scnprintf(buf, sizeof(buf), "%d\n", ar->debug.nf_cal_period); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -1740,9 +1737,10 @@ void ath10k_debug_tpc_stats_process(struct ath10k *ar, } static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, - unsigned int j, char *buf, unsigned int *len) + unsigned int j, char *buf, size_t *len) { - unsigned int i, buf_len; + int i; + size_t buf_len; static const char table_str[][5] = { "CDD", "STBC", "TXBF" }; @@ -1782,7 +1780,8 @@ static void ath10k_tpc_stats_fill(struct ath10k *ar, struct ath10k_tpc_stats *tpc_stats, char *buf) { - unsigned int len, j, buf_len; + int j; + size_t len, buf_len; len = 0; buf_len = ATH10K_TPC_CONFIG_BUF_SIZE; @@ -1916,7 +1915,7 @@ static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; - unsigned int len = strlen(buf); + size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -2340,7 +2339,7 @@ static ssize_t ath10k_debug_fw_checksums_read(struct file *file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; - unsigned int len = 0, 
buf_len = 4096; + size_t len = 0, buf_len = 4096; ssize_t ret_cnt; char *buf; @@ -2556,7 +2555,7 @@ void ath10k_dbg_dump(struct ath10k *ar, const void *buf, size_t len) { char linebuf[256]; - unsigned int linebuflen; + size_t linebuflen; const void *ptr; if (ath10k_debug_mask & mask) { diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 38aa7c95732e..f0fda0f2b3b4 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -128,6 +128,10 @@ enum qca9377_chip_id_rev { #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin" #define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234 +#define ATH10K_FW_FILE_BASE "firmware" +#define ATH10K_FW_API_MAX 5 +#define ATH10K_FW_API_MIN 2 + #define ATH10K_FW_API2_FILE "firmware-2.bin" #define ATH10K_FW_API3_FILE "firmware-3.bin" diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 9977829a6ec4..3029f257a19a 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1993,7 +1993,7 @@ static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, { struct sk_buff *skb = data; struct ieee80211_mgmt *mgmt = (void *)skb->data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; if (vif->type != NL80211_IFTYPE_STATION) return; @@ -2016,7 +2016,7 @@ static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { u32 *vdev_id = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k *ar = arvif->ar; struct ieee80211_hw *hw = ar->hw; @@ -2083,7 +2083,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; u32 aid; lockdep_assert_held(&ar->conf_mutex); @@ -2159,7 +2159,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; @@ -2222,7 +2222,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, struct wmi_peer_assoc_complete_arg *arg) { const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -2446,7 +2446,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, struct wmi_peer_assoc_complete_arg *arg) { const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; enum nl80211_band band; const u16 *vht_mcs_mask; @@ -2507,7 +2507,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar, struct ieee80211_sta *sta, struct wmi_peer_assoc_complete_arg *arg) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: @@ -2574,7 +2574,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, struct ieee80211_sta *sta, struct 
wmi_peer_assoc_complete_arg *arg) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -2689,7 +2689,7 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, struct ieee80211_vif *vif, struct ieee80211_sta_vht_cap vht_cap) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; int ret; u32 param; u32 value; @@ -2756,7 +2756,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_bss_conf *bss_conf) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; struct wmi_peer_assoc_complete_arg peer_arg; @@ -2849,7 +2849,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ieee80211_sta_vht_cap vht_cap = {}; int ret; @@ -2882,7 +2882,7 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ieee80211_sta *sta, bool reassoc) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_peer_assoc_complete_arg peer_arg; int ret = 0; @@ -2949,7 +2949,7 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; int ret = 0; lockdep_assert_held(&ar->conf_mutex); @@ -3175,7 +3175,7 @@ static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath10k *ar = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; if (arvif->tx_paused) return; @@ -3262,7 +3262,7 @@ struct ath10k_mac_tx_pause { static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_mac_tx_pause *arg = data; if (arvif->vdev_id != arg->vdev_id) @@ -3358,7 +3358,7 @@ static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, return false; if (vif) - return !ath10k_vif_to_arvif(vif)->nohwcrypt; + return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; return true; } @@ -3423,7 +3423,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; /* This is case only for P2P_GO */ if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) @@ -4849,7 +4849,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_peer *peer; enum wmi_sta_powersave_param param; int ret = 0; @@ -5185,7 +5185,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_peer *peer; int ret; int i; @@ -5320,7 
+5320,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, u32 changed) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; int ret = 0; u32 vdev_param, pdev_param, slottime, preamble; @@ -5512,7 +5512,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_scan_request *hw_req) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_scan_request *req = &hw_req->req; struct wmi_start_scan_arg arg; int ret = 0; @@ -5644,7 +5644,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_key_conf *key) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_peer *peer; const u8 *peer_addr; bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || @@ -5783,7 +5783,7 @@ static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, int keyidx) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; int ret; mutex_lock(&arvif->ar->conf_mutex); @@ -5964,7 +5964,7 @@ static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; int *num_tdls_vifs = data; if (vif->type != NL80211_IFTYPE_STATION) @@ -5992,7 +5992,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, enum ieee80211_sta_state new_state) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; struct ath10k_peer *peer; int ret = 0; @@ -6227,7 +6227,7 @@ exit: static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, u16 ac, bool enable) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_sta_uapsd_auto_trig_arg arg = {}; u32 prio = 0, acc = 0; u32 value = 0; @@ -6335,7 +6335,7 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw, const struct ieee80211_tx_queue_params *params) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_wmm_params_arg *p = NULL; int ret; @@ -6409,7 +6409,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, enum ieee80211_roc_type type) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct wmi_start_scan_arg arg; int ret = 0; u32 scan_time_msec; @@ -6909,7 +6909,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_chan_def def; struct ath10k *ar = arvif->ar; enum nl80211_band band; @@ -7060,7 +7060,7 @@ static void ath10k_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, s64 tsf_offset) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; u32 
offset, vdev_param; int ret; @@ -7085,7 +7085,7 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_ampdu_params *params) { struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; @@ -7183,7 +7183,7 @@ ath10k_mac_update_vif_chan(struct ath10k *ar, ath10k_monitor_stop(ar); for (i = 0; i < n_vifs; i++) { - arvif = ath10k_vif_to_arvif(vifs[i].vif); + arvif = (void *)vifs[i].vif->drv_priv; ath10k_dbg(ar, ATH10K_DBG_MAC, "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n", @@ -7216,7 +7216,7 @@ ath10k_mac_update_vif_chan(struct ath10k *ar, spin_unlock_bh(&ar->data_lock); for (i = 0; i < n_vifs; i++) { - arvif = ath10k_vif_to_arvif(vifs[i].vif); + arvif = (void *)vifs[i].vif->drv_priv; if (WARN_ON(!arvif->is_started)) continue; @@ -7873,7 +7873,7 @@ static void ath10k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath10k_vif_iter *arvif_iter = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; if (arvif->vdev_id == arvif_iter->vdev_id) arvif_iter->arvif = arvif; diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 1bd29ecfcdcc..553747bc19ed 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -83,17 +83,12 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, u8 tid); int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val); -static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) -{ - return (struct ath10k_vif *)vif->drv_priv; -} - static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { if (arvif->tx_seq_no == 0) diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c index c0b6ffaf3ec1..7e621ee194e3 100644 --- a/drivers/net/wireless/ath/ath10k/p2p.c +++ b/drivers/net/wireless/ath/ath10k/p2p.c @@ -132,7 +132,7 @@ struct ath10k_p2p_noa_arg { static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; struct ath10k_p2p_noa_arg *arg = data; if (arvif->vdev_id != arg->vdev_id) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 5d2f9b9922d3..6094372307aa 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1651,6 +1651,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); + napi_enable(&ar->napi); + ath10k_pci_irq_enable(ar); ath10k_pci_rx_post(ar); @@ -2535,7 +2537,6 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) ath10k_err(ar, "could not wake up target CPU: %d\n", ret); goto err_ce; } - napi_enable(&ar->napi); return 0; diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index 2ffc1fe4923b..c061d6958bd1 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ 
b/drivers/net/wireless/ath/ath10k/spectral.c @@ -278,7 +278,7 @@ static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf, { struct ath10k *ar = file->private_data; char *mode = ""; - unsigned int len; + size_t len; enum ath10k_spectral_mode spectral_mode; mutex_lock(&ar->conf_mutex); @@ -370,7 +370,7 @@ static ssize_t read_file_spectral_count(struct file *file, { struct ath10k *ar = file->private_data; char buf[32]; - unsigned int len; + size_t len; u8 spectral_count; mutex_lock(&ar->conf_mutex); @@ -422,7 +422,8 @@ static ssize_t read_file_spectral_bins(struct file *file, { struct ath10k *ar = file->private_data; char buf[32]; - unsigned int len, bins, fft_size, bin_scale; + unsigned int bins, fft_size, bin_scale; + size_t len; mutex_lock(&ar->conf_mutex); diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index ed85f938e3c0..8bb36c18a749 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -150,7 +150,10 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar, ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE); /* load utf firmware image */ - ret = request_firmware(&fw_file->firmware, filename, ar->dev); + ret = request_firmware_direct(&fw_file->firmware, filename, ar->dev); + ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode fw request '%s': %d\n", + filename, ret); + if (ret) { ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n", filename, ret); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 414ad3e1eed4..2f1743e60fa1 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1779,7 +1779,7 @@ unlock: static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k_vif *arvif = (void *)vif->drv_priv; ath10k_wmi_tx_beacon_nowait(arvif); } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index cec713c99931..386aa51435f1 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -75,7 +75,7 @@ struct wmi_cmd_hdr { /* * There is no signed version of __le32, so for a temporary solution come - * up with our own version. The idea is from fs/ntfs/types.h. + * up with our own version. The idea is from fs/ntfs/endian.h. * * Use a_ prefix so that it doesn't conflict if we get proper support to * linux/types.h. 
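A quick illustration of the ath10k core.c change above: instead of four copy-pasted fetch attempts (firmware-5.bin, -4.bin, -3.bin, -2.bin), the driver now builds the file name in a loop and stops at the first image that loads. The sketch below is a rough userspace model of that control flow, not the driver itself; try_fetch() is a hypothetical stand-in for ath10k_core_fetch_firmware_api_n(), and the constants mirror the ATH10K_FW_FILE_BASE and ATH10K_FW_API_MIN/MAX definitions added to hw.h.

#include <errno.h>
#include <stdio.h>

#define FW_FILE_BASE "firmware"   /* mirrors ATH10K_FW_FILE_BASE */
#define FW_API_MAX 5              /* mirrors ATH10K_FW_API_MAX */
#define FW_API_MIN 2              /* mirrors ATH10K_FW_API_MIN */

/* Hypothetical stand-in for ath10k_core_fetch_firmware_api_n(). */
static int try_fetch(const char *name)
{
	FILE *f = fopen(name, "rb");

	if (!f)
		return -ENOENT;
	fclose(f);
	return 0;
}

int main(void)
{
	char fw_name[100];
	int i, ret = -ENOENT;

	/* Walk the firmware API versions from newest to oldest. */
	for (i = FW_API_MAX; i >= FW_API_MIN; i--) {
		snprintf(fw_name, sizeof(fw_name), "%s-%d.bin",
			 FW_FILE_BASE, i);
		ret = try_fetch(fw_name);
		if (!ret) {
			printf("using fw api %d (%s)\n", i, fw_name);
			return 0;
		}
	}

	fprintf(stderr, "failed to find %s-N.bin (N between %d and %d): %d\n",
		FW_FILE_BASE, FW_API_MIN, FW_API_MAX, ret);
	return 1;
}

With this shape, supporting a future firmware-6.bin means bumping the MAX constant by one line rather than pasting another fetch block.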
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index ac25f1781b42..87e99c12d4ba 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -641,7 +641,6 @@ struct ath6kl_vif { u32 txe_intvl; u16 bg_scan_period; u8 assoc_bss_dtim_period; - struct net_device_stats net_stats; struct target_stats target_stats; struct wmi_connect_cmd profile; u16 rsn_capab; diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 1af3fed5a72c..91ee542de3d7 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -1113,13 +1113,6 @@ static int ath6kl_close(struct net_device *dev) return 0; } -static struct net_device_stats *ath6kl_get_stats(struct net_device *dev) -{ - struct ath6kl_vif *vif = netdev_priv(dev); - - return &vif->net_stats; -} - static int ath6kl_set_features(struct net_device *dev, netdev_features_t features) { @@ -1285,7 +1278,6 @@ static const struct net_device_ops ath6kl_netdev_ops = { .ndo_open = ath6kl_open, .ndo_stop = ath6kl_close, .ndo_start_xmit = ath6kl_data_tx, - .ndo_get_stats = ath6kl_get_stats, .ndo_set_features = ath6kl_set_features, .ndo_set_rx_mode = ath6kl_set_multicast_list, }; diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index 9df41d5e3249..a531e0c5c1e2 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -405,7 +405,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) skb = skb_realloc_headroom(skb, dev->needed_headroom); kfree_skb(tmp_skb); if (skb == NULL) { - vif->net_stats.tx_dropped++; + dev->stats.tx_dropped++; return 0; } } @@ -520,8 +520,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) fail_tx: dev_kfree_skb(skb); - vif->net_stats.tx_dropped++; - vif->net_stats.tx_aborted_errors++; + dev->stats.tx_dropped++; + dev->stats.tx_aborted_errors++; return 0; } @@ -767,7 +767,7 @@ void ath6kl_tx_complete(struct htc_target *target, /* a packet was flushed */ flushing[if_idx] = true; - vif->net_stats.tx_errors++; + vif->ndev->stats.tx_errors++; if (status != -ENOSPC && status != -ECANCELED) ath6kl_warn("tx complete error: %d\n", status); @@ -783,8 +783,8 @@ void ath6kl_tx_complete(struct htc_target *target, eid, "OK"); flushing[if_idx] = false; - vif->net_stats.tx_packets++; - vif->net_stats.tx_bytes += skb->len; + vif->ndev->stats.tx_packets++; + vif->ndev->stats.tx_bytes += skb->len; } ath6kl_tx_clear_node_map(vif, eid, map_no); @@ -1365,8 +1365,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) */ spin_lock_bh(&vif->if_lock); - vif->net_stats.rx_packets++; - vif->net_stats.rx_bytes += packet->act_len; + vif->ndev->stats.rx_packets++; + vif->ndev->stats.rx_bytes += packet->act_len; spin_unlock_bh(&vif->if_lock); @@ -1395,8 +1395,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) ((packet->act_len < min_hdr_len) || (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) { ath6kl_info("frame len is too short or too long\n"); - vif->net_stats.rx_errors++; - vif->net_stats.rx_length_errors++; + vif->ndev->stats.rx_errors++; + vif->ndev->stats.rx_length_errors++; dev_kfree_skb(skb); return; } @@ -1619,7 +1619,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) return; } } else if (!is_broadcast_ether_addr(datap->h_dest)) { - vif->net_stats.multicast++; + vif->ndev->stats.multicast++; } 
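/*
 * Review note: this ath6kl conversion, and the brcmfmac and orinoco hunks
 * further below, all apply the same pattern: drop the driver-private
 * struct net_device_stats copy, count into the stats embedded in struct
 * net_device instead, and delete the now-redundant .ndo_get_stats
 * callback, since dev_get_stats() falls back to dev->stats when a driver
 * provides no stats callback of its own.
 */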
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 7dc7205dc877..bd2269c7de6b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -75,13 +75,13 @@ #define AR9300_OTP_BASE \ ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000) #define AR9300_OTP_STATUS \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18) + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18) #define AR9300_OTP_STATUS_TYPE 0x7 #define AR9300_OTP_STATUS_VALID 0x4 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 #define AR9300_OTP_STATUS_SM_BUSY 0x1 #define AR9300_OTP_READ_DATA \ - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c) + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3101c : 0x15f1c) enum targetPowerHTRates { HT_TARGET_RATE_0_8_16, diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 789a3dbe8341..0ffa23a61568 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -482,7 +482,7 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv) struct rchan *rc = spec_priv->rfs_chan_spec_scan; for_each_online_cpu(i) - ret += relay_buf_full(rc->buf[i]); + ret += relay_buf_full(*per_cpu_ptr(rc->buf, i)); i = num_online_cpus(); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 11073cf87909..396bf05c6bf6 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -699,51 +699,31 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf) return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); } -static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_txq *txq, - struct ath_buf *bf, struct ath_tx_status *ts) +static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_node *an, + struct ath_atx_tid *tid, struct ath_buf *bf, + struct ath_tx_status *ts) { - struct ath_node *an; - struct ath_acq *acq = &sc->cur_chan->acq[txq->mac80211_qnum]; - struct sk_buff *skb; - struct ieee80211_hdr *hdr; - struct ieee80211_hw *hw = sc->hw; - struct ieee80211_tx_rate rates[4]; - struct ieee80211_sta *sta; - int i; + struct ath_txq *txq = tid->txq; u32 airtime = 0; - - skb = bf->bf_mpdu; - if(!skb) - return; - - hdr = (struct ieee80211_hdr *)skb->data; - memcpy(rates, bf->rates, sizeof(rates)); - - rcu_read_lock(); - - sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); - if(!sta) - goto exit; - - - an = (struct ath_node *) sta->drv_priv; + int i; airtime += ts->duration * (ts->ts_longretry + 1); + for(i = 0; i < ts->ts_rateindex; i++) { + int rate_dur = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i); + airtime += rate_dur * bf->rates[i].count; + } - for(i=0; i < ts->ts_rateindex; i++) - airtime += ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i) * rates[i].count; + if (sc->airtime_flags & AIRTIME_USE_TX) { + int q = txq->mac80211_qnum; + struct ath_acq *acq = &sc->cur_chan->acq[q]; - if (!!(sc->airtime_flags & AIRTIME_USE_TX)) { spin_lock_bh(&acq->lock); - an->airtime_deficit[txq->mac80211_qnum] -= airtime; - if (an->airtime_deficit[txq->mac80211_qnum] <= 0) - __ath_tx_queue_tid(sc, ath_get_skb_tid(sc, an, skb)); + an->airtime_deficit[q] -= airtime; + if (an->airtime_deficit[q] <= 0) + __ath_tx_queue_tid(sc, tid); spin_unlock_bh(&acq->lock); } ath_debug_airtime(sc, an, 0, 
airtime); - -exit: - rcu_read_unlock(); } static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, @@ -767,13 +747,13 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, ts->ts_rateindex); - ath_tx_count_airtime(sc, txq, bf, ts); hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); if (sta) { struct ath_node *an = (struct ath_node *)sta->drv_priv; tid = ath_get_skb_tid(sc, an, bf->bf_mpdu); + ath_tx_count_airtime(sc, an, tid, bf, ts); if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) tid->clear_ps_filter = true; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index b73a55b00fa7..60da86a8d95b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -249,10 +249,10 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, done: if (ret) { - ifp->stats.tx_dropped++; + ndev->stats.tx_dropped++; } else { - ifp->stats.tx_packets++; - ifp->stats.tx_bytes += skb->len; + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; } /* Return ok: we always eat the packet */ @@ -296,15 +296,15 @@ void brcmf_txflowblock(struct device *dev, bool state) void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb) { if (skb->pkt_type == PACKET_MULTICAST) - ifp->stats.multicast++; + ifp->ndev->stats.multicast++; if (!(ifp->ndev->flags & IFF_UP)) { brcmu_pkt_buf_free_skb(skb); return; } - ifp->stats.rx_bytes += skb->len; - ifp->stats.rx_packets++; + ifp->ndev->stats.rx_bytes += skb->len; + ifp->ndev->stats.rx_packets++; brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol)); if (in_interrupt()) @@ -327,7 +327,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb, if (ret || !(*ifp) || !(*ifp)->ndev) { if (ret != -ENODATA && *ifp) - (*ifp)->stats.rx_errors++; + (*ifp)->ndev->stats.rx_errors++; brcmu_pkt_buf_free_skb(skb); return -ENODATA; } @@ -388,7 +388,7 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success) } if (!success) - ifp->stats.tx_errors++; + ifp->ndev->stats.tx_errors++; brcmu_pkt_buf_free_skb(txp); } @@ -411,15 +411,6 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success) } } -static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev) -{ - struct brcmf_if *ifp = netdev_priv(ndev); - - brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); - - return &ifp->stats; -} - static void brcmf_ethtool_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { @@ -492,7 +483,6 @@ static int brcmf_netdev_open(struct net_device *ndev) static const struct net_device_ops brcmf_netdev_ops_pri = { .ndo_open = brcmf_netdev_open, .ndo_stop = brcmf_netdev_stop, - .ndo_get_stats = brcmf_netdev_get_stats, .ndo_start_xmit = brcmf_netdev_start_xmit, .ndo_set_mac_address = brcmf_netdev_set_mac_address, .ndo_set_rx_mode = brcmf_netdev_set_multicast_list diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index de3197be5491..6aecd8dfd824 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -171,7 +171,6 @@ enum brcmf_netif_stop_reason { * @drvr: points to device related information. 
* @vif: points to cfg80211 specific interface information. * @ndev: associated network device. - * @stats: interface specific network statistics. * @multicast_work: worker object for multicast provisioning. * @ndoffload_work: worker object for neighbor discovery offload configuration. * @fws_desc: interface specific firmware-signalling descriptor. @@ -187,7 +186,6 @@ struct brcmf_if { struct brcmf_pub *drvr; struct brcmf_cfg80211_vif *vif; struct net_device *ndev; - struct net_device_stats stats; struct work_struct multicast_work; struct work_struct ndoffload_work; struct brcmf_fws_mac_descriptor *fws_desc; diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c index 9d96b7c928f7..28cf97489001 100644 --- a/drivers/net/wireless/intersil/orinoco/main.c +++ b/drivers/net/wireless/intersil/orinoco/main.c @@ -294,14 +294,6 @@ int orinoco_stop(struct net_device *dev) } EXPORT_SYMBOL(orinoco_stop); -struct net_device_stats *orinoco_get_stats(struct net_device *dev) -{ - struct orinoco_private *priv = ndev_priv(dev); - - return &priv->stats; -} -EXPORT_SYMBOL(orinoco_get_stats); - void orinoco_set_multicast_list(struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); @@ -433,7 +425,7 @@ EXPORT_SYMBOL(orinoco_process_xmit_skb); static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct hermes *hw = &priv->hw; int err = 0; u16 txfid = priv->txfid; @@ -593,10 +585,7 @@ static void __orinoco_ev_alloc(struct net_device *dev, struct hermes *hw) static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw) { - struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; - - stats->tx_packets++; + dev->stats.tx_packets++; netif_wake_queue(dev); @@ -605,8 +594,7 @@ static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw) static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw) { - struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; u16 fid = hermes_read_regn(hw, TXCOMPLFID); u16 status; struct hermes_txexc_data hdr; @@ -662,7 +650,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw) void orinoco_tx_timeout(struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct hermes *hw = &priv->hw; printk(KERN_WARNING "%s: Tx timeout! 
" @@ -749,7 +737,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid, int len; struct sk_buff *skb; struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct hermes *hw = &priv->hw; len = le16_to_cpu(desc->data_len); @@ -840,7 +828,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid, void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw) { struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct iw_statistics *wstats = &priv->wstats; struct sk_buff *skb = NULL; u16 rxfid, status; @@ -959,7 +947,7 @@ static void orinoco_rx(struct net_device *dev, struct sk_buff *skb) { struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; u16 status, fc; int length; struct ethhdr *hdr; @@ -2137,7 +2125,6 @@ static const struct net_device_ops orinoco_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = orinoco_tx_timeout, - .ndo_get_stats = orinoco_get_stats, }; /* Allocate private data. diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h index 5fa1c3e3713f..430862a6a24b 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco.h +++ b/drivers/net/wireless/intersil/orinoco/orinoco.h @@ -84,7 +84,6 @@ struct orinoco_private { /* Net device stuff */ struct net_device *ndev; - struct net_device_stats stats; struct iw_statistics wstats; /* Hardware control variables */ @@ -206,7 +205,6 @@ int orinoco_process_xmit_skb(struct sk_buff *skb, /* Common ndo functions exported for reuse by orinoco_usb */ int orinoco_open(struct net_device *dev); int orinoco_stop(struct net_device *dev); -struct net_device_stats *orinoco_get_stats(struct net_device *dev); void orinoco_set_multicast_list(struct net_device *dev); int orinoco_change_mtu(struct net_device *dev, int new_mtu); void orinoco_tx_timeout(struct net_device *dev); diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index bca6935a94db..98e1380b9917 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -403,8 +403,7 @@ static void ezusb_ctx_complete(struct request_context *ctx) if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) { struct net_device *dev = upriv->dev; - struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; if (ctx->state != EZUSB_CTX_COMPLETE) stats->tx_errors++; @@ -1183,7 +1182,7 @@ static int ezusb_program(struct hermes *hw, const char *buf, static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct ezusb_priv *upriv = priv->card; u8 mic[MICHAEL_MIC_LEN + 1]; int err = 0; @@ -1556,7 +1555,6 @@ static const struct net_device_ops ezusb_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = orinoco_tx_timeout, - .ndo_get_stats = orinoco_get_stats, }; static int ezusb_probe(struct usb_interface *interface, diff --git a/drivers/net/wireless/marvell/mwifiex/main.c 
b/drivers/net/wireless/marvell/mwifiex/main.c index 9d80180a5519..5ebca1d0cfc7 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1519,13 +1519,13 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter) struct device *dev = adapter->dev; if (!dev->of_node) - return; + goto err_exit; adapter->dt_node = dev->of_node; adapter->irq_wakeup = irq_of_parse_and_map(adapter->dt_node, 0); if (!adapter->irq_wakeup) { - dev_info(dev, "fail to parse irq_wakeup from device tree\n"); - return; + dev_dbg(dev, "fail to parse irq_wakeup from device tree\n"); + goto err_exit; } ret = devm_request_irq(dev, adapter->irq_wakeup, @@ -1545,7 +1545,7 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter) return; err_exit: - adapter->irq_wakeup = 0; + adapter->irq_wakeup = -1; } /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index 62357465fe29..0d2670a56c4c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -55,7 +55,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); * If the csr_mutex is already held then the _lock variants must * be used instead. */ -static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, +static void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 *value) { @@ -66,7 +66,7 @@ static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, *value = le16_to_cpu(reg); } -static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev, +static void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 *value) { @@ -77,16 +77,7 @@ static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev, *value = le16_to_cpu(reg); } -static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev, - const unsigned int offset, - void *value, const u16 length) -{ - rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, - USB_VENDOR_REQUEST_IN, offset, - value, length); -} - -static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, +static void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 value) { @@ -96,7 +87,7 @@ static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, &reg, sizeof(reg)); } -static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, +static void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 value) { @@ -106,7 +97,7 @@ static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, &reg, sizeof(reg), REGISTER_TIMEOUT); } -static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, +static void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, const unsigned int offset, void *value, const u16 length) { diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index e9f0a76107ed..d67bbfb6ad8e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -1872,16 +1872,8 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy( } } else if (bt_link_info->hid_exist && bt_link_info->a2dp_exist) { /*HID+A2DP */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == 
BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->auto_tdma_adjust = false; - } else { /*for low BT RSSI*/ - halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, - true, 14); - coex_dm->auto_tdma_adjust = false; - } + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + coex_dm->auto_tdma_adjust = false; halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6); /*PAN(OPP,FTP), HID+PAN(OPP,FTP) */ @@ -2250,16 +2242,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist, 1, 2, 30, 0); - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8723b1ant_limited_tx(btcoexist, - NORMAL_EXEC, - 1, 1, 1, 1); - } else { - halbtc8723b1ant_limited_tx(btcoexist, - NORMAL_EXEC, - 1, 1, 1, 1); - } + halbtc8723b1ant_limited_tx(btcoexist, + NORMAL_EXEC, 1, 1, 1, 1); } else { halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index 20562f081c96..8b689ed9a629 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -1768,11 +1768,7 @@ static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist, /* tdma and coex table*/ halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - if (BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == - wifi_status) - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); - else - halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist, @@ -2123,16 +2119,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) wifi_rssi_state = halbtc8821a1ant_WifiRssiState(btcoexist, 1, 2, 30, 0); - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a1ant_limited_tx(btcoexist, - NORMAL_EXEC, 1, 1, - 1, 1); - } else { - halbtc8821a1ant_limited_tx(btcoexist, - NORMAL_EXEC, 1, 1, - 1, 1); - } + halbtc8821a1ant_limited_tx(btcoexist, + NORMAL_EXEC, 1, 1, 1, 1); } else { halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index 962fbaf3ba1d..1717e9ce96ca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -2388,14 +2388,8 @@ static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist) * halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 0); /*for voice quality*/ - } else { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 0); /*for voice quality*/ - } + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); /*for voice quality*/ /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -2785,14 +2779,7 @@ static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist) NORMAL_EXEC, false); } - if ((bt_rssi_state 
== BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 1); - } else { - halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, - false, 1); - } + halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -2830,40 +2817,18 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { - /* for HID at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5afa5afa, 0xffff, 0x3); - } else { - /* for HID quality & wifi performance balance at 11n mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5afa5afa, 0xffff, 0x3); - } + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5afa5afa, 0xffff, 0x3); if (BTC_WIFI_BW_HT40 == wifi_bw) { /* fw mechanism */ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - false, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - false, 3); - } - } else { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - true, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - true, 3); - } - } + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + btc8821a2ant_tdma_dur_adj(btcoexist, false, + false, 3); + else + btc8821a2ant_tdma_dur_adj(btcoexist, false, + true, 3); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -2877,31 +2842,14 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) false, false); btc8821a2ant_sw_mech2(btcoexist, false, false, false, 0x18); - }; + } } else { /* fw mechanism */ if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - false, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - false, 3); - } - } else { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - true, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, false, - true, 3); - } - } + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 3); + else + btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 3); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -2937,15 +2885,8 @@ static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { - /* for HID at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5f5a5f, 0xffff, 0x3); - } else { - /* for HID quality & wifi performance balance at 11n mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5f5a5f, 0xffff, 0x3); - } + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5f5a5f, 0xffff, 0x3); if (BTC_WIFI_BW_HT40 == wifi_bw) { halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3); @@ -3020,40 +2961,12 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, 
&wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { - /* for HID at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5a5a5a, 0xffff, 0x3); - } else { - /* for HID quality & wifi performance balance at 11n mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5a5a5a5a, 0xffff, 0x3); - } + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5a5a5a5a, 0xffff, 0x3); if (BTC_WIFI_BW_HT40 == wifi_bw) { /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - true, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - true, 3); - } - } else { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - true, 3); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, true, - true, 3); - } - } + btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 3); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -3126,40 +3039,12 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist) btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); - if (BTC_WIFI_BW_LEGACY == wifi_bw) { - /* for HID at 11b/g mode */ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5f5b5f5b, 0xffffff, 0x3); - } else { - /*for HID quality & wifi performance balance at 11n mode*/ - halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, - 0x5f5b5f5b, 0xffffff, 0x3); - } + halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff, + 0x5f5b5f5b, 0xffffff, 0x3); if (BTC_WIFI_BW_HT40 == wifi_bw) { /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } - } else { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } - } + btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || @@ -3176,29 +3061,7 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist) } } else { /* fw mechanism */ - if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || - (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - if (bt_info_ext&BIT0) { - /* a2dp basic rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - - } else { - /* a2dp edr rate */ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } - } else { - if (bt_info_ext&BIT0) { - /*a2dp basic rate*/ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } else { - /*a2dp edr rate*/ - btc8821a2ant_tdma_dur_adj(btcoexist, - true, true, 2); - } - } + btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2); /* sw mechanism */ if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index e536aa01b937..a21fda910529 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -3202,6 +3202,21 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, if (ret < 0) goto out_sleep; } + + /* + * If interface in AP mode and created 
with allmulticast then disable + * the firmware filters so that all multicast packets are passed + * This is mandatory for MDNS based discovery protocols + */ + if (wlvif->bss_type == BSS_TYPE_AP_BSS) { + if (*total & FIF_ALLMULTI) { + ret = wl1271_acx_group_address_tbl(wl, wlvif, + false, + NULL, 0); + if (ret < 0) + goto out_sleep; + } + } } /* diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 3ce1f7da8647..530586be05b4 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -113,10 +113,10 @@ struct xenvif_stats { * A subset of struct net_device_stats that contains only the * fields that are updated in netback.c for each queue. */ - unsigned int rx_bytes; - unsigned int rx_packets; - unsigned int tx_bytes; - unsigned int tx_packets; + u64 rx_bytes; + u64 rx_packets; + u64 tx_bytes; + u64 tx_packets; /* Additional stats used by xenvif */ unsigned long rx_gso_checksum_fixup; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 1073b27e54aa..a2d326760a72 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -221,10 +221,10 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; - unsigned long rx_bytes = 0; - unsigned long rx_packets = 0; - unsigned long tx_bytes = 0; - unsigned long tx_packets = 0; + u64 rx_bytes = 0; + u64 rx_packets = 0; + u64 tx_bytes = 0; + u64 tx_packets = 0; unsigned int index; spin_lock(&vif->lock); diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c index eca9688bf9d9..c00238491673 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c @@ -1629,6 +1629,28 @@ static void atom_deinit_dev(struct intel_ntb_dev *ndev) /* Skylake Xeon NTB */ +static int skx_poll_link(struct intel_ntb_dev *ndev) +{ + u16 reg_val; + int rc; + + ndev->reg->db_iowrite(ndev->db_link_mask, + ndev->self_mmio + + ndev->self_reg->db_clear); + + rc = pci_read_config_word(ndev->ntb.pdev, + SKX_LINK_STATUS_OFFSET, &reg_val); + if (rc) + return 0; + + if (reg_val == ndev->lnk_sta) + return 0; + + ndev->lnk_sta = reg_val; + + return 1; +} + static u64 skx_db_ioread(void __iomem *mmio) { return ioread64(mmio); @@ -2852,7 +2874,7 @@ static struct intel_b2b_addr xeon_b2b_dsd_addr = { }; static const struct intel_ntb_reg skx_reg = { - .poll_link = xeon_poll_link, + .poll_link = skx_poll_link, .link_is_up = xeon_link_is_up, .db_ioread = skx_db_ioread, .db_iowrite = skx_db_iowrite, diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index f81aa4b18d9f..02ca45fdd892 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1802,7 +1802,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev, node = dev_to_node(&ndev->dev); - free_queue = ffs(nt->qp_bitmap); + free_queue = ffs(nt->qp_bitmap_free); if (!free_queue) goto err; @@ -2273,9 +2273,8 @@ module_init(ntb_transport_init); static void __exit ntb_transport_exit(void) { - debugfs_remove_recursive(nt_debugfs_dir); - ntb_unregister_client(&ntb_transport_client); bus_unregister(&ntb_transport_bus); + debugfs_remove_recursive(nt_debugfs_dir); } module_exit(ntb_transport_exit); diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index e75d4fdc0866..434e1d474f33 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -265,6 +265,8 @@ static ssize_t 
perf_copy(struct pthr_ctx *pctx, char __iomem *dst, if (dma_submit_error(cookie)) goto err_set_unmap; + dmaengine_unmap_put(unmap); + atomic_inc(&pctx->dma_sync); dma_async_issue_pending(chan); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 262281bd68fa..0b2979816dbf 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -147,6 +147,7 @@ EXPORT_SYMBOL(of_mdio_parse_addr); */ static const struct of_device_id whitelist_phys[] = { { .compatible = "brcm,40nm-ephy" }, + { .compatible = "broadcom,bcm5241" }, { .compatible = "marvell,88E1111", }, { .compatible = "marvell,88e1116", }, { .compatible = "marvell,88e1118", }, diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 717529331dac..2dd1c68e6de8 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -433,6 +433,17 @@ static int pcie_pme_resume(struct pcie_device *srv) return 0; } +/** + * pcie_pme_remove - Prepare PCIe PME service device for removal. + * @srv - PCIe service device to remove. + */ +static void pcie_pme_remove(struct pcie_device *srv) +{ + pcie_pme_suspend(srv); + free_irq(srv->irq, srv); + kfree(get_service_data(srv)); +} + static struct pcie_port_service_driver pcie_pme_driver = { .name = "pcie_pme", .port_type = PCI_EXP_TYPE_ROOT_PORT, @@ -441,6 +452,7 @@ static struct pcie_port_service_driver pcie_pme_driver = { .probe = pcie_pme_probe, .suspend = pcie_pme_suspend, .resume = pcie_pme_resume, + .remove = pcie_pme_remove, }; /** diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 9c13381b6966..e8142803a1a7 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -221,18 +221,17 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, mutex_init(&ptp->pincfg_mux); init_waitqueue_head(&ptp->tsev_wq); + err = ptp_populate_pin_groups(ptp); + if (err) + goto no_pin_groups; + /* Create a new device in our class. */ - ptp->dev = device_create(ptp_class, parent, ptp->devid, ptp, - "ptp%d", ptp->index); + ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid, + ptp, ptp->pin_attr_groups, + "ptp%d", ptp->index); if (IS_ERR(ptp->dev)) goto no_device; - dev_set_drvdata(ptp->dev, ptp); - - err = ptp_populate_sysfs(ptp); - if (err) - goto no_sysfs; - /* Register a new PPS source. */ if (info->pps) { struct pps_source_info pps; @@ -260,10 +259,10 @@ no_clock: if (ptp->pps_source) pps_unregister_source(ptp->pps_source); no_pps: - ptp_cleanup_sysfs(ptp); -no_sysfs: device_destroy(ptp_class, ptp->devid); no_device: + ptp_cleanup_pin_groups(ptp); +no_pin_groups: mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); ida_simple_remove(&ptp_clocks_map, index); @@ -282,8 +281,9 @@ int ptp_clock_unregister(struct ptp_clock *ptp) /* Release the clock's resources. 
*/ if (ptp->pps_source) pps_unregister_source(ptp->pps_source); - ptp_cleanup_sysfs(ptp); + device_destroy(ptp_class, ptp->devid); + ptp_cleanup_pin_groups(ptp); posix_clock_unregister(&ptp->clock); return 0; diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 9c5d41421b65..d95888974d0c 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -54,6 +54,8 @@ struct ptp_clock { struct device_attribute *pin_dev_attr; struct attribute **pin_attr; struct attribute_group pin_attr_group; + /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ + const struct attribute_group *pin_attr_groups[2]; }; /* @@ -94,8 +96,7 @@ uint ptp_poll(struct posix_clock *pc, extern const struct attribute_group *ptp_groups[]; -int ptp_cleanup_sysfs(struct ptp_clock *ptp); - -int ptp_populate_sysfs(struct ptp_clock *ptp); +int ptp_populate_pin_groups(struct ptp_clock *ptp); +void ptp_cleanup_pin_groups(struct ptp_clock *ptp); #endif diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 53d43954a974..48401dfcd999 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -46,27 +46,6 @@ PTP_SHOW_INT(n_periodic_outputs, n_per_out); PTP_SHOW_INT(n_programmable_pins, n_pins); PTP_SHOW_INT(pps_available, pps); -static struct attribute *ptp_attrs[] = { - &dev_attr_clock_name.attr, - &dev_attr_max_adjustment.attr, - &dev_attr_n_alarms.attr, - &dev_attr_n_external_timestamps.attr, - &dev_attr_n_periodic_outputs.attr, - &dev_attr_n_programmable_pins.attr, - &dev_attr_pps_available.attr, - NULL, -}; - -static const struct attribute_group ptp_group = { - .attrs = ptp_attrs, -}; - -const struct attribute_group *ptp_groups[] = { - &ptp_group, - NULL, -}; - - static ssize_t extts_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -91,6 +70,7 @@ static ssize_t extts_enable_store(struct device *dev, out: return err; } +static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store); static ssize_t extts_fifo_show(struct device *dev, struct device_attribute *attr, char *page) @@ -124,6 +104,7 @@ out: mutex_unlock(&ptp->tsevq_mux); return cnt; } +static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL); static ssize_t period_store(struct device *dev, struct device_attribute *attr, @@ -151,6 +132,7 @@ static ssize_t period_store(struct device *dev, out: return err; } +static DEVICE_ATTR(period, 0220, NULL, period_store); static ssize_t pps_enable_store(struct device *dev, struct device_attribute *attr, @@ -177,6 +159,57 @@ static ssize_t pps_enable_store(struct device *dev, out: return err; } +static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); + +static struct attribute *ptp_attrs[] = { + &dev_attr_clock_name.attr, + + &dev_attr_max_adjustment.attr, + &dev_attr_n_alarms.attr, + &dev_attr_n_external_timestamps.attr, + &dev_attr_n_periodic_outputs.attr, + &dev_attr_n_programmable_pins.attr, + &dev_attr_pps_available.attr, + + &dev_attr_extts_enable.attr, + &dev_attr_fifo.attr, + &dev_attr_period.attr, + &dev_attr_pps_enable.attr, + NULL +}; + +static umode_t ptp_is_attribute_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_clock_info *info = ptp->info; + umode_t mode = attr->mode; + + if (attr == &dev_attr_extts_enable.attr || + attr == &dev_attr_fifo.attr) { + if (!info->n_ext_ts) + mode = 0; + } else if (attr == &dev_attr_period.attr) { + if (!info->n_per_out) + mode = 0; + } else if 
(attr == &dev_attr_pps_enable.attr) { + if (!info->pps) + mode = 0; + } + + return mode; +} + +static const struct attribute_group ptp_group = { + .is_visible = ptp_is_attribute_visible, + .attrs = ptp_attrs, +}; + +const struct attribute_group *ptp_groups[] = { + &ptp_group, + NULL +}; static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name) { @@ -235,47 +268,20 @@ static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store); -static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL); -static DEVICE_ATTR(period, 0220, NULL, period_store); -static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); - -int ptp_cleanup_sysfs(struct ptp_clock *ptp) +int ptp_populate_pin_groups(struct ptp_clock *ptp) { - struct device *dev = ptp->dev; - struct ptp_clock_info *info = ptp->info; - - if (info->n_ext_ts) { - device_remove_file(dev, &dev_attr_extts_enable); - device_remove_file(dev, &dev_attr_fifo); - } - if (info->n_per_out) - device_remove_file(dev, &dev_attr_period); - - if (info->pps) - device_remove_file(dev, &dev_attr_pps_enable); - - if (info->n_pins) { - sysfs_remove_group(&dev->kobj, &ptp->pin_attr_group); - kfree(ptp->pin_attr); - kfree(ptp->pin_dev_attr); - } - return 0; -} - -static int ptp_populate_pins(struct ptp_clock *ptp) -{ - struct device *dev = ptp->dev; struct ptp_clock_info *info = ptp->info; int err = -ENOMEM, i, n_pins = info->n_pins; - ptp->pin_dev_attr = kzalloc(n_pins * sizeof(*ptp->pin_dev_attr), + if (!n_pins) + return 0; + + ptp->pin_dev_attr = kcalloc(n_pins, sizeof(*ptp->pin_dev_attr), GFP_KERNEL); if (!ptp->pin_dev_attr) goto no_dev_attr; - ptp->pin_attr = kzalloc((1 + n_pins) * sizeof(struct attribute *), - GFP_KERNEL); + ptp->pin_attr = kcalloc(1 + n_pins, sizeof(*ptp->pin_attr), GFP_KERNEL); if (!ptp->pin_attr) goto no_pin_attr; @@ -292,61 +298,18 @@ static int ptp_populate_pins(struct ptp_clock *ptp) ptp->pin_attr_group.name = "pins"; ptp->pin_attr_group.attrs = ptp->pin_attr; - err = sysfs_create_group(&dev->kobj, &ptp->pin_attr_group); - if (err) - goto no_group; + ptp->pin_attr_groups[0] = &ptp->pin_attr_group; + return 0; -no_group: - kfree(ptp->pin_attr); no_pin_attr: kfree(ptp->pin_dev_attr); no_dev_attr: return err; } -int ptp_populate_sysfs(struct ptp_clock *ptp) +void ptp_cleanup_pin_groups(struct ptp_clock *ptp) { - struct device *dev = ptp->dev; - struct ptp_clock_info *info = ptp->info; - int err; - - if (info->n_ext_ts) { - err = device_create_file(dev, &dev_attr_extts_enable); - if (err) - goto out1; - err = device_create_file(dev, &dev_attr_fifo); - if (err) - goto out2; - } - if (info->n_per_out) { - err = device_create_file(dev, &dev_attr_period); - if (err) - goto out3; - } - if (info->pps) { - err = device_create_file(dev, &dev_attr_pps_enable); - if (err) - goto out4; - } - if (info->n_pins) { - err = ptp_populate_pins(ptp); - if (err) - goto out5; - } - return 0; -out5: - if (info->pps) - device_remove_file(dev, &dev_attr_pps_enable); -out4: - if (info->n_per_out) - device_remove_file(dev, &dev_attr_period); -out3: - if (info->n_ext_ts) - device_remove_file(dev, &dev_attr_fifo); -out2: - if (info->n_ext_ts) - device_remove_file(dev, &dev_attr_extts_enable); -out1: - return err; + kfree(ptp->pin_attr); + kfree(ptp->pin_dev_attr); } diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 10368ed8fd13..b6f5f1e1826c 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -163,7 +163,7 @@ int reset_control_reset(struct 
reset_control *rstc) } ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id); - if (rstc->shared && !ret) + if (rstc->shared && ret) atomic_dec(&rstc->triggered_count); return ret; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 75f820ca17b7..27ff38f839fc 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1583,7 +1583,7 @@ out: int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; - struct zfcp_fsf_req *req = NULL; + struct zfcp_fsf_req *req; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) zfcp_fsf_req_free(req); out: spin_unlock_irq(&qdio->req_q_lock); - if (req && !IS_ERR(req)) + if (!retval) zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); return retval; } @@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) { struct zfcp_qdio *qdio = wka_port->adapter->qdio; - struct zfcp_fsf_req *req = NULL; + struct zfcp_fsf_req *req; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) zfcp_fsf_req_free(req); out: spin_unlock_irq(&qdio->req_q_lock); - if (req && !IS_ERR(req)) + if (!retval) zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); return retval; } diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 4f56b1003cc7..5b48bedd7c38 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -50,9 +50,13 @@ struct aac_common aac_config = { static inline int aac_is_msix_mode(struct aac_dev *dev) { - u32 status; + u32 status = 0; - status = src_readl(dev, MUnit.OMR); + if (dev->pdev->device == PMC_DEVICE_S6 || + dev->pdev->device == PMC_DEVICE_S7 || + dev->pdev->device == PMC_DEVICE_S8) { + status = src_readl(dev, MUnit.OMR); + } return (status & AAC_INT_MODE_MSIX); } diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 75f3fce1c867..0b5b423b1db0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -51,6 +51,7 @@ #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pci.h> +#include <linux/pci-aspm.h> #include <linux/interrupt.h> #include <linux/aer.h> #include <linux/raid_class.h> @@ -4657,6 +4658,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) struct MPT3SAS_DEVICE *sas_device_priv_data; u32 response_code = 0; unsigned long flags; + unsigned int sector_sz; mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); scmd = _scsih_scsi_lookup_get_clear(ioc, smid); @@ -4715,6 +4717,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) } xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); + + /* In case of bogus fw or device, we could end up having + * unaligned partial completion. We can force alignment here, + * then scsi-ml does not need to handle this misbehavior. 
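+ * For example (illustrative numbers only): with sector_sz = 512 and a + * bogus xfer_cnt of 1040, round_down(1040, 512) yields 1024, so only + * whole sectors are counted as transferred when the residual is set + * below.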
+ */ + sector_sz = scmd->device->sector_size; + if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz && + xfer_cnt % sector_sz)) { + sdev_printk(KERN_INFO, scmd->device, + "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n", + xfer_cnt, sector_sz); + xfer_cnt = round_down(xfer_cnt, sector_sz); + } + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) log_info = le32_to_cpu(mpi_reply->IOCLogInfo); @@ -8746,6 +8762,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) switch (hba_mpi_version) { case MPI2_VERSION: + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); /* Use mpt2sas driver host template for SAS 2.0 HBA's */ shost = scsi_host_alloc(&mpt2sas_driver_template, sizeof(struct MPT3SAS_ADAPTER)); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index dc88a09f9043..a94b0b6bd030 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3242,7 +3242,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) * from a probe failure context. */ if (!ha->rsp_q_map || !ha->rsp_q_map[0]) - return; + goto free_irqs; rsp = ha->rsp_q_map[0]; if (ha->flags.msix_enabled) { @@ -3262,6 +3262,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) free_irq(pci_irq_vector(ha->pdev, 0), rsp); } +free_irqs: pci_free_irq_vectors(ha->pdev); } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 0a000ecf0881..40660461a4b5 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1616,7 +1616,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) /* Don't abort commands in adapter during EEH * recovery as it's not accessible/responding. */ - if (!ha->flags.eeh_busy) { + if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) { /* Get a reference to the sp and drop the lock. * The reference ensures this sp->done() call * - and not the call in qla2xxx_eh_abort() - diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 7f390849343b..c4444d6f439f 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -1024,6 +1024,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long buf_offset; unsigned long current_buf_start; unsigned long start_byte; + unsigned long prev_start_byte; unsigned long working_bytes = total_out - buf_start; unsigned long bytes; char *kaddr; @@ -1071,26 +1072,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, if (!bio->bi_iter.bi_size) return 0; bvec = bio_iter_iovec(bio, bio->bi_iter); - + prev_start_byte = start_byte; start_byte = page_offset(bvec.bv_page) - disk_start; /* - * make sure our new page is covered by this - * working buffer + * We need to make sure we're only adjusting + * our offset into compression working buffer when + * we're switching pages. Otherwise we can incorrectly + * keep copying when we were actually done. */ - if (total_out <= start_byte) - return 1; + if (start_byte != prev_start_byte) { + /* + * make sure our new page is covered by this + * working buffer + */ + if (total_out <= start_byte) + return 1; - /* - * the next page in the biovec might not be adjacent - * to the last page, but it might still be found - * inside this working buffer. 
bump our offset pointer - */ - if (total_out > start_byte && - current_buf_start < start_byte) { - buf_offset = start_byte - buf_start; - working_bytes = total_out - start_byte; - current_buf_start = buf_start + buf_offset; + /* + * the next page in the biovec might not be adjacent + * to the last page, but it might still be found + * inside this working buffer. bump our offset pointer + */ + if (total_out > start_byte && + current_buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes = total_out - start_byte; + current_buf_start = buf_start + buf_offset; + } } } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 33f967d30b2a..21e51b0ba188 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -5653,6 +5653,10 @@ long btrfs_ioctl(struct file *file, unsigned int #ifdef CONFIG_COMPAT long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { + /* + * These all access 32-bit values anyway so no further + * handling is necessary. + */ switch (cmd) { case FS_IOC32_GETFLAGS: cmd = FS_IOC_GETFLAGS; @@ -5663,8 +5667,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FS_IOC32_GETVERSION: cmd = FS_IOC_GETVERSION; break; - default: - return -ENOIOCTLCMD; } return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 4e06a27ed7f8..f11792672977 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -399,6 +399,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) { spin_lock(&fiq->waitq.lock); + if (test_bit(FR_FINISHED, &req->flags)) { + spin_unlock(&fiq->waitq.lock); + return; + } if (list_empty(&req->intr_entry)) { list_add_tail(&req->intr_entry, &fiq->interrupts); wake_up_locked(&fiq->waitq); @@ -1372,6 +1376,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, * code can Oops if the buffer persists after module unload. */ bufs[page_nr].ops = &nosteal_pipe_buf_ops; + bufs[page_nr].flags = 0; ret = add_to_pipe(pipe, &bufs[page_nr++]); if (unlikely(ret < 0)) break; diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 94f50cac91c6..70e94170af85 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1420,26 +1420,32 @@ static struct shrinker glock_shrinker = { * @sdp: the filesystem * @bucket: the bucket * + * Note that the function can be called multiple times on the same + * object. So the user must ensure that the function can cope with + * that. 
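+ * (The iteration below restarts with -EAGAIN whenever the rhashtable + * is resized mid-walk, which is how the same glock can be handed to + * the examiner more than once.)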
*/ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) { struct gfs2_glock *gl; - struct rhash_head *pos; - const struct bucket_table *tbl; - int i; + struct rhashtable_iter iter; - rcu_read_lock(); - tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table); - for (i = 0; i < tbl->size; i++) { - rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) { + rhashtable_walk_enter(&gl_hash_table, &iter); + + do { + gl = ERR_PTR(rhashtable_walk_start(&iter)); + if (gl) + continue; + + while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) if ((gl->gl_name.ln_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref)) examiner(gl); - } - } - rcu_read_unlock(); - cond_resched(); + + rhashtable_walk_stop(&iter); + } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); + + rhashtable_walk_exit(&iter); } /** diff --git a/fs/splice.c b/fs/splice.c index 873d83104e79..4ef78aa8ef61 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -204,6 +204,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, buf->len = spd->partial[page_nr].len; buf->private = spd->partial[page_nr].private; buf->ops = spd->ops; + buf->flags = 0; pipe->nrbufs++; page_nr++; diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 92bc89ae7e20..c970a25d2a49 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -21,20 +21,19 @@ struct cgroup_bpf { */ struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE]; struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE]; + bool disallow_override[MAX_BPF_ATTACH_TYPE]; }; void cgroup_bpf_put(struct cgroup *cgrp); void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent); -void __cgroup_bpf_update(struct cgroup *cgrp, - struct cgroup *parent, - struct bpf_prog *prog, - enum bpf_attach_type type); +int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent, + struct bpf_prog *prog, enum bpf_attach_type type, + bool overridable); /* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */ -void cgroup_bpf_update(struct cgroup *cgrp, - struct bpf_prog *prog, - enum bpf_attach_type type); +int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type, bool overridable); int __cgroup_bpf_run_filter_skb(struct sock *sk, struct sk_buff *skb, diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 57d60dc5b600..909fc033173a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -8,10 +8,12 @@ #define _LINUX_BPF_H 1 #include <uapi/linux/bpf.h> + #include <linux/workqueue.h> #include <linux/file.h> #include <linux/percpu.h> #include <linux/err.h> +#include <linux/rbtree_latch.h> struct perf_event; struct bpf_map; @@ -177,6 +179,8 @@ struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; u32 max_ctx_offset; + struct latch_tree_node ksym_tnode; + struct list_head ksym_lnode; const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; diff --git a/include/linux/filter.h b/include/linux/filter.h index e4eb2546339a..0c1cc9143cb2 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -54,6 +54,12 @@ struct bpf_prog_aux; #define BPF_REG_AX MAX_BPF_REG #define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) +/* As per nm, we expose JITed images as text (code) section for + * kallsyms. That way, tools like perf can find it to match + * addresses. + */ +#define BPF_SYM_ELF_TYPE 't' + /* BPF program can access up to 512 bytes of stack space. 
*/ #define MAX_BPF_STACK 512 @@ -555,6 +561,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { set_memory_rw((unsigned long)fp, fp->pages); } + +static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) +{ + set_memory_rw((unsigned long)hdr, hdr->pages); +} #else static inline void bpf_prog_lock_ro(struct bpf_prog *fp) { @@ -563,8 +574,21 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp) static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) { } + +static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) +{ +} #endif /* CONFIG_DEBUG_SET_MODULE_RONX */ +static inline struct bpf_binary_header * +bpf_jit_binary_hdr(const struct bpf_prog *fp) +{ + unsigned long real_start = (unsigned long)fp->bpf_func; + unsigned long addr = real_start & PAGE_MASK; + + return (void *)addr; +} + int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); static inline int sk_filter(struct sock *sk, struct sk_buff *skb) { @@ -607,6 +631,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); +void bpf_jit_compile(struct bpf_prog *prog); bool bpf_helper_changes_pkt_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, @@ -616,6 +641,7 @@ void bpf_warn_invalid_xdp_action(u32 act); #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; extern int bpf_jit_harden; +extern int bpf_jit_kallsyms; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); @@ -625,7 +651,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, bpf_jit_fill_hole_t bpf_fill_ill_insns); void bpf_jit_binary_free(struct bpf_binary_header *hdr); -void bpf_jit_compile(struct bpf_prog *fp); void bpf_jit_free(struct bpf_prog *fp); struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); @@ -651,6 +676,11 @@ static inline bool bpf_jit_is_ebpf(void) # endif } +static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) +{ + return fp->jited && bpf_jit_is_ebpf(); +} + static inline bool bpf_jit_blinding_enabled(void) { /* These are the prerequisites, should someone ever have the @@ -668,15 +698,91 @@ static inline bool bpf_jit_blinding_enabled(void) return true; } -#else -static inline void bpf_jit_compile(struct bpf_prog *fp) + +static inline bool bpf_jit_kallsyms_enabled(void) +{ + /* There are a couple of corner cases where kallsyms should + * not be enabled f.e. on hardening. 
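+ * + * When it is enabled, a JITed program shows up in /proc/kallsyms as a + * text symbol named after its tag, roughly like this (sketch only; the + * address and tag below are made up): + * + * ffffffffc0123000 t bpf_prog_5a1e9f3c0c5d9c2e + * + * which is what lets tools such as perf resolve samples in JITed + * images.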
+ */ + if (bpf_jit_harden) + return false; + if (!bpf_jit_kallsyms) + return false; + if (bpf_jit_kallsyms == 1) + return true; + + return false; +} + +const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym); +bool is_bpf_text_address(unsigned long addr); +int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym); + +static inline const char * +bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym) { + const char *ret = __bpf_address_lookup(addr, size, off, sym); + + if (ret && modname) + *modname = NULL; + return ret; +} + +void bpf_prog_kallsyms_add(struct bpf_prog *fp); +void bpf_prog_kallsyms_del(struct bpf_prog *fp); + +#else /* CONFIG_BPF_JIT */ + +static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) +{ + return false; } static inline void bpf_jit_free(struct bpf_prog *fp) { bpf_prog_unlock_free(fp); } + +static inline bool bpf_jit_kallsyms_enabled(void) +{ + return false; +} + +static inline const char * +__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym) +{ + return NULL; +} + +static inline bool is_bpf_text_address(unsigned long addr) +{ + return false; +} + +static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *sym) +{ + return -ERANGE; +} + +static inline const char * +bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char **modname, char *sym) +{ + return NULL; +} + +static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) +{ +} + +static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) +{ +} #endif /* CONFIG_BPF_JIT */ #define BPF_ANC BIT(15) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 98f65ed8f8b0..f40f0ab3847a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -352,6 +352,7 @@ enum gro_result { GRO_HELD, GRO_NORMAL, GRO_DROP, + GRO_CONSUMED, }; typedef enum gro_result gro_result_t; @@ -2667,6 +2668,19 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, remcsum_unadjust((__sum16 *)ptr, grc->delta); } +#ifdef CONFIG_XFRM_OFFLOAD +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) +{ + if (PTR_ERR(pp) != -EINPROGRESS) + NAPI_GRO_CB(skb)->flush |= flush; +} +#else +static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush) +{ + NAPI_GRO_CB(skb)->flush |= flush; +} +#endif + static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, diff --git a/include/linux/phy.h b/include/linux/phy.h index d9bdf53e0514..772476028a65 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -807,6 +807,9 @@ int phy_stop_interrupts(struct phy_device *phydev); static inline int phy_read_status(struct phy_device *phydev) { + if (!phydev->drv) + return -EIO; + return phydev->drv->read_status(phydev); } diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 2052011bf9fb..6c70444da3b9 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -111,6 +111,11 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) return 0; } +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * consume in interrupt or BH context, you must disable interrupts/BH when + * calling this. 
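+ * + * A minimal sketch, assuming the ring is consumed from BH context + * while this producer runs in process context: + * + * local_bh_disable(); + * ret = ptr_ring_produce(r, ptr); + * local_bh_enable(); + * + * (or, equivalently, use the ptr_ring_produce_bh() variant in this + * header, which takes the producer lock with BH disabled).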
+ */ static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) { int ret; @@ -242,6 +247,11 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) return ptr; } +/* + * Note: resize (below) nests producer lock within consumer lock, so if you + * call this in interrupt or BH context, you must disable interrupts/BH when + * producing. + */ static inline void *ptr_ring_consume(struct ptr_ring *r) { void *ptr; @@ -357,7 +367,7 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, void **old; void *ptr; - while ((ptr = ptr_ring_consume(r))) + while ((ptr = __ptr_ring_consume(r))) if (producer < size) queue[producer++] = ptr; else if (destroy) @@ -372,6 +382,12 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, return old; } +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. + */ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, void (*destroy)(void *)) { @@ -382,17 +398,25 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, if (!queue) return -ENOMEM; - spin_lock_irqsave(&(r)->producer_lock, flags); + spin_lock_irqsave(&(r)->consumer_lock, flags); + spin_lock(&(r)->producer_lock); old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); - spin_unlock_irqrestore(&(r)->producer_lock, flags); + spin_unlock(&(r)->producer_lock); + spin_unlock_irqrestore(&(r)->consumer_lock, flags); kfree(old); return 0; } +/* + * Note: producer lock is nested within consumer lock, so if you + * resize you must make sure all uses nest correctly. + * In particular if you consume ring in interrupt or BH context, you must + * disable interrupts/BH when doing so. 
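+ * + * Caller sketch (process context; new_size is a hypothetical target, + * and kfree() is invoked on any entries that no longer fit): + * + * err = ptr_ring_resize_multiple(rings, nrings, new_size, + * GFP_KERNEL, kfree);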
+ */ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, int size, gfp_t gfp, void (*destroy)(void *)) @@ -412,10 +436,12 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, } for (i = 0; i < nrings; ++i) { - spin_lock_irqsave(&(rings[i])->producer_lock, flags); + spin_lock_irqsave(&(rings[i])->consumer_lock, flags); + spin_lock(&(rings[i])->producer_lock); queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], size, gfp, destroy); - spin_unlock_irqrestore(&(rings[i])->producer_lock, flags); + spin_unlock(&(rings[i])->producer_lock); + spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); } for (i = 0; i < nrings; ++i) diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index c33080baf38c..52966b9bfde3 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -62,6 +62,7 @@ #define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 #define ISCSI_CDU_TASK_SEG_TYPE 0 +#define FCOE_CDU_TASK_SEG_TYPE 0 #define RDMA_CDU_TASK_SEG_TYPE 1 #define FW_ASSERT_GENERAL_ATTN_IDX 32 @@ -205,6 +206,9 @@ #define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 #define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 #define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 #define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 @@ -261,6 +265,7 @@ #define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) #define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) #define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) @@ -291,6 +296,9 @@ #define DQ_TCM_AGG_FLG_SHIFT_CF6 6 #define DQ_TCM_AGG_FLG_SHIFT_CF7 7 /* TCM agg counter flag selection (FW) */ +#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) +#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) @@ -728,7 +736,7 @@ enum mf_mode { /* Per-protocol connection types */ enum protocol_type { PROTOCOLID_ISCSI, - PROTOCOLID_RESERVED2, + PROTOCOLID_FCOE, PROTOCOLID_ROCE, PROTOCOLID_CORE, PROTOCOLID_ETH, diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h new file mode 100644 index 000000000000..2e417a45c5f7 --- /dev/null +++ b/include/linux/qed/fcoe_common.h @@ -0,0 +1,715 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef __FCOE_COMMON__ +#define __FCOE_COMMON__ +/*********************/ +/* FCOE FW CONSTANTS */ +/*********************/ + +#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 +#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600) + +struct fcoe_abts_pkt { + __le32 abts_rsp_fc_payload_lo; + __le16 abts_rsp_rx_id; + u8 abts_rsp_rctl; + u8 reserved2; +}; + +/* FCoE additional WQE (Sq/XferQ) information */ +union fcoe_additional_info_union { + __le32 previous_tid; + __le32 parent_tid; + __le32 burst_length; + __le32 seq_rec_updated_offset; +}; + +struct fcoe_exp_ro { + __le32 data_offset; + __le32 reserved; +}; + +union fcoe_cleanup_addr_exp_ro_union { + struct regpair abts_rsp_fc_payload_hi; + struct fcoe_exp_ro exp_ro; +}; + +/* FCoE Ramrod Command IDs */ +enum fcoe_completion_status { + FCOE_COMPLETION_STATUS_SUCCESS, + FCOE_COMPLETION_STATUS_FCOE_VER_ERR, + FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, + MAX_FCOE_COMPLETION_STATUS +}; + +struct fc_addr_nw { + u8 addr_lo; + u8 addr_mid; + u8 addr_hi; +}; + +/* FCoE connection offload */ +struct fcoe_conn_offload_ramrod_data { + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 dst_mac_addr_lo; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_hi; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + __le16 src_mac_addr_hi; + __le16 tx_max_fc_pay_len; + __le16 e_d_tov_timer_val; + __le16 rx_max_fc_pay_len; + __le16 vlan_tag; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 + __le16 physical_q0; + __le16 rec_rr_tov_timer_val; + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6 + __le16 conn_id; + u8 def_q_idx; + u8 reserved[5]; +}; + +/* FCoE terminate connection request */ +struct fcoe_conn_terminate_ramrod_data { + struct regpair terminate_params_addr; +}; + +struct fcoe_fast_sgl_ctx { + struct regpair sgl_start_addr; + __le32 sgl_byte_offset; + __le16 task_reuse_cnt; + __le16 init_offset_in_first_sge; +}; + +struct fcoe_slow_sgl_ctx { + struct regpair base_sgl_addr; + __le16 curr_sge_off; + __le16 remainder_num_sges; + __le16 curr_sgl_index; + __le16 reserved; +}; + +struct fcoe_sge { + struct regpair sge_addr; + __le16 size; + __le16 reserved0; + u8 reserved1[3]; + u8 is_valid_sge; +}; + +union fcoe_data_desc_ctx { + struct fcoe_fast_sgl_ctx fast; + struct 
fcoe_slow_sgl_ctx slow; + struct fcoe_sge single_sge; +}; + +union fcoe_dix_desc_ctx { + struct fcoe_slow_sgl_ctx dix_sgl; + struct fcoe_sge cached_dix_sge; +}; + +struct fcoe_fcp_cmd_payload { + __le32 opaque[8]; +}; + +struct fcoe_fcp_rsp_payload { + __le32 opaque[6]; +}; + +struct fcoe_fcp_xfer_payload { + __le32 opaque[3]; +}; + +/* FCoE firmware function init */ +struct fcoe_init_func_ramrod_data { + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; + __le16 mtu; + __le16 sq_num_pages_in_pbl; + __le32 reserved; +}; + +/* FCoE: Mode of the connection: Target or Initiator or both */ +enum fcoe_mode_type { + FCOE_INITIATOR_MODE = 0x0, + FCOE_TARGET_MODE = 0x1, + FCOE_BOTH_OR_NOT_CHOSEN = 0x3, + MAX_FCOE_MODE_TYPE +}; + +struct fcoe_mstorm_fcoe_task_st_ctx_fp { + __le16 flags; +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15 + __le16 difDataResidue; + __le16 parent_id; + __le16 single_sge_saved_offset; + __le32 data_2_trns_rem; + __le32 offset_in_io; + union fcoe_dix_desc_ctx dix_desc; + union fcoe_data_desc_ctx data_desc; +}; + +struct fcoe_mstorm_fcoe_task_st_ctx_non_fp { + __le16 flags; +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15 + u8 tx_rx_sgl_mode; +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3 +#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6 + u8 rsrv2; + __le32 num_prm_zero_read; + struct regpair rsp_buf_addr; +}; + +struct fcoe_rx_stat { + struct regpair fcoe_rx_byte_cnt; + struct regpair fcoe_rx_data_pkt_cnt; + struct regpair fcoe_rx_xfer_pkt_cnt; + struct regpair 
fcoe_rx_other_pkt_cnt; + __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; + __le32 fcoe_silent_drop_pkt_rq_full_cnt; + __le32 fcoe_silent_drop_pkt_crc_error_cnt; + __le32 fcoe_silent_drop_pkt_task_invalid_cnt; + __le32 fcoe_silent_drop_total_pkt_cnt; + __le32 rsrv; +}; + +enum fcoe_sgl_mode { + FCOE_SLOW_SGL, + FCOE_SINGLE_FAST_SGE, + FCOE_2_FAST_SGE, + FCOE_3_FAST_SGE, + FCOE_4_FAST_SGE, + FCOE_MUL_FAST_SGES, + MAX_FCOE_SGL_MODE +}; + +struct fcoe_stat_ramrod_data { + struct regpair stat_params_addr; +}; + +struct protection_info_ctx { + __le16 flags; +#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 +#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 +#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F +#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 + u8 dix_block_size; + u8 dst_size; +}; + +union protection_info_union_ctx { + struct protection_info_ctx info; + __le32 value; +}; + +struct fcp_rsp_payload_padded { + struct fcoe_fcp_rsp_payload rsp_payload; + __le32 reserved[2]; +}; + +struct fcp_xfer_payload_padded { + struct fcoe_fcp_xfer_payload xfer_payload; + __le32 reserved[5]; +}; + +struct fcoe_tx_data_params { + __le32 data_offset; + __le32 offset_in_io; + u8 flags; +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 +#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F +#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 + u8 dif_residual; + __le16 seq_cnt; + __le16 single_sge_saved_offset; + __le16 next_dif_offset; + __le16 seq_id; + __le16 reserved3; +}; + +struct fcoe_tx_mid_path_params { + __le32 parameter; + u8 r_ctl; + u8 type; + u8 cs_ctl; + u8 df_ctl; + __le16 rx_id; + __le16 ox_id; +}; + +struct fcoe_tx_params { + struct fcoe_tx_data_params data; + struct fcoe_tx_mid_path_params mid_path; +}; + +union fcoe_tx_info_union_ctx { + struct fcoe_fcp_cmd_payload fcp_cmd_payload; + struct fcp_rsp_payload_padded fcp_rsp_payload; + struct fcp_xfer_payload_padded fcp_xfer_payload; + struct fcoe_tx_params tx_params; +}; + +struct ystorm_fcoe_task_st_ctx { + u8 task_type; + u8 sgl_mode; +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7 +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3 + u8 cached_dix_sge; + u8 expect_first_xfer; + __le32 num_pbf_zero_write; + union protection_info_union_ctx protection_info_union; + __le32 data_2_trns_rem; + union fcoe_tx_info_union_ctx tx_info_union; + union fcoe_dix_desc_ctx dix_desc; + union fcoe_data_desc_ctx data_desc; + __le16 ox_id; + __le16 rx_id; + __le32 task_rety_identifier; + __le32 reserved1[2]; +}; + +struct ystorm_fcoe_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 word0; + u8 flags0; +#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 +#define 
YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 rx_id; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le32 reg1; + __le32 reg2; +}; + +struct tstorm_fcoe_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 + u8 flags1; +#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 + u8 flags2; +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 + u8 flags3; +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 +#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 + u8 cleanup_state; + __le16 last_sent_tid; + __le32 rec_rr_tov_exp_timeout; + u8 byte3; + u8 byte4; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 data_offset_end_of_seq; + __le32 data_offset_next; +}; + +struct fcoe_tstorm_fcoe_task_st_ctx_read_write { + union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; + __le16 flags; +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10 + __le16 seq_cnt; + u8 seq_id; + u8 ooo_rx_seq_id; + __le16 rx_id; + struct fcoe_abts_pkt abts_data; + __le32 e_d_tov_exp_timeout_val; + __le16 ooo_rx_seq_cnt; + __le16 reserved1; +}; + +struct fcoe_tstorm_fcoe_task_st_ctx_read_only { + u8 task_type; + u8 dev_type; + u8 conf_supported; + u8 glbl_q_num; + __le32 cid; + __le32 fcp_cmd_trns_size; + __le32 rsrv; +}; + +struct tstorm_fcoe_task_st_ctx { + struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; + struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; +}; + +struct mstorm_fcoe_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 icid; + u8 flags0; +#define 
MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 +#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 +#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 cleanup_state; + __le32 received_bytes; + u8 byte3; + u8 glbl_q_num; + __le16 word1; + __le16 tid_to_xfer; + __le16 word3; + __le16 word4; + __le16 word5; + __le32 expected_bytes; + __le32 reg2; +}; + +struct mstorm_fcoe_task_st_ctx { + struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp; + struct fcoe_mstorm_fcoe_task_st_ctx_fp fp; +}; + +struct ustorm_fcoe_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define 
USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 + u8 flags3; +#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 global_cq_num; + __le32 reg3; + __le32 reg4; + __le32 reg5; +}; + +struct fcoe_task_context { + struct ystorm_fcoe_task_st_ctx ystorm_st_context; + struct tdif_task_context tdif_context; + struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; + struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; + struct timers_context timer_context; + struct tstorm_fcoe_task_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct mstorm_fcoe_task_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_task_st_ctx mstorm_st_context; + struct ustorm_fcoe_task_ag_ctx ustorm_ag_context; + struct rdif_task_context rdif_context; +}; + +struct fcoe_tx_stat { + struct regpair fcoe_tx_byte_cnt; + struct regpair fcoe_tx_data_pkt_cnt; + struct regpair fcoe_tx_xfer_pkt_cnt; + struct regpair fcoe_tx_other_pkt_cnt; +}; + +struct fcoe_wqe { + __le16 task_id; + __le16 flags; +#define FCOE_WQE_REQ_TYPE_MASK 0xF +#define FCOE_WQE_REQ_TYPE_SHIFT 0 +#define FCOE_WQE_SGL_MODE_MASK 0x7 +#define FCOE_WQE_SGL_MODE_SHIFT 4 +#define FCOE_WQE_CONTINUATION_MASK 0x1 +#define FCOE_WQE_CONTINUATION_SHIFT 7 +#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1 +#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8 +#define FCOE_WQE_SUPER_IO_MASK 0x1 +#define FCOE_WQE_SUPER_IO_SHIFT 9 +#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 +#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10 +#define FCOE_WQE_RESERVED0_MASK 0x1F +#define FCOE_WQE_RESERVED0_SHIFT 11 + union fcoe_additional_info_union additional_info_union; +}; + +struct xfrqe_prot_flags { + u8 flags; +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 +#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 +#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 +}; + +struct fcoe_db_data { + u8 params; +#define FCOE_DB_DATA_DEST_MASK 0x3 +#define FCOE_DB_DATA_DEST_SHIFT 0 +#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 +#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 +#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define FCOE_DB_DATA_RESERVED_MASK 0x1 +#define FCOE_DB_DATA_RESERVED_SHIFT 5 +#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; +#endif /* __FCOE_COMMON__ */ diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h new file mode 100644 index 000000000000..bd6bcb809415 --- /dev/null +++ b/include/linux/qed/qed_fcoe_if.h @@ -0,0 +1,145 @@ +#ifndef _QED_FCOE_IF_H 
+#define _QED_FCOE_IF_H +#include <linux/types.h> +#include <linux/qed/qed_if.h> +struct qed_fcoe_stats { + u64 fcoe_rx_byte_cnt; + u64 fcoe_rx_data_pkt_cnt; + u64 fcoe_rx_xfer_pkt_cnt; + u64 fcoe_rx_other_pkt_cnt; + u32 fcoe_silent_drop_pkt_cmdq_full_cnt; + u32 fcoe_silent_drop_pkt_rq_full_cnt; + u32 fcoe_silent_drop_pkt_crc_error_cnt; + u32 fcoe_silent_drop_pkt_task_invalid_cnt; + u32 fcoe_silent_drop_total_pkt_cnt; + + u64 fcoe_tx_byte_cnt; + u64 fcoe_tx_data_pkt_cnt; + u64 fcoe_tx_xfer_pkt_cnt; + u64 fcoe_tx_other_pkt_cnt; +}; + +struct qed_dev_fcoe_info { + struct qed_dev_info common; + + void __iomem *primary_dbq_rq_addr; + void __iomem *secondary_bdq_rq_addr; +}; + +struct qed_fcoe_params_offload { + dma_addr_t sq_pbl_addr; + dma_addr_t sq_curr_page_addr; + dma_addr_t sq_next_page_addr; + + u8 src_mac[ETH_ALEN]; + u8 dst_mac[ETH_ALEN]; + + u16 tx_max_fc_pay_len; + u16 e_d_tov_timer_val; + u16 rec_tov_timer_val; + u16 rx_max_fc_pay_len; + u16 vlan_tag; + + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; + u8 def_q_idx; +}; + +#define MAX_TID_BLOCKS_FCOE (512) +struct qed_fcoe_tid { + u32 size; /* In bytes per task */ + u32 num_tids_per_block; + u8 *blocks[MAX_TID_BLOCKS_FCOE]; +}; + +struct qed_fcoe_cb_ops { + struct qed_common_cb_ops common; + u32 (*get_login_failures)(void *cookie); +}; + +void qed_fcoe_set_pf_params(struct qed_dev *cdev, + struct qed_fcoe_pf_params *params); + +/** + * struct qed_fcoe_ops - qed FCoE operations. + * @common: common operations pointer + * @fill_dev_info: fills FCoE specific information + * @param cdev + * @param info + * @return 0 on success, otherwise error value. + * @register_ops: register FCoE operations + * @param cdev + * @param ops - specified using qed_fcoe_cb_ops + * @param cookie - driver private + * @ll2: light L2 operations pointer + * @start: starts fcoe in FW + * @param cdev + * @param tasks - qed will fill information about tasks + * return 0 on success, otherwise error value. + * @stop: stops fcoe in FW + * @param cdev + * return 0 on success, otherwise error value. + * @acquire_conn: acquire a new fcoe connection + * @param cdev + * @param handle - qed will fill handle that should be + * used henceforth as identifier of the + * connection. + * @param fw_cid - qed will fill the FW connection id. + * @param p_doorbell - qed will fill the address of the + * doorbell. + * return 0 on success, otherwise error value. + * @release_conn: release a previously acquired fcoe connection + * @param cdev + * @param handle - the connection handle. + * return 0 on success, otherwise error value. + * @offload_conn: configures an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param conn_info - the configuration to use for the + * offload. + * return 0 on success, otherwise error value. + * @destroy_conn: stops an offloaded connection + * @param cdev + * @param handle - the connection handle. + * @param terminate_params - DMA address of the termination parameters. + * return 0 on success, otherwise error value. + * @get_stats: gets FCoE related statistics + * @param cdev + * @param stats - pointer to struct that will be filled + * with stats + * return 0 on success, error otherwise.
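+ * + * Illustrative call sequence for an upper-layer FCoE driver (a sketch + * only; dev_info, tasks, my_cb_ops and my_cookie are hypothetical + * caller-owned objects, and error handling is elided): + * + * const struct qed_fcoe_ops *qed_ops = qed_get_fcoe_ops(); + * + * qed_ops->fill_dev_info(cdev, &dev_info); + * qed_ops->register_ops(cdev, &my_cb_ops, my_cookie); + * qed_ops->start(cdev, &tasks); + * ... + * qed_ops->stop(cdev); + * qed_put_fcoe_ops();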
+ */ struct qed_fcoe_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_fcoe_info *info); + + void (*register_ops)(struct qed_dev *cdev, + struct qed_fcoe_cb_ops *ops, void *cookie); + + const struct qed_ll2_ops *ll2; + + int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks); + + int (*stop)(struct qed_dev *cdev); + + int (*acquire_conn)(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell); + + int (*release_conn)(struct qed_dev *cdev, u32 handle); + + int (*offload_conn)(struct qed_dev *cdev, + u32 handle, + struct qed_fcoe_params_offload *conn_info); + int (*destroy_conn)(struct qed_dev *cdev, + u32 handle, dma_addr_t terminate_params); + + int (*get_stats)(struct qed_dev *cdev, struct qed_fcoe_stats *stats); +}; + +const struct qed_fcoe_ops *qed_get_fcoe_ops(void); +void qed_put_fcoe_ops(void); +#endif diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index d1576a2bcfc9..fde56c436f71 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -59,7 +59,6 @@ enum dcbx_protocol_type { #define QED_ROCE_PROTOCOL_INDEX (3) -#ifdef CONFIG_DCB #define QED_LLDP_CHASSIS_ID_STAT_LEN 4 #define QED_LLDP_PORT_ID_STAT_LEN 4 #define QED_DCBX_MAX_APP_PROTOCOL 32 @@ -155,7 +154,6 @@ struct qed_dcbx_get { struct qed_dcbx_remote_params remote; struct qed_dcbx_admin_params local; }; -#endif enum qed_led_mode { QED_LED_MODE_OFF, @@ -182,6 +180,38 @@ struct qed_eth_pf_params { u16 num_cons; }; +struct qed_fcoe_pf_params { + /* The following parameters are used during protocol-init */ + u64 glbl_q_params_addr; + u64 bdq_pbl_base_addr[2]; + + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to the update_pf_params routine invoked before slowpath start + */ + u16 num_cons; + u16 num_tasks; + + /* The following parameters are used during protocol-init */ + u16 sq_num_pbl_pages; + + u16 cq_num_entries; + u16 cmdq_num_entries; + u16 rq_buffer_log_size; + u16 mtu; + u16 dummy_icid; + u16 bdq_xoff_threshold[2]; + u16 bdq_xon_threshold[2]; + u16 rq_buffer_size; + u8 num_cqs; /* num of global CQs */ + u8 log_page_size; + u8 gl_rq_pi; + u8 gl_cmd_pi; + u8 debug_mode; + u8 is_target; + u8 bdq_pbl_num_entries[2]; +}; + /* Most of the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; @@ -245,6 +275,7 @@ struct qed_rdma_pf_params { struct qed_pf_params { struct qed_eth_pf_params eth_pf_params; + struct qed_fcoe_pf_params fcoe_pf_params; struct qed_iscsi_pf_params iscsi_pf_params; struct qed_rdma_pf_params rdma_pf_params; }; @@ -305,6 +336,7 @@ enum qed_sb_type { enum qed_protocol { QED_PROTOCOL_ETH, QED_PROTOCOL_ISCSI, + QED_PROTOCOL_FCOE, }; enum qed_link_mode_bits { @@ -391,6 +423,7 @@ struct qed_int_info { struct qed_common_cb_ops { void (*link_update)(void *dev, struct qed_link_output *link); + void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); }; struct qed_selftest_ops { @@ -494,6 +527,10 @@ struct qed_common_ops { void (*simd_handler_clean)(struct qed_dev *cdev, int index); + int (*dbg_grc)(struct qed_dev *cdev, + void *buffer, u32 *num_dumped_bytes); + + int (*dbg_grc_size)(struct qed_dev *cdev); int (*dbg_all_data) (struct qed_dev *cdev, void *buffer); diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 5c132d3188be..f2e12a845910 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -61,6 +61,7 @@
struct rhlist_head { /** * struct bucket_table - Table of hash buckets * @size: Number of hash buckets + * @nest: Number of bits of first-level nested table. * @rehash: Current bucket being rehashed * @hash_rnd: Random seed to fold into hash * @locks_mask: Mask to apply before accessing locks[] @@ -68,10 +69,12 @@ struct rhlist_head { * @walkers: List of active walkers * @rcu: RCU structure for freeing the table * @future_tbl: Table under construction during rehashing + * @ntbl: Nested table used when out of memory. * @buckets: size * hash buckets */ struct bucket_table { unsigned int size; + unsigned int nest; unsigned int rehash; u32 hash_rnd; unsigned int locks_mask; @@ -81,7 +84,7 @@ struct bucket_table { struct bucket_table __rcu *future_tbl; - struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; + struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; /** @@ -374,6 +377,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void *arg); void rhashtable_destroy(struct rhashtable *ht); +struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, + unsigned int hash); +struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, + struct bucket_table *tbl, + unsigned int hash); + #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) @@ -389,6 +398,27 @@ void rhashtable_destroy(struct rhashtable *ht); #define rht_entry(tpos, pos, member) \ ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) +static inline struct rhash_head __rcu *const *rht_bucket( + const struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : + &tbl->buckets[hash]; +} + +static inline struct rhash_head __rcu **rht_bucket_var( + struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : + &tbl->buckets[hash]; +} + +static inline struct rhash_head __rcu **rht_bucket_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) +{ + return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : + &tbl->buckets[hash]; +} + /** * rht_for_each_continue - continue iterating over hash chain * @pos: the &struct rhash_head to use as a loop cursor. @@ -408,7 +438,7 @@ void rhashtable_destroy(struct rhashtable *ht); * @hash: the hash value / bucket index */ #define rht_for_each(pos, tbl, hash) \ - rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash) + rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash) /** * rht_for_each_entry_continue - continue iterating over hash chain @@ -433,7 +463,7 @@ void rhashtable_destroy(struct rhashtable *ht); * @member: name of the &struct rhash_head within the hashable struct. */ #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ - rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \ + rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \ tbl, hash, member) /** @@ -448,13 +478,13 @@ void rhashtable_destroy(struct rhashtable *ht); * This hash chain list-traversal primitive allows for the looped code to * remove the loop cursor from the list. */ -#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ - for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \ - next = !rht_is_a_nulls(pos) ? \ - rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ - (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ - pos = next, \ - next = !rht_is_a_nulls(pos) ? 
\ +#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ + for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \ + next = !rht_is_a_nulls(pos) ? \ + rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ + (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \ + pos = next, \ + next = !rht_is_a_nulls(pos) ? \ rht_dereference_bucket(pos->next, tbl, hash) : NULL) /** @@ -485,7 +515,7 @@ void rhashtable_destroy(struct rhashtable *ht); * traversal is guarded by rcu_read_lock(). */ #define rht_for_each_rcu(pos, tbl, hash) \ - rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash) + rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash) /** * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain @@ -518,8 +548,8 @@ void rhashtable_destroy(struct rhashtable *ht); * the _rcu mutation primitives such as rhashtable_insert() as long as the * traversal is guarded by rcu_read_lock(). */ -#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ - rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ +#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ + rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \ tbl, hash, member) /** @@ -565,7 +595,7 @@ static inline struct rhash_head *__rhashtable_lookup( .ht = ht, .key = key, }; - const struct bucket_table *tbl; + struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; @@ -697,8 +727,12 @@ slow_path: } elasticity = ht->elasticity; - pprev = &tbl->buckets[hash]; - rht_for_each(head, tbl, hash) { + pprev = rht_bucket_insert(ht, tbl, hash); + data = ERR_PTR(-ENOMEM); + if (!pprev) + goto out; + + rht_for_each_continue(head, *pprev, tbl, hash) { struct rhlist_head *plist; struct rhlist_head *list; @@ -736,7 +770,7 @@ slow_path: if (unlikely(rht_grow_above_100(ht, tbl))) goto slow_path; - head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + head = rht_dereference_bucket(*pprev, tbl, hash); RCU_INIT_POINTER(obj->next, head); if (rhlist) { @@ -746,7 +780,7 @@ slow_path: RCU_INIT_POINTER(list->next, NULL); } - rcu_assign_pointer(tbl->buckets[hash], obj); + rcu_assign_pointer(*pprev, obj); atomic_inc(&ht->nelems); if (rht_grow_above_75(ht, tbl)) @@ -955,8 +989,8 @@ static inline int __rhashtable_remove_fast_one( spin_lock_bh(lock); - pprev = &tbl->buckets[hash]; - rht_for_each(he, tbl, hash) { + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(he, *pprev, tbl, hash) { struct rhlist_head *list; list = container_of(he, struct rhlist_head, rhead); @@ -1107,8 +1141,8 @@ static inline int __rhashtable_replace_fast( spin_lock_bh(lock); - pprev = &tbl->buckets[hash]; - rht_for_each(he, tbl, hash) { + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(he, *pprev, tbl, hash) { if (he != obj_old) { pprev = &he->next; continue; diff --git a/include/linux/sctp.h b/include/linux/sctp.h index b055788de0cf..7a4804c4a593 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -749,4 +749,28 @@ struct sctp_strreset_addstrm { __u16 reserved; }; +enum { + SCTP_STRRESET_NOTHING_TO_DO = 0x00, + SCTP_STRRESET_PERFORMED = 0x01, + SCTP_STRRESET_DENIED = 0x02, + SCTP_STRRESET_ERR_WRONG_SSN = 0x03, + SCTP_STRRESET_ERR_IN_PROGRESS = 0x04, + SCTP_STRRESET_ERR_BAD_SEQNO = 0x05, + SCTP_STRRESET_IN_PROGRESS = 0x06, +}; + +struct sctp_strreset_resp { + sctp_paramhdr_t param_hdr; + __u32 response_seq; + __u32 result; +}; + +struct sctp_strreset_resptsn { + sctp_paramhdr_t param_hdr; + __u32 response_seq; + __u32 result; + __u32 
senders_next_tsn; + __u32 receivers_next_tsn; +}; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index be5c12a5c375..269fd78bb0ae 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -481,6 +481,11 @@ static inline bool tc_flags_valid(u32 flags) return true; } +static inline bool tc_in_hw(u32 flags) +{ + return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false; +} + enum tc_fl_command { TC_CLSFLOWER_REPLACE, TC_CLSFLOWER_DESTROY, diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 3567c971cf3b..b07a745ab69f 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -60,11 +60,14 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM }; #define SCTP_NUM_PRSCTP_CHUNK_TYPES 1 +#define SCTP_NUM_RECONF_CHUNK_TYPES 1 + #define SCTP_NUM_AUTH_CHUNK_TYPES 1 #define SCTP_NUM_CHUNK_TYPES (SCTP_NUM_BASE_CHUNK_TYPES + \ SCTP_NUM_ADDIP_CHUNK_TYPES +\ SCTP_NUM_PRSCTP_CHUNK_TYPES +\ + SCTP_NUM_RECONF_CHUNK_TYPES +\ SCTP_NUM_AUTH_CHUNK_TYPES) /* These are the different flavours of event. */ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 6dfc5536a3e6..1f71ee5ab518 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -596,7 +596,9 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) */ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) { - if (t->dst && !dst_check(t->dst, t->dst_cookie)) + if (t->dst && (!dst_check(t->dst, t->dst_cookie) || + t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)), + SCTP_DEFAULT_MINSEGMENT))) sctp_transport_dst_release(t); return t->dst; diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 3675fde3a26e..b6f682ec184a 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -135,6 +135,7 @@ sctp_state_fn_t sctp_sf_do_8_5_1_E_sa; sctp_state_fn_t sctp_sf_cookie_echoed_err; sctp_state_fn_t sctp_sf_do_asconf; sctp_state_fn_t sctp_sf_do_asconf_ack; +sctp_state_fn_t sctp_sf_do_reconf; sctp_state_fn_t sctp_sf_do_9_2_reshutack; sctp_state_fn_t sctp_sf_eat_fwd_tsn; sctp_state_fn_t sctp_sf_eat_fwd_tsn_fast; @@ -270,9 +271,29 @@ struct sctp_chunk *sctp_make_strreset_tsnreq( struct sctp_chunk *sctp_make_strreset_addstrm( const struct sctp_association *asoc, __u16 out, __u16 in); +struct sctp_chunk *sctp_make_strreset_resp( + const struct sctp_association *asoc, + __u32 result, __u32 sn); +struct sctp_chunk *sctp_make_strreset_tsnresp( + struct sctp_association *asoc, + __u32 result, __u32 sn, + __u32 sender_tsn, __u32 receiver_tsn); +bool sctp_verify_reconf(const struct sctp_association *asoc, + struct sctp_chunk *chunk, + struct sctp_paramhdr **errp); void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); +/* Prototypes for stream-processing functions. */ +struct sctp_chunk *sctp_process_strreset_outreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp); +struct sctp_chunk *sctp_process_strreset_inreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp); + /* Prototypes for statetable processing. */ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 387c802bf248..a244db5e5ff7 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -497,6 +497,7 @@ struct sctp_datamsg { /* Did the message fail to send? 
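Aside — a hedged sketch of how the SCTP_STRRESET_* result codes above pair with the new sctp_make_strreset_resp() prototype; the surrounding handler and its policy decision are hypothetical, not from the patch.

        /* Hypothetical receive-path fragment: answer an incoming stream
         * reset request with one of the result codes defined above.
         */
        static struct sctp_chunk *my_answer_reset(struct sctp_association *asoc,
                                                  __u32 request_seq, bool allowed)
        {
                __u32 result = allowed ? SCTP_STRRESET_PERFORMED
                                       : SCTP_STRRESET_DENIED;

                return sctp_make_strreset_resp(asoc, result, request_seq);
        }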
*/ int send_error; u8 send_failed:1, + force_delay:1, can_delay; /* should this message be Nagle delayed */ }; diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 2c098cd7e7e2..324b5965fc4d 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -128,6 +128,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_authkey( struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event( const struct sctp_association *asoc, gfp_t gfp); +struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( + const struct sctp_association *asoc, __u16 flags, + __u16 stream_num, __u16 *stream_list, gfp_t gfp); + void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, struct msghdr *); void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, diff --git a/include/net/xfrm.h b/include/net/xfrm.h index d9a81dcef53e..14d82bf16692 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -280,9 +280,7 @@ struct net_device; struct xfrm_type; struct xfrm_dst; struct xfrm_policy_afinfo { - unsigned short family; struct dst_ops *dst_ops; - void (*garbage_collect)(struct net *net); struct dst_entry *(*dst_lookup)(struct net *net, int tos, int oif, const xfrm_address_t *saddr, @@ -303,8 +301,8 @@ struct xfrm_policy_afinfo { struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig); }; -int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); -int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); +int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family); +void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo); void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c); void km_state_notify(struct xfrm_state *x, const struct km_event *c); @@ -349,13 +347,12 @@ struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family); struct xfrm_input_afinfo { unsigned int family; - struct module *owner; int (*callback)(struct sk_buff *skb, u8 protocol, int err); }; -int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo); -int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo); +int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo); +int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo); void xfrm_state_delete_tunnel(struct xfrm_state *x); @@ -501,6 +498,7 @@ struct xfrm_tmpl { }; #define XFRM_MAX_DEPTH 6 +#define XFRM_MAX_OFFLOAD_DEPTH 1 struct xfrm_policy_walk_entry { struct list_head all; @@ -684,6 +682,7 @@ struct xfrm_spi_skb_cb { unsigned int daddroff; unsigned int family; + __be32 seq; }; #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0])) @@ -976,10 +975,41 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); +struct xfrm_offload { + /* Output sequence number for replay protection on offloading. 
*/ + struct { + __u32 low; + __u32 hi; + } seq; + + __u32 flags; +#define SA_DELETE_REQ 1 +#define CRYPTO_DONE 2 +#define CRYPTO_NEXT_DONE 4 +#define CRYPTO_FALLBACK 8 +#define XFRM_GSO_SEGMENT 16 +#define XFRM_GRO 32 + + __u32 status; +#define CRYPTO_SUCCESS 1 +#define CRYPTO_GENERIC_ERROR 2 +#define CRYPTO_TRANSPORT_AH_AUTH_FAILED 4 +#define CRYPTO_TRANSPORT_ESP_AUTH_FAILED 8 +#define CRYPTO_TUNNEL_AH_AUTH_FAILED 16 +#define CRYPTO_TUNNEL_ESP_AUTH_FAILED 32 +#define CRYPTO_INVALID_PACKET_SYNTAX 64 +#define CRYPTO_INVALID_PROTOCOL 128 + + __u8 proto; +}; + struct sec_path { atomic_t refcnt; int len; + int olen; + struct xfrm_state *xvec[XFRM_MAX_DEPTH]; + struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH]; }; static inline int secpath_exists(struct sk_buff *skb) @@ -1009,6 +1039,7 @@ secpath_put(struct sec_path *sp) } struct sec_path *secpath_dup(struct sec_path *src); +int secpath_set(struct sk_buff *skb); static inline void secpath_reset(struct sk_buff *skb) @@ -1170,6 +1201,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk) } void xfrm_garbage_collect(struct net *net); +void xfrm_garbage_collect_deferred(struct net *net); #else @@ -1521,6 +1553,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm4_transport_finish(struct sk_buff *skb, int async); int xfrm4_rcv(struct sk_buff *skb); +int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq); static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) { @@ -1776,6 +1809,15 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb) { return skb->sp->xvec[skb->sp->len - 1]; } +static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb) +{ + struct sec_path *sp = skb->sp; + + if (!sp || !sp->olen || sp->len != sp->olen) + return NULL; + + return &sp->ovec[sp->olen - 1]; +} #endif static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e07fd5a324e6..0539a0ceef38 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -123,6 +123,12 @@ enum bpf_attach_type { #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE +/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command + * for the given target_fd cgroup, descendant cgroups will be able to + * override the effective bpf program that was inherited from this cgroup. + */ +#define BPF_F_ALLOW_OVERRIDE (1U << 0) + #define BPF_PSEUDO_MAP_FD 1 /* flags for BPF_MAP_UPDATE_ELEM command */ @@ -178,6 +184,7 @@ union bpf_attr { __u32 target_fd; /* container object to attach to */ __u32 attach_bpf_fd; /* eBPF program to attach */ __u32 attach_type; + __u32 attach_flags; }; } __attribute__((aligned(8))); diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h index f6598d1c886e..85bbb1799df3 100644 --- a/include/uapi/linux/ipv6_route.h +++ b/include/uapi/linux/ipv6_route.h @@ -14,6 +14,7 @@ #define _UAPI_LINUX_IPV6_ROUTE_H #include <linux/types.h> +#include <linux/in6.h> /* For struct in6_addr. 
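Aside — a sketch of the intended use of the new xfrm_offload() accessor and the CRYPTO_* state bits above; the receive-path helper name is an assumption, not kernel API.

        /* Hypothetical RX fragment: only treat the packet as verified when
         * an offload descriptor exists, crypto completed, and it succeeded.
         */
        static bool my_rx_crypto_ok(struct sk_buff *skb)
        {
                struct xfrm_offload *xo = xfrm_offload(skb);

                return xo && (xo->flags & CRYPTO_DONE) &&
                       xo->status == CRYPTO_SUCCESS;
        }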
*/ #define RTF_DEFAULT 0x00010000 /* default - learned via ND */ #define RTF_ALLONLINK 0x00020000 /* (deprecated and will be removed) diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index 85ddb74fcd1c..b23c1914a182 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -9,9 +9,8 @@ #include <linux/types.h> #include <linux/socket.h> -#ifndef __KERNEL__ -#include <netinet/in.h> -#endif +#include <linux/in.h> +#include <linux/in6.h> #define IPPROTO_L2TP 115 @@ -31,7 +30,7 @@ struct sockaddr_l2tpip { __u32 l2tp_conn_id; /* Connection ID of tunnel */ /* Pad to size of `struct sockaddr'. */ - unsigned char __pad[sizeof(struct sockaddr) - + unsigned char __pad[__SOCK_SIZE__ - sizeof(__kernel_sa_family_t) - sizeof(__be16) - sizeof(struct in_addr) - sizeof(__u32)]; diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h index cf943016930f..1fe4c1e7d66e 100644 --- a/include/uapi/linux/mroute.h +++ b/include/uapi/linux/mroute.h @@ -3,6 +3,7 @@ #include <linux/sockios.h> #include <linux/types.h> +#include <linux/in.h> /* For struct in_addr. */ /* Based on the MROUTING 3.5 defines primarily to keep * source compatibility with BSD. diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h index 5062fb5751e1..ed5721148768 100644 --- a/include/uapi/linux/mroute6.h +++ b/include/uapi/linux/mroute6.h @@ -4,6 +4,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/sockios.h> +#include <linux/in6.h> /* For struct sockaddr_in6. */ /* * Based on the MROUTING 3.5 defines primarily to keep diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h index 45dfad509c4d..7e5f0f3e31bf 100644 --- a/include/uapi/linux/netconf.h +++ b/include/uapi/linux/netconf.h @@ -16,6 +16,7 @@ enum { NETCONFA_MC_FORWARDING, NETCONFA_PROXY_NEIGH, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, + NETCONFA_INPUT, __NETCONFA_MAX }; #define NETCONFA_MAX (__NETCONFA_MAX - 1) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 345551e71410..7a69f2a4ca0c 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -103,8 +103,10 @@ enum { #define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1) /* tca flags definitions */ -#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) -#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) +#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) /* don't offload filter to HW */ +#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) /* don't use filter in SW */ +#define TCA_CLS_FLAGS_IN_HW (1 << 2) /* filter is offloaded to HW */ +#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */ /* U32 filters */ diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 3833113ab2c0..47c03ca5c404 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -35,6 +35,7 @@ #define _LINUX_RDS_H #include <linux/types.h> +#include <linux/socket.h> /* For __kernel_sockaddr_storage. 
*/ #define RDS_IB_ABI_VERSION 0x301 @@ -117,8 +118,8 @@ #define RDS_INFO_LAST 10010 struct rds_info_counter { - uint8_t name[32]; - uint64_t value; + __u8 name[32]; + __u64 value; } __attribute__((packed)); #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 @@ -128,35 +129,35 @@ struct rds_info_counter { #define TRANSNAMSIZ 16 struct rds_info_connection { - uint64_t next_tx_seq; - uint64_t next_rx_seq; + __u64 next_tx_seq; + __u64 next_rx_seq; __be32 laddr; __be32 faddr; - uint8_t transport[TRANSNAMSIZ]; /* null term ascii */ - uint8_t flags; + __u8 transport[TRANSNAMSIZ]; /* null term ascii */ + __u8 flags; } __attribute__((packed)); #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 struct rds_info_message { - uint64_t seq; - uint32_t len; + __u64 seq; + __u32 len; __be32 laddr; __be32 faddr; __be16 lport; __be16 fport; - uint8_t flags; + __u8 flags; } __attribute__((packed)); struct rds_info_socket { - uint32_t sndbuf; + __u32 sndbuf; __be32 bound_addr; __be32 connected_addr; __be16 bound_port; __be16 connected_port; - uint32_t rcvbuf; - uint64_t inum; + __u32 rcvbuf; + __u64 inum; } __attribute__((packed)); struct rds_info_tcp_socket { @@ -164,25 +165,25 @@ struct rds_info_tcp_socket { __be16 local_port; __be32 peer_addr; __be16 peer_port; - uint64_t hdr_rem; - uint64_t data_rem; - uint32_t last_sent_nxt; - uint32_t last_expected_una; - uint32_t last_seen_una; + __u64 hdr_rem; + __u64 data_rem; + __u32 last_sent_nxt; + __u32 last_expected_una; + __u32 last_seen_una; } __attribute__((packed)); #define RDS_IB_GID_LEN 16 struct rds_info_rdma_connection { __be32 src_addr; __be32 dst_addr; - uint8_t src_gid[RDS_IB_GID_LEN]; - uint8_t dst_gid[RDS_IB_GID_LEN]; - - uint32_t max_send_wr; - uint32_t max_recv_wr; - uint32_t max_send_sge; - uint32_t rdma_mr_max; - uint32_t rdma_mr_size; + __u8 src_gid[RDS_IB_GID_LEN]; + __u8 dst_gid[RDS_IB_GID_LEN]; + + __u32 max_send_wr; + __u32 max_recv_wr; + __u32 max_send_sge; + __u32 rdma_mr_max; + __u32 rdma_mr_size; }; /* RDS message Receive Path Latency points */ @@ -242,70 +243,70 @@ struct rds_cmsg_rx_trace { * (so that the application does not have to worry about * alignment). 
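Aside — the uint64_t/uint32_t churn above is not cosmetic: uapi headers may not depend on <stdint.h>, while <linux/types.h> always provides the __u8/__u32/__u64 family. A minimal userspace sketch that now builds against the header alone; the helper is illustrative only.

        #include <linux/types.h>
        #include <linux/rds.h>

        /* Compiles with only kernel uapi headers; no <stdint.h> needed. */
        static __u64 rds_counter_value(const struct rds_info_counter *ctr)
        {
                return ctr->value;
        }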
*/ -typedef uint64_t rds_rdma_cookie_t; +typedef __u64 rds_rdma_cookie_t; struct rds_iovec { - uint64_t addr; - uint64_t bytes; + __u64 addr; + __u64 bytes; }; struct rds_get_mr_args { struct rds_iovec vec; - uint64_t cookie_addr; - uint64_t flags; + __u64 cookie_addr; + __u64 flags; }; struct rds_get_mr_for_dest_args { - struct sockaddr_storage dest_addr; + struct __kernel_sockaddr_storage dest_addr; struct rds_iovec vec; - uint64_t cookie_addr; - uint64_t flags; + __u64 cookie_addr; + __u64 flags; }; struct rds_free_mr_args { rds_rdma_cookie_t cookie; - uint64_t flags; + __u64 flags; }; struct rds_rdma_args { rds_rdma_cookie_t cookie; struct rds_iovec remote_vec; - uint64_t local_vec_addr; - uint64_t nr_local; - uint64_t flags; - uint64_t user_token; + __u64 local_vec_addr; + __u64 nr_local; + __u64 flags; + __u64 user_token; }; struct rds_atomic_args { rds_rdma_cookie_t cookie; - uint64_t local_addr; - uint64_t remote_addr; + __u64 local_addr; + __u64 remote_addr; union { struct { - uint64_t compare; - uint64_t swap; + __u64 compare; + __u64 swap; } cswp; struct { - uint64_t add; + __u64 add; } fadd; struct { - uint64_t compare; - uint64_t swap; - uint64_t compare_mask; - uint64_t swap_mask; + __u64 compare; + __u64 swap; + __u64 compare_mask; + __u64 swap_mask; } m_cswp; struct { - uint64_t add; - uint64_t nocarry_mask; + __u64 add; + __u64 nocarry_mask; } m_fadd; }; - uint64_t flags; - uint64_t user_token; + __u64 flags; + __u64 user_token; }; struct rds_rdma_notify { - uint64_t user_token; - int32_t status; + __u64 user_token; + __s32 status; }; #define RDS_RDMA_SUCCESS 0 diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 8c93ad1ef9ab..6546917d605a 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -658,6 +658,8 @@ enum rtnetlink_groups { #define RTNLGRP_MPLS_ROUTE RTNLGRP_MPLS_ROUTE RTNLGRP_NSID, #define RTNLGRP_NSID RTNLGRP_NSID + RTNLGRP_MPLS_NETCONF, +#define RTNLGRP_MPLS_NETCONF RTNLGRP_MPLS_NETCONF __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index a91a9cccbae6..d3ae381fcf33 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -490,6 +490,18 @@ struct sctp_sender_dry_event { sctp_assoc_t sender_dry_assoc_id; }; +#define SCTP_STREAM_RESET_INCOMING_SSN 0x0001 +#define SCTP_STREAM_RESET_OUTGOING_SSN 0x0002 +#define SCTP_STREAM_RESET_DENIED 0x0004 +#define SCTP_STREAM_RESET_FAILED 0x0008 +struct sctp_stream_reset_event { + __u16 strreset_type; + __u16 strreset_flags; + __u32 strreset_length; + sctp_assoc_t strreset_assoc_id; + __u16 strreset_stream_list[]; +}; + /* * Described in Section 7.3 * Ancillary Data and Notification Interest Options @@ -505,6 +517,7 @@ struct sctp_event_subscribe { __u8 sctp_adaptation_layer_event; __u8 sctp_authentication_event; __u8 sctp_sender_dry_event; + __u8 sctp_stream_reset_event; }; /* @@ -529,6 +542,7 @@ union sctp_notification { struct sctp_pdapi_event sn_pdapi_event; struct sctp_authkey_event sn_authkey_event; struct sctp_sender_dry_event sn_sender_dry_event; + struct sctp_stream_reset_event sn_strreset_event; }; /* Section 5.3.1 @@ -556,6 +570,8 @@ enum sctp_sn_type { #define SCTP_AUTHENTICATION_INDICATION SCTP_AUTHENTICATION_EVENT SCTP_SENDER_DRY_EVENT, #define SCTP_SENDER_DRY_EVENT SCTP_SENDER_DRY_EVENT + SCTP_STREAM_RESET_EVENT, +#define SCTP_STREAM_RESET_EVENT SCTP_STREAM_RESET_EVENT }; /* Notification error codes used to fill up the error fields in some diff --git 
a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 46e8a2e369f9..45184a2ef66c 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -362,8 +362,8 @@ enum v4l2_quantization { /* * The default for R'G'B' quantization is always full range, except * for the BT2020 colorspace. For Y'CbCr the quantization is always - * limited range, except for COLORSPACE_JPEG, SRGB, ADOBERGB, - * XV601 or XV709: those are full range. + * limited range, except for COLORSPACE_JPEG, XV601 or XV709: those + * are full range. */ V4L2_QUANTIZATION_DEFAULT = 0, V4L2_QUANTIZATION_FULL_RANGE = 1, @@ -379,8 +379,7 @@ enum v4l2_quantization { (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \ V4L2_QUANTIZATION_LIM_RANGE : \ (((is_rgb_or_hsv) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \ - (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) || \ - (colsp) == V4L2_COLORSPACE_ADOBERGB || (colsp) == V4L2_COLORSPACE_SRGB ? \ + (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \ V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)) enum v4l2_priority { diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 3d55d95dcf49..6b6f41f0b211 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -269,7 +269,7 @@ static const struct bpf_map_ops array_ops = { .map_delete_elem = array_map_delete_elem, }; -static struct bpf_map_type_list array_type __read_mostly = { +static struct bpf_map_type_list array_type __ro_after_init = { .ops = &array_ops, .type = BPF_MAP_TYPE_ARRAY, }; @@ -283,7 +283,7 @@ static const struct bpf_map_ops percpu_array_ops = { .map_delete_elem = array_map_delete_elem, }; -static struct bpf_map_type_list percpu_array_type __read_mostly = { +static struct bpf_map_type_list percpu_array_type __ro_after_init = { .ops = &percpu_array_ops, .type = BPF_MAP_TYPE_PERCPU_ARRAY, }; @@ -409,7 +409,7 @@ static const struct bpf_map_ops prog_array_ops = { .map_fd_put_ptr = prog_fd_array_put_ptr, }; -static struct bpf_map_type_list prog_array_type __read_mostly = { +static struct bpf_map_type_list prog_array_type __ro_after_init = { .ops = &prog_array_ops, .type = BPF_MAP_TYPE_PROG_ARRAY, }; @@ -522,7 +522,7 @@ static const struct bpf_map_ops perf_event_array_ops = { .map_release = perf_event_fd_array_release, }; -static struct bpf_map_type_list perf_event_array_type __read_mostly = { +static struct bpf_map_type_list perf_event_array_type __ro_after_init = { .ops = &perf_event_array_ops, .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, }; @@ -564,7 +564,7 @@ static const struct bpf_map_ops cgroup_array_ops = { .map_fd_put_ptr = cgroup_fd_array_put_ptr, }; -static struct bpf_map_type_list cgroup_array_type __read_mostly = { +static struct bpf_map_type_list cgroup_array_type __ro_after_init = { .ops = &cgroup_array_ops, .type = BPF_MAP_TYPE_CGROUP_ARRAY, }; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index a515f7b007c6..da0f53690295 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -52,6 +52,7 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent) e = rcu_dereference_protected(parent->bpf.effective[type], lockdep_is_held(&cgroup_mutex)); rcu_assign_pointer(cgrp->bpf.effective[type], e); + cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type]; } } @@ -82,30 +83,63 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent) * * Must be called with cgroup_mutex held. 
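Aside, referring back to the linux/sctp.h uapi hunk above — a hedged userspace sketch of opting in to the new SCTP_STREAM_RESET_EVENT notification; socket creation and error handling are elided, and fd is assumed to be a bound SCTP socket.

        #include <string.h>
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <linux/sctp.h>

        static int subscribe_stream_reset(int fd)
        {
                struct sctp_event_subscribe ev;

                memset(&ev, 0, sizeof(ev));
                ev.sctp_data_io_event = 1;
                ev.sctp_stream_reset_event = 1; /* new in this patch */

                return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
                                  &ev, sizeof(ev));
        }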
*/ -void __cgroup_bpf_update(struct cgroup *cgrp, - struct cgroup *parent, - struct bpf_prog *prog, - enum bpf_attach_type type) +int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent, + struct bpf_prog *prog, enum bpf_attach_type type, + bool new_overridable) { - struct bpf_prog *old_prog, *effective; + struct bpf_prog *old_prog, *effective = NULL; struct cgroup_subsys_state *pos; + bool overridable = true; - old_prog = xchg(cgrp->bpf.prog + type, prog); + if (parent) { + overridable = !parent->bpf.disallow_override[type]; + effective = rcu_dereference_protected(parent->bpf.effective[type], + lockdep_is_held(&cgroup_mutex)); + } + + if (prog && effective && !overridable) + /* if parent has non-overridable prog attached, disallow + * attaching new programs to a descendant cgroup + */ + return -EPERM; + + if (prog && effective && overridable != new_overridable) + /* if parent has overridable prog attached, only + * allow overridable programs in a descendant cgroup + */ + return -EPERM; - effective = (!prog && parent) ? - rcu_dereference_protected(parent->bpf.effective[type], - lockdep_is_held(&cgroup_mutex)) : - prog; + old_prog = cgrp->bpf.prog[type]; + + if (prog) { + overridable = new_overridable; + effective = prog; + if (old_prog && + cgrp->bpf.disallow_override[type] == new_overridable) + /* disallow attaching non-overridable on top + * of existing overridable in this cgroup + * and vice versa + */ + return -EPERM; + } + + if (!prog && !old_prog) + /* report error when trying to detach and nothing is attached */ + return -ENOENT; + + cgrp->bpf.prog[type] = prog; css_for_each_descendant_pre(pos, &cgrp->self) { struct cgroup *desc = container_of(pos, struct cgroup, self); /* skip the subtree if the descendant has its own program */ - if (desc->bpf.prog[type] && desc != cgrp) + if (desc->bpf.prog[type] && desc != cgrp) { pos = css_rightmost_descendant(pos); - else + } else { rcu_assign_pointer(desc->bpf.effective[type], effective); + desc->bpf.disallow_override[type] = !overridable; + } } if (prog) @@ -115,6 +149,7 @@ void __cgroup_bpf_update(struct cgroup *cgrp, bpf_prog_put(old_prog); static_branch_dec(&cgroup_bpf_enabled_key); } + return 0; } /** diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index fddd76b1b627..f45827e205d3 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -28,6 +28,9 @@ #include <linux/moduleloader.h> #include <linux/bpf.h> #include <linux/frame.h> +#include <linux/rbtree_latch.h> +#include <linux/kallsyms.h> +#include <linux/rcupdate.h> #include <asm/unaligned.h> @@ -95,6 +98,8 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) fp->aux = aux; fp->aux->prog = fp; + INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode); + return fp; } EXPORT_SYMBOL_GPL(bpf_prog_alloc); @@ -290,6 +295,206 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, } #ifdef CONFIG_BPF_JIT +static __always_inline void +bpf_get_prog_addr_region(const struct bpf_prog *prog, + unsigned long *symbol_start, + unsigned long *symbol_end) +{ + const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog); + unsigned long addr = (unsigned long)hdr; + + WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); + + *symbol_start = addr; + *symbol_end = addr + hdr->pages * PAGE_SIZE; +} + +static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym) +{ + BUILD_BUG_ON(sizeof("bpf_prog_") + + sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN); + + sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); + sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); + *sym = 0; +} + 
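Aside — a hedged userspace sketch of the override semantics wired up above: attaching with BPF_F_ALLOW_OVERRIDE lets descendant cgroups install their own program. prog_fd/cgroup_fd acquisition is elided, and the raw syscall is used since no wrapper takes attach_flags at this point.

        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/bpf.h>

        static int attach_overridable(int cgroup_fd, int prog_fd)
        {
                union bpf_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.target_fd = cgroup_fd;
                attr.attach_bpf_fd = prog_fd;
                attr.attach_type = BPF_CGROUP_INET_INGRESS;
                attr.attach_flags = BPF_F_ALLOW_OVERRIDE;

                /* Without the flag, an attach in a child cgroup gets -EPERM. */
                return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
        }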
+static __always_inline unsigned long +bpf_get_prog_addr_start(struct latch_tree_node *n) +{ + unsigned long symbol_start, symbol_end; + const struct bpf_prog_aux *aux; + + aux = container_of(n, struct bpf_prog_aux, ksym_tnode); + bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end); + + return symbol_start; +} + +static __always_inline bool bpf_tree_less(struct latch_tree_node *a, + struct latch_tree_node *b) +{ + return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b); +} + +static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) +{ + unsigned long val = (unsigned long)key; + unsigned long symbol_start, symbol_end; + const struct bpf_prog_aux *aux; + + aux = container_of(n, struct bpf_prog_aux, ksym_tnode); + bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end); + + if (val < symbol_start) + return -1; + if (val >= symbol_end) + return 1; + + return 0; +} + +static const struct latch_tree_ops bpf_tree_ops = { + .less = bpf_tree_less, + .comp = bpf_tree_comp, +}; + +static DEFINE_SPINLOCK(bpf_lock); +static LIST_HEAD(bpf_kallsyms); +static struct latch_tree_root bpf_tree __cacheline_aligned; + +int bpf_jit_kallsyms __read_mostly; + +static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux) +{ + WARN_ON_ONCE(!list_empty(&aux->ksym_lnode)); + list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms); + latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops); +} + +static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux) +{ + if (list_empty(&aux->ksym_lnode)) + return; + + latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops); + list_del_rcu(&aux->ksym_lnode); +} + +static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) +{ + return fp->jited && !bpf_prog_was_classic(fp); +} + +static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) +{ + return list_empty(&fp->aux->ksym_lnode) || + fp->aux->ksym_lnode.prev == LIST_POISON2; +} + +void bpf_prog_kallsyms_add(struct bpf_prog *fp) +{ + unsigned long flags; + + if (!bpf_prog_kallsyms_candidate(fp) || + !capable(CAP_SYS_ADMIN)) + return; + + spin_lock_irqsave(&bpf_lock, flags); + bpf_prog_ksym_node_add(fp->aux); + spin_unlock_irqrestore(&bpf_lock, flags); +} + +void bpf_prog_kallsyms_del(struct bpf_prog *fp) +{ + unsigned long flags; + + if (!bpf_prog_kallsyms_candidate(fp)) + return; + + spin_lock_irqsave(&bpf_lock, flags); + bpf_prog_ksym_node_del(fp->aux); + spin_unlock_irqrestore(&bpf_lock, flags); +} + +static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr) +{ + struct latch_tree_node *n; + + if (!bpf_jit_kallsyms_enabled()) + return NULL; + + n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); + return n ? 
+ container_of(n, struct bpf_prog_aux, ksym_tnode)->prog : + NULL; +} + +const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, + unsigned long *off, char *sym) +{ + unsigned long symbol_start, symbol_end; + struct bpf_prog *prog; + char *ret = NULL; + + rcu_read_lock(); + prog = bpf_prog_kallsyms_find(addr); + if (prog) { + bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end); + bpf_get_prog_name(prog, sym); + + ret = sym; + if (size) + *size = symbol_end - symbol_start; + if (off) + *off = addr - symbol_start; + } + rcu_read_unlock(); + + return ret; +} + +bool is_bpf_text_address(unsigned long addr) +{ + bool ret; + + rcu_read_lock(); + ret = bpf_prog_kallsyms_find(addr) != NULL; + rcu_read_unlock(); + + return ret; +} + +int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym) +{ + unsigned long symbol_start, symbol_end; + struct bpf_prog_aux *aux; + unsigned int it = 0; + int ret = -ERANGE; + + if (!bpf_jit_kallsyms_enabled()) + return ret; + + rcu_read_lock(); + list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) { + if (it++ != symnum) + continue; + + bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end); + bpf_get_prog_name(aux->prog, sym); + + *value = symbol_start; + *type = BPF_SYM_ELF_TYPE; + + ret = 0; + break; + } + rcu_read_unlock(); + + return ret; +} + struct bpf_binary_header * bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, unsigned int alignment, @@ -326,6 +531,24 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr) module_memfree(hdr); } +/* This symbol is only overridden by archs that have different + * requirements than the usual eBPF JITs, f.e. when they only + * implement cBPF JIT, do not set images read-only, etc. + */ +void __weak bpf_jit_free(struct bpf_prog *fp) +{ + if (fp->jited) { + struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); + + bpf_jit_binary_unlock_ro(hdr); + bpf_jit_binary_free(hdr); + + WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); + } + + bpf_prog_unlock_free(fp); +} + int bpf_jit_harden __read_mostly; static int bpf_jit_blind_insn(const struct bpf_insn *from, @@ -1154,12 +1377,22 @@ const struct bpf_func_proto bpf_tail_call_proto = { .arg3_type = ARG_ANYTHING, }; -/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */ +/* Stub for JITs that only support cBPF. eBPF programs are interpreted. + * It is encouraged to implement bpf_int_jit_compile() instead, so that + * eBPF and implicitly also cBPF can get JITed! + */ struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog) { return prog; } +/* Stub for JITs that support eBPF. All cBPF code gets transformed into + * eBPF by the kernel and is later compiled by bpf_int_jit_compile(). 
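Aside — once the machinery above is active (JITed non-classic programs, a loader with CAP_SYS_ADMIN, and the bpf_jit_kallsyms knob enabled), each program appears as a bpf_prog_<tag> symbol. A hedged userspace sketch that lists them:

        #include <stdio.h>
        #include <string.h>

        /* Print the /proc/kallsyms entries exported by the code above. */
        static void list_bpf_progs(void)
        {
                char line[256];
                FILE *f = fopen("/proc/kallsyms", "r");

                if (!f)
                        return;
                while (fgets(line, sizeof(line), f))
                        if (strstr(line, " bpf_prog_"))
                                fputs(line, stdout);
                fclose(f);
        }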
+ */ +void __weak bpf_jit_compile(struct bpf_prog *prog) +{ +} + bool __weak bpf_helper_changes_pkt_data(void *func) { return false; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index a753bbe7df0a..3ea87fb19a94 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1023,7 +1023,7 @@ static const struct bpf_map_ops htab_ops = { .map_delete_elem = htab_map_delete_elem, }; -static struct bpf_map_type_list htab_type __read_mostly = { +static struct bpf_map_type_list htab_type __ro_after_init = { .ops = &htab_ops, .type = BPF_MAP_TYPE_HASH, }; @@ -1037,7 +1037,7 @@ static const struct bpf_map_ops htab_lru_ops = { .map_delete_elem = htab_lru_map_delete_elem, }; -static struct bpf_map_type_list htab_lru_type __read_mostly = { +static struct bpf_map_type_list htab_lru_type __ro_after_init = { .ops = &htab_lru_ops, .type = BPF_MAP_TYPE_LRU_HASH, }; @@ -1124,7 +1124,7 @@ static const struct bpf_map_ops htab_percpu_ops = { .map_delete_elem = htab_map_delete_elem, }; -static struct bpf_map_type_list htab_percpu_type __read_mostly = { +static struct bpf_map_type_list htab_percpu_type __ro_after_init = { .ops = &htab_percpu_ops, .type = BPF_MAP_TYPE_PERCPU_HASH, }; @@ -1138,7 +1138,7 @@ static const struct bpf_map_ops htab_lru_percpu_ops = { .map_delete_elem = htab_lru_map_delete_elem, }; -static struct bpf_map_type_list htab_lru_percpu_type __read_mostly = { +static struct bpf_map_type_list htab_lru_percpu_type __ro_after_init = { .ops = &htab_lru_percpu_ops, .type = BPF_MAP_TYPE_LRU_PERCPU_HASH, }; diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index e0f6a0bd279b..8bfe0afaee10 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -508,7 +508,7 @@ static const struct bpf_map_ops trie_ops = { .map_delete_elem = trie_delete_elem, }; -static struct bpf_map_type_list trie_type __read_mostly = { +static struct bpf_map_type_list trie_type __ro_after_init = { .ops = &trie_ops, .type = BPF_MAP_TYPE_LPM_TRIE, }; diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index be8519148c25..22aa45cd0324 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -273,7 +273,7 @@ static const struct bpf_map_ops stack_map_ops = { .map_delete_elem = stack_map_delete_elem, }; -static struct bpf_map_type_list stack_map_type __read_mostly = { +static struct bpf_map_type_list stack_map_type __ro_after_init = { .ops = &stack_map_ops, .type = BPF_MAP_TYPE_STACK_TRACE, }; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 08a4d287226b..461eb1e66a0f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -707,6 +707,7 @@ void bpf_prog_put(struct bpf_prog *prog) { if (atomic_dec_and_test(&prog->aux->refcnt)) { trace_bpf_prog_put_rcu(prog); + bpf_prog_kallsyms_del(prog); call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); } } @@ -903,6 +904,7 @@ static int bpf_prog_load(union bpf_attr *attr) /* failed to allocate fd */ goto free_used_maps; + bpf_prog_kallsyms_add(prog); trace_bpf_prog_load(prog, err); return err; @@ -935,13 +937,14 @@ static int bpf_obj_get(const union bpf_attr *attr) #ifdef CONFIG_CGROUP_BPF -#define BPF_PROG_ATTACH_LAST_FIELD attach_type +#define BPF_PROG_ATTACH_LAST_FIELD attach_flags static int bpf_prog_attach(const union bpf_attr *attr) { + enum bpf_prog_type ptype; struct bpf_prog *prog; struct cgroup *cgrp; - enum bpf_prog_type ptype; + int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -949,6 +952,9 @@ static int bpf_prog_attach(const union bpf_attr *attr) if (CHECK_ATTR(BPF_PROG_ATTACH)) return -EINVAL; + if (attr->attach_flags 
& ~BPF_F_ALLOW_OVERRIDE) + return -EINVAL; + switch (attr->attach_type) { case BPF_CGROUP_INET_INGRESS: case BPF_CGROUP_INET_EGRESS: @@ -971,10 +977,13 @@ static int bpf_prog_attach(const union bpf_attr *attr) return PTR_ERR(cgrp); } - cgroup_bpf_update(cgrp, prog, attr->attach_type); + ret = cgroup_bpf_update(cgrp, prog, attr->attach_type, + attr->attach_flags & BPF_F_ALLOW_OVERRIDE); + if (ret) + bpf_prog_put(prog); cgroup_put(cgrp); - return 0; + return ret; } #define BPF_PROG_DETACH_LAST_FIELD attach_type @@ -982,6 +991,7 @@ static int bpf_prog_attach(const union bpf_attr *attr) static int bpf_prog_detach(const union bpf_attr *attr) { struct cgroup *cgrp; + int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -997,7 +1007,7 @@ static int bpf_prog_detach(const union bpf_attr *attr) if (IS_ERR(cgrp)) return PTR_ERR(cgrp); - cgroup_bpf_update(cgrp, NULL, attr->attach_type); + ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false); cgroup_put(cgrp); break; @@ -1005,7 +1015,7 @@ static int bpf_prog_detach(const union bpf_attr *attr) return -EINVAL; } - return 0; + return ret; } #endif /* CONFIG_CGROUP_BPF */ diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 688dd02af985..53bbca7c4859 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -6498,15 +6498,16 @@ static __init int cgroup_namespaces_init(void) subsys_initcall(cgroup_namespaces_init); #ifdef CONFIG_CGROUP_BPF -void cgroup_bpf_update(struct cgroup *cgrp, - struct bpf_prog *prog, - enum bpf_attach_type type) +int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog, + enum bpf_attach_type type, bool overridable) { struct cgroup *parent = cgroup_parent(cgrp); + int ret; mutex_lock(&cgroup_mutex); - __cgroup_bpf_update(cgrp, parent, prog, type); + ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable); mutex_unlock(&cgroup_mutex); + return ret; } #endif /* CONFIG_CGROUP_BPF */ diff --git a/kernel/events/core.c b/kernel/events/core.c index e5aaa806702d..e235bb991bdd 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3487,14 +3487,15 @@ struct perf_read_data { int ret; }; -static int find_cpu_to_read(struct perf_event *event, int local_cpu) +static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) { - int event_cpu = event->oncpu; u16 local_pkg, event_pkg; if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { - event_pkg = topology_physical_package_id(event_cpu); - local_pkg = topology_physical_package_id(local_cpu); + int local_cpu = smp_processor_id(); + + event_pkg = topology_physical_package_id(event_cpu); + local_pkg = topology_physical_package_id(local_cpu); if (event_pkg == local_pkg) return local_cpu; @@ -3624,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event) static int perf_event_read(struct perf_event *event, bool group) { - int ret = 0, cpu_to_read, local_cpu; + int event_cpu, ret = 0; /* * If event is enabled and currently active on a CPU, update the @@ -3637,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group) .ret = 0, }; - local_cpu = get_cpu(); - cpu_to_read = find_cpu_to_read(event, local_cpu); - put_cpu(); + event_cpu = READ_ONCE(event->oncpu); + if ((unsigned)event_cpu >= nr_cpu_ids) + return 0; + + preempt_disable(); + event_cpu = __perf_event_read_cpu(event, event_cpu); /* * Purposely ignore the smp_call_function_single() return * value. 
* - * If event->oncpu isn't a valid CPU it means the event got + * If event_cpu isn't a valid CPU it means the event got * scheduled out and that will have updated the event count. * * Therefore, either way, we'll have an up-to-date event count * after this. */ - (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1); + (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1); + preempt_enable(); ret = data.ret; } else if (event->state == PERF_EVENT_STATE_INACTIVE) { struct perf_event_context *ctx = event->ctx; diff --git a/kernel/extable.c b/kernel/extable.c index e3beec4a2339..bd82117ad424 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -20,6 +20,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/init.h> +#include <linux/filter.h> #include <asm/sections.h> #include <linux/uaccess.h> @@ -104,6 +105,8 @@ int __kernel_text_address(unsigned long addr) return 1; if (is_ftrace_trampoline(addr)) return 1; + if (is_bpf_text_address(addr)) + return 1; /* * There might be init symbols in saved stacktraces. * Give those symbols a chance to be printed in @@ -123,7 +126,11 @@ int kernel_text_address(unsigned long addr) return 1; if (is_module_text_address(addr)) return 1; - return is_ftrace_trampoline(addr); + if (is_ftrace_trampoline(addr)) + return 1; + if (is_bpf_text_address(addr)) + return 1; + return 0; } /* diff --git a/kernel/futex.c b/kernel/futex.c index 0842c8ca534b..cdf365036141 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -3323,4 +3323,4 @@ static int __init futex_init(void) return 0; } -__initcall(futex_init); +core_initcall(futex_init); diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index fafd1a3ef0da..6a3b249a2ae1 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -23,6 +23,7 @@ #include <linux/mm.h> #include <linux/ctype.h> #include <linux/slab.h> +#include <linux/filter.h> #include <linux/compiler.h> #include <asm/sections.h> @@ -300,10 +301,11 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { char namebuf[KSYM_NAME_LEN]; + if (is_ksym_addr(addr)) return !!get_symbol_pos(addr, symbolsize, offset); - - return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf); + return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) || + !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); } /* @@ -318,6 +320,8 @@ const char *kallsyms_lookup(unsigned long addr, unsigned long *offset, char **modname, char *namebuf) { + const char *ret; + namebuf[KSYM_NAME_LEN - 1] = 0; namebuf[0] = 0; @@ -333,9 +337,13 @@ const char *kallsyms_lookup(unsigned long addr, return namebuf; } - /* See if it's in a module. */ - return module_address_lookup(addr, symbolsize, offset, modname, - namebuf); + /* See if it's in a module or a BPF JITed image. */ + ret = module_address_lookup(addr, symbolsize, offset, + modname, namebuf); + if (!ret) + ret = bpf_address_lookup(addr, symbolsize, + offset, modname, namebuf); + return ret; } int lookup_symbol_name(unsigned long addr, char *symname) @@ -471,6 +479,7 @@ EXPORT_SYMBOL(__print_symbol); /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */ struct kallsym_iter { loff_t pos; + loff_t pos_mod_end; unsigned long value; unsigned int nameoff; /* If iterating in core kernel symbols. 
*/ char type; @@ -481,13 +490,27 @@ struct kallsym_iter { static int get_ksymbol_mod(struct kallsym_iter *iter) { - if (module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value, - &iter->type, iter->name, iter->module_name, - &iter->exported) < 0) + int ret = module_get_kallsym(iter->pos - kallsyms_num_syms, + &iter->value, &iter->type, + iter->name, iter->module_name, + &iter->exported); + if (ret < 0) { + iter->pos_mod_end = iter->pos; return 0; + } + return 1; } +static int get_ksymbol_bpf(struct kallsym_iter *iter) +{ + iter->module_name[0] = '\0'; + iter->exported = 0; + return bpf_get_kallsym(iter->pos - iter->pos_mod_end, + &iter->value, &iter->type, + iter->name) < 0 ? 0 : 1; +} + /* Returns space to next name. */ static unsigned long get_ksymbol_core(struct kallsym_iter *iter) { @@ -508,16 +531,30 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) iter->name[0] = '\0'; iter->nameoff = get_symbol_offset(new_pos); iter->pos = new_pos; + if (new_pos == 0) + iter->pos_mod_end = 0; +} + +static int update_iter_mod(struct kallsym_iter *iter, loff_t pos) +{ + iter->pos = pos; + + if (iter->pos_mod_end > 0 && + iter->pos_mod_end < iter->pos) + return get_ksymbol_bpf(iter); + + if (!get_ksymbol_mod(iter)) + return get_ksymbol_bpf(iter); + + return 1; } /* Returns false if pos at or past end of file. */ static int update_iter(struct kallsym_iter *iter, loff_t pos) { /* Module symbols can be accessed randomly. */ - if (pos >= kallsyms_num_syms) { - iter->pos = pos; - return get_ksymbol_mod(iter); - } + if (pos >= kallsyms_num_syms) + return update_iter_mod(iter, pos); /* If we're not on the desired position, reset to new position. */ if (pos != iter->pos) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 8b2696420abb..4ba3d34938c0 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1516,7 +1516,7 @@ static void call_console_drivers(int level, { struct console *con; - trace_console(text, len); + trace_console_rcuidle(text, len); if (!console_drivers) return; diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c index b6e4c16377c7..9c15a9124e83 100644 --- a/kernel/stacktrace.c +++ b/kernel/stacktrace.c @@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trace *trace, int spaces) if (WARN_ON(!trace->entries)) return; - for (i = 0; i < trace->nr_entries; i++) { - printk("%*c", 1 + spaces, ' '); - print_ip_sym(trace->entries[i]); - } + for (i = 0; i < trace->nr_entries; i++) + printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]); } EXPORT_SYMBOL_GPL(print_stack_trace); @@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_t size, struct stack_trace *trace, int spaces) { int i; - unsigned long ip; int generated; int total = 0; @@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_t size, return 0; for (i = 0; i < trace->nr_entries; i++) { - ip = trace->entries[i]; - generated = snprintf(buf, size, "%*c[<%p>] %pS\n", - 1 + spaces, ' ', (void *) ip, (void *) ip); + generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ', + (void *)trace->entries[i]); total += generated; diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 3109204c87cc..17ac99b60ee5 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -347,17 +347,16 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) * * Called when the system enters a state where affected tick devices * might stop. Note: TICK_BROADCAST_FORCE cannot be undone. 
- * - * Called with interrupts disabled, so clockevents_lock is not - * required here because the local clock event device cannot go away - * under us. */ void tick_broadcast_control(enum tick_broadcast_mode mode) { struct clock_event_device *bc, *dev; struct tick_device *td; int cpu, bc_stopped; + unsigned long flags; + /* Protects also the local clockevent device. */ + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); td = this_cpu_ptr(&tick_cpu_device); dev = td->evtdev; @@ -365,12 +364,11 @@ void tick_broadcast_control(enum tick_broadcast_mode mode) * Is the device not affected by the powerstate ? */ if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP)) - return; + goto out; if (!tick_device_is_functional(dev)) - return; + goto out; - raw_spin_lock(&tick_broadcast_lock); cpu = smp_processor_id(); bc = tick_broadcast_device.evtdev; bc_stopped = cpumask_empty(tick_broadcast_mask); @@ -420,7 +418,8 @@ void tick_broadcast_control(enum tick_broadcast_mode mode) tick_broadcast_setup_oneshot(bc); } } - raw_spin_unlock(&tick_broadcast_lock); +out: + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); } EXPORT_SYMBOL_GPL(tick_broadcast_control); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 74e0388cc88d..2c115fdab397 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -767,7 +767,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, tick = expires; /* Skip reprogram of event if its not changed */ - if (ts->tick_stopped && (expires == ts->next_tick)) + if (ts->tick_stopped && (expires == dev->next_event)) goto out; /* @@ -787,8 +787,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, trace_tick_stop(1, TICK_DEP_MASK_NONE); } - ts->next_tick = tick; - /* * If the expiration time == KTIME_MAX, then we simply stop * the tick timer. @@ -804,10 +802,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, else tick_program_event(tick, 1); out: - /* - * Update the estimated sleep length until the next timer - * (not only the tick). - */ + /* Update the estimated sleep length */ ts->sleep_length = ktime_sub(dev->next_event, now); return tick; } diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index 075444e3d48e..bf38226e5c17 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -27,7 +27,6 @@ enum tick_nohz_mode { * timer is modified for nohz sleeps. This is necessary * to resume the tick timer operation in the timeline * when the CPU returns from nohz sleep. - * @next_tick: Next tick to be fired when in dynticks mode. 
* @tick_stopped: Indicator that the idle tick has been stopped * @idle_jiffies: jiffies at the entry to idle for idle time accounting * @idle_calls: Total number of idle calls @@ -45,7 +44,6 @@ struct tick_sched { unsigned long check_clocks; enum tick_nohz_mode nohz_mode; ktime_t last_tick; - ktime_t next_tick; int inidle; int tick_stopped; unsigned long idle_jiffies; diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c index ca9fb800336b..38bc4d2208e8 100644 --- a/kernel/time/timekeeping_debug.c +++ b/kernel/time/timekeeping_debug.c @@ -75,7 +75,7 @@ void tk_debug_account_sleep_time(struct timespec64 *t) int bin = min(fls(t->tv_sec), NUM_BINS-1); sleep_time_bin[bin]++; - pr_info("Suspended for %lld.%03lu seconds\n", (s64)t->tv_sec, - t->tv_nsec / NSEC_PER_MSEC); + printk_deferred(KERN_INFO "Suspended for %lld.%03lu seconds\n", + (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC); } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 424daa4586d1..cee9802cf3e0 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -506,7 +506,7 @@ static const struct bpf_verifier_ops kprobe_prog_ops = { .is_valid_access = kprobe_prog_is_valid_access, }; -static struct bpf_prog_type_list kprobe_tl = { +static struct bpf_prog_type_list kprobe_tl __ro_after_init = { .ops = &kprobe_prog_ops, .type = BPF_PROG_TYPE_KPROBE, }; @@ -589,7 +589,7 @@ static const struct bpf_verifier_ops tracepoint_prog_ops = { .is_valid_access = tp_prog_is_valid_access, }; -static struct bpf_prog_type_list tracepoint_tl = { +static struct bpf_prog_type_list tracepoint_tl __ro_after_init = { .ops = &tracepoint_prog_ops, .type = BPF_PROG_TYPE_TRACEPOINT, }; @@ -648,7 +648,7 @@ static const struct bpf_verifier_ops perf_event_prog_ops = { .convert_ctx_access = pe_prog_convert_ctx_access, }; -static struct bpf_prog_type_list perf_event_tl = { +static struct bpf_prog_type_list perf_event_tl __ro_after_init = { .ops = &perf_event_prog_ops, .type = BPF_PROG_TYPE_PERF_EVENT, }; diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 32d0ad058380..172454e6b979 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -32,6 +32,11 @@ #define HASH_MIN_SIZE 4U #define BUCKET_LOCKS_PER_CPU 32UL +union nested_table { + union nested_table __rcu *table; + struct rhash_head __rcu *bucket; +}; + static u32 head_hashfn(struct rhashtable *ht, const struct bucket_table *tbl, const struct rhash_head *he) @@ -76,6 +81,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, /* Never allocate more than 0.5 locks per bucket */ size = min_t(unsigned int, size, tbl->size >> 1); + if (tbl->nest) + size = min(size, 1U << tbl->nest); + if (sizeof(spinlock_t) != 0) { tbl->locks = NULL; #ifdef CONFIG_NUMA @@ -99,8 +107,45 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, return 0; } +static void nested_table_free(union nested_table *ntbl, unsigned int size) +{ + const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); + const unsigned int len = 1 << shift; + unsigned int i; + + ntbl = rcu_dereference_raw(ntbl->table); + if (!ntbl) + return; + + if (size > len) { + size >>= shift; + for (i = 0; i < len; i++) + nested_table_free(ntbl + i, size); + } + + kfree(ntbl); +} + +static void nested_bucket_table_free(const struct bucket_table *tbl) +{ + unsigned int size = tbl->size >> tbl->nest; + unsigned int len = 1 << tbl->nest; + union nested_table *ntbl; + unsigned int i; + + ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); + + for (i = 
0; i < len; i++) + nested_table_free(ntbl + i, size); + + kfree(ntbl); +} + static void bucket_table_free(const struct bucket_table *tbl) { + if (tbl->nest) + nested_bucket_table_free(tbl); + if (tbl) kvfree(tbl->locks); @@ -112,6 +157,59 @@ static void bucket_table_free_rcu(struct rcu_head *head) bucket_table_free(container_of(head, struct bucket_table, rcu)); } +static union nested_table *nested_table_alloc(struct rhashtable *ht, + union nested_table __rcu **prev, + unsigned int shifted, + unsigned int nhash) +{ + union nested_table *ntbl; + int i; + + ntbl = rcu_dereference(*prev); + if (ntbl) + return ntbl; + + ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); + + if (ntbl && shifted) { + for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++) + INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht, + (i << shifted) | nhash); + } + + rcu_assign_pointer(*prev, ntbl); + + return ntbl; +} + +static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, + size_t nbuckets, + gfp_t gfp) +{ + const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); + struct bucket_table *tbl; + size_t size; + + if (nbuckets < (1 << (shift + 1))) + return NULL; + + size = sizeof(*tbl) + sizeof(tbl->buckets[0]); + + tbl = kzalloc(size, gfp); + if (!tbl) + return NULL; + + if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, + 0, 0)) { + kfree(tbl); + return NULL; + } + + tbl->nest = (ilog2(nbuckets) - 1) % shift + 1; + + return tbl; +} + static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, size_t nbuckets, gfp_t gfp) @@ -126,10 +224,17 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); if (tbl == NULL && gfp == GFP_KERNEL) tbl = vzalloc(size); + + size = nbuckets; + + if (tbl == NULL && gfp != GFP_KERNEL) { + tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); + nbuckets = 0; + } if (tbl == NULL) return NULL; - tbl->size = nbuckets; + tbl->size = size; if (alloc_bucket_locks(ht, tbl, gfp) < 0) { bucket_table_free(tbl); @@ -164,12 +269,17 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct bucket_table *new_tbl = rhashtable_last_table(ht, rht_dereference_rcu(old_tbl->future_tbl, ht)); - struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash]; - int err = -ENOENT; + struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash); + int err = -EAGAIN; struct rhash_head *head, *next, *entry; spinlock_t *new_bucket_lock; unsigned int new_hash; + if (new_tbl->nest) + goto out; + + err = -ENOENT; + rht_for_each(entry, old_tbl, old_hash) { err = 0; next = rht_dereference_bucket(entry->next, old_tbl, old_hash); @@ -202,19 +312,26 @@ out: return err; } -static void rhashtable_rehash_chain(struct rhashtable *ht, +static int rhashtable_rehash_chain(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); spinlock_t *old_bucket_lock; + int err; old_bucket_lock = rht_bucket_lock(old_tbl, old_hash); spin_lock_bh(old_bucket_lock); - while (!rhashtable_rehash_one(ht, old_hash)) + while (!(err = rhashtable_rehash_one(ht, old_hash))) ; - old_tbl->rehash++; + + if (err == -ENOENT) { + old_tbl->rehash++; + err = 0; + } spin_unlock_bh(old_bucket_lock); + + return err; } static int rhashtable_rehash_attach(struct rhashtable *ht, @@ -246,13 +363,17 @@ static int rhashtable_rehash_table(struct rhashtable *ht) struct bucket_table *new_tbl; struct rhashtable_walker 
*walker; unsigned int old_hash; + int err; new_tbl = rht_dereference(old_tbl->future_tbl, ht); if (!new_tbl) return 0; - for (old_hash = 0; old_hash < old_tbl->size; old_hash++) - rhashtable_rehash_chain(ht, old_hash); + for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { + err = rhashtable_rehash_chain(ht, old_hash); + if (err) + return err; + } /* Publish the new table pointer. */ rcu_assign_pointer(ht->tbl, new_tbl); @@ -271,31 +392,16 @@ static int rhashtable_rehash_table(struct rhashtable *ht) return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; } -/** - * rhashtable_expand - Expand hash table while allowing concurrent lookups - * @ht: the hash table to expand - * - * A secondary bucket array is allocated and the hash entries are migrated. - * - * This function may only be called in a context where it is safe to call - * synchronize_rcu(), e.g. not within a rcu_read_lock() section. - * - * The caller must ensure that no concurrent resizing occurs by holding - * ht->mutex. - * - * It is valid to have concurrent insertions and deletions protected by per - * bucket locks or concurrent RCU protected lookups and traversals. - */ -static int rhashtable_expand(struct rhashtable *ht) +static int rhashtable_rehash_alloc(struct rhashtable *ht, + struct bucket_table *old_tbl, + unsigned int size) { - struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); + struct bucket_table *new_tbl; int err; ASSERT_RHT_MUTEX(ht); - old_tbl = rhashtable_last_table(ht, old_tbl); - - new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL); + new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); if (new_tbl == NULL) return -ENOMEM; @@ -324,12 +430,9 @@ static int rhashtable_expand(struct rhashtable *ht) */ static int rhashtable_shrink(struct rhashtable *ht) { - struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); + struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); unsigned int nelems = atomic_read(&ht->nelems); unsigned int size = 0; - int err; - - ASSERT_RHT_MUTEX(ht); if (nelems) size = roundup_pow_of_two(nelems * 3 / 2); @@ -342,15 +445,7 @@ static int rhashtable_shrink(struct rhashtable *ht) if (rht_dereference(old_tbl->future_tbl, ht)) return -EEXIST; - new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); - if (new_tbl == NULL) - return -ENOMEM; - - err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); - if (err) - bucket_table_free(new_tbl); - - return err; + return rhashtable_rehash_alloc(ht, old_tbl, size); } static void rht_deferred_worker(struct work_struct *work) @@ -366,11 +461,14 @@ static void rht_deferred_worker(struct work_struct *work) tbl = rhashtable_last_table(ht, tbl); if (rht_grow_above_75(ht, tbl)) - rhashtable_expand(ht); + err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) - rhashtable_shrink(ht); + err = rhashtable_shrink(ht); + else if (tbl->nest) + err = rhashtable_rehash_alloc(ht, tbl, tbl->size); - err = rhashtable_rehash_table(ht); + if (!err) + err = rhashtable_rehash_table(ht); mutex_unlock(&ht->mutex); @@ -439,8 +537,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, int elasticity; elasticity = ht->elasticity; - pprev = &tbl->buckets[hash]; - rht_for_each(head, tbl, hash) { + pprev = rht_bucket_var(tbl, hash); + rht_for_each_continue(head, *pprev, tbl, hash) { struct rhlist_head *list; struct rhlist_head *plist; @@ -477,6 +575,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, struct rhash_head *obj, 
void *data) { + struct rhash_head __rcu **pprev; struct bucket_table *new_tbl; struct rhash_head *head; @@ -499,7 +598,11 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, if (unlikely(rht_grow_above_100(ht, tbl))) return ERR_PTR(-EAGAIN); - head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); + pprev = rht_bucket_insert(ht, tbl, hash); + if (!pprev) + return ERR_PTR(-ENOMEM); + + head = rht_dereference_bucket(*pprev, tbl, hash); RCU_INIT_POINTER(obj->next, head); if (ht->rhlist) { @@ -509,7 +612,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, RCU_INIT_POINTER(list->next, NULL); } - rcu_assign_pointer(tbl->buckets[hash], obj); + rcu_assign_pointer(*pprev, obj); atomic_inc(&ht->nelems); if (rht_grow_above_75(ht, tbl)) @@ -975,7 +1078,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void (*free_fn)(void *ptr, void *arg), void *arg) { - const struct bucket_table *tbl; + struct bucket_table *tbl; unsigned int i; cancel_work_sync(&ht->run_work); @@ -986,7 +1089,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; - for (pos = rht_dereference(tbl->buckets[i], ht), + for (pos = rht_dereference(*rht_bucket(tbl, i), ht), next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; !rht_is_a_nulls(pos); @@ -1007,3 +1110,70 @@ void rhashtable_destroy(struct rhashtable *ht) return rhashtable_free_and_destroy(ht, NULL, NULL); } EXPORT_SYMBOL_GPL(rhashtable_destroy); + +struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, + unsigned int hash) +{ + const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); + static struct rhash_head __rcu *rhnull = + (struct rhash_head __rcu *)NULLS_MARKER(0); + unsigned int index = hash & ((1 << tbl->nest) - 1); + unsigned int size = tbl->size >> tbl->nest; + unsigned int subhash = hash; + union nested_table *ntbl; + + ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); + ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash); + subhash >>= tbl->nest; + + while (ntbl && size > (1 << shift)) { + index = subhash & ((1 << shift) - 1); + ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash); + size >>= shift; + subhash >>= shift; + } + + if (!ntbl) + return &rhnull; + + return &ntbl[subhash].bucket; + +} +EXPORT_SYMBOL_GPL(rht_bucket_nested); + +struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, + struct bucket_table *tbl, + unsigned int hash) +{ + const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); + unsigned int index = hash & ((1 << tbl->nest) - 1); + unsigned int size = tbl->size >> tbl->nest; + union nested_table *ntbl; + unsigned int shifted; + unsigned int nhash; + + ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); + hash >>= tbl->nest; + nhash = index; + shifted = tbl->nest; + ntbl = nested_table_alloc(ht, &ntbl[index].table, + size <= (1 << shift) ? shifted : 0, nhash); + + while (ntbl && size > (1 << shift)) { + index = hash & ((1 << shift) - 1); + size >>= shift; + hash >>= shift; + nhash |= index << shifted; + shifted += shift; + ntbl = nested_table_alloc(ht, &ntbl[index].table, + size <= (1 << shift) ? 
shifted : 0, + nhash); + } + + if (!ntbl) + return NULL; + + return &ntbl[hash].bucket; + +} +EXPORT_SYMBOL_GPL(rht_bucket_nested_insert); diff --git a/net/Kconfig b/net/Kconfig index f19c0c3b9589..102f781a0131 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -297,7 +297,8 @@ config BPF_JIT Note, admin should enable this feature changing: /proc/sys/net/core/bpf_jit_enable - /proc/sys/net/core/bpf_jit_harden (optional) + /proc/sys/net/core/bpf_jit_harden (optional) + /proc/sys/net/core/bpf_jit_kallsyms (optional) config NET_FLOW_LIMIT bool diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 4ac11574117c..4f598dc2d916 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -65,7 +65,7 @@ static inline unsigned long hold_time(const struct net_bridge *br) static inline int has_expired(const struct net_bridge *br, const struct net_bridge_fdb_entry *fdb) { - return !fdb->is_static && + return !fdb->is_static && !fdb->added_by_external_learn && time_before_eq(fdb->updated + hold_time(br), jiffies); } diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c index b2b79a070162..6d2c4eed2dc8 100644 --- a/net/bridge/br_vlan_tunnel.c +++ b/net/bridge/br_vlan_tunnel.c @@ -85,6 +85,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg, return 0; out: dst_release(&vlan->tinfo.tunnel_dst->dst); + vlan->tinfo.tunnel_dst = NULL; + vlan->tinfo.tunnel_id = 0; return err; } diff --git a/net/core/dev.c b/net/core/dev.c index 2f1bbe1bf67c..05d19c6acf94 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4510,6 +4510,11 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (&ptype->list == head) goto normal; + if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) { + ret = GRO_CONSUMED; + goto ok; + } + same_flow = NAPI_GRO_CB(skb)->same_flow; ret = NAPI_GRO_CB(skb)->free ? 
GRO_MERGED_FREE : GRO_MERGED; @@ -4614,6 +4619,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) case GRO_HELD: case GRO_MERGED: + case GRO_CONSUMED: break; } @@ -4685,6 +4691,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, break; case GRO_MERGED: + case GRO_CONSUMED: break; } diff --git a/net/core/filter.c b/net/core/filter.c index 0b753cbb2536..e466e0040137 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3296,47 +3296,47 @@ static const struct bpf_verifier_ops cg_sock_ops = { .convert_ctx_access = sock_filter_convert_ctx_access, }; -static struct bpf_prog_type_list sk_filter_type __read_mostly = { +static struct bpf_prog_type_list sk_filter_type __ro_after_init = { .ops = &sk_filter_ops, .type = BPF_PROG_TYPE_SOCKET_FILTER, }; -static struct bpf_prog_type_list sched_cls_type __read_mostly = { +static struct bpf_prog_type_list sched_cls_type __ro_after_init = { .ops = &tc_cls_act_ops, .type = BPF_PROG_TYPE_SCHED_CLS, }; -static struct bpf_prog_type_list sched_act_type __read_mostly = { +static struct bpf_prog_type_list sched_act_type __ro_after_init = { .ops = &tc_cls_act_ops, .type = BPF_PROG_TYPE_SCHED_ACT, }; -static struct bpf_prog_type_list xdp_type __read_mostly = { +static struct bpf_prog_type_list xdp_type __ro_after_init = { .ops = &xdp_ops, .type = BPF_PROG_TYPE_XDP, }; -static struct bpf_prog_type_list cg_skb_type __read_mostly = { +static struct bpf_prog_type_list cg_skb_type __ro_after_init = { .ops = &cg_skb_ops, .type = BPF_PROG_TYPE_CGROUP_SKB, }; -static struct bpf_prog_type_list lwt_in_type __read_mostly = { +static struct bpf_prog_type_list lwt_in_type __ro_after_init = { .ops = &lwt_inout_ops, .type = BPF_PROG_TYPE_LWT_IN, }; -static struct bpf_prog_type_list lwt_out_type __read_mostly = { +static struct bpf_prog_type_list lwt_out_type __ro_after_init = { .ops = &lwt_inout_ops, .type = BPF_PROG_TYPE_LWT_OUT, }; -static struct bpf_prog_type_list lwt_xmit_type __read_mostly = { +static struct bpf_prog_type_list lwt_xmit_type __ro_after_init = { .ops = &lwt_xmit_ops, .type = BPF_PROG_TYPE_LWT_XMIT, }; -static struct bpf_prog_type_list cg_sock_type __read_mostly = { +static struct bpf_prog_type_list cg_sock_type __ro_after_init = { .ops = &cg_sock_ops, .type = BPF_PROG_TYPE_CGROUP_SOCK }; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 7bb12e07ffef..e7c12caa20c8 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2923,7 +2923,8 @@ static void neigh_proc_update(struct ctl_table *ctl, int write) return; set_bit(index, p->data_state); - call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); + if (index == NEIGH_VAR_DELAY_PROBE_TIME) + call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p); if (!dev) /* NULL dev means this is default value */ neigh_copy_dflt_parms(net, p, index); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index adfb54b896da..e3286d32eca5 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -876,8 +876,6 @@ static size_t rtnl_port_size(const struct net_device *dev, { size_t port_size = nla_total_size(4) /* PORT_VF */ + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */ - + nla_total_size(sizeof(struct ifla_port_vsi)) - /* PORT_VSI_TYPE */ + nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */ + nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */ + nla_total_size(1) /* PROT_VDP_REQUEST */ @@ -1491,14 +1489,19 @@ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { [IFLA_PORT_VF] = { .type = NLA_U32 }, 
[IFLA_PORT_PROFILE] = { .type = NLA_STRING, .len = PORT_PROFILE_MAX }, - [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, - .len = sizeof(struct ifla_port_vsi)}, [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY, .len = PORT_UUID_MAX }, [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING, .len = PORT_UUID_MAX }, [IFLA_PORT_REQUEST] = { .type = NLA_U8, }, [IFLA_PORT_RESPONSE] = { .type = NLA_U16, }, + + /* Unused, but we need to keep it here since user space could + * fill it. It's also broken with regard to NLA_BINARY use in + * combination with structs. + */ + [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY, + .len = sizeof(struct ifla_port_vsi) }, }; static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = { diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index eaa72eb0399c..4ead336e14ea 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -334,6 +334,13 @@ static struct ctl_table net_core_table[] = { .mode = 0600, .proc_handler = proc_dointvec, }, + { + .procname = "bpf_jit_kallsyms", + .data = &bpf_jit_kallsyms, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec, + }, # endif #endif { diff --git a/net/dccp/input.c b/net/dccp/input.c index ba347184bda9..8fedc2d49770 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, if (inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) < 0) return 1; - goto discard; + consume_skb(skb); + return 0; } if (dh->dccph_type == DCCP_PKT_RESET) goto discard; diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index d21be3b3d9cc..1446810047f5 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -475,7 +475,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head, out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index e30f9caddae8..91a2557942fa 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -361,6 +361,19 @@ config INET_ESP If unsure, say Y. +config INET_ESP_OFFLOAD + tristate "IP: ESP transformation offload" + depends on INET_ESP + select XFRM_OFFLOAD + default n + ---help--- + Support for ESP transformation offload. This makes sense + only if this system really does IPsec and want to do it + with high throughput. A typical desktop system does not + need it, even if it does IPsec. + + If unsure, say N. 
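The new INET_ESP_OFFLOAD option is a tristate that depends on INET_ESP and selects XFRM_OFFLOAD, so the latter never needs to be set by hand. Assuming the usual Kconfig workflow (nothing beyond what the hunk itself adds), an illustrative .config fragment for a modular build:

    # .config fragment (illustrative): ESP and its GRO offload as modules.
    # XFRM_OFFLOAD is selected automatically by INET_ESP_OFFLOAD.
    CONFIG_INET_ESP=m
    CONFIG_INET_ESP_OFFLOAD=m

Per the Makefile hunk below, the resulting module is esp4_offload; loading it registers the ESP GRO callback for IPPROTO_ESP via inet_add_offload().
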
+ config INET_IPCOMP tristate "IP: IPComp transformation" select INET_XFRM_TUNNEL diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 48af58a5686e..c6d4238ff94a 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_NET_IPVTI) += ip_vti.o obj-$(CONFIG_SYN_COOKIES) += syncookies.o obj-$(CONFIG_INET_AH) += ah4.o obj-$(CONFIG_INET_ESP) += esp4.o +obj-$(CONFIG_INET_ESP_OFFLOAD) += esp4_offload.o obj-$(CONFIG_INET_IPCOMP) += ipcomp.o obj-$(CONFIG_INET_XFRM_TUNNEL) += xfrm4_tunnel.o obj-$(CONFIG_INET_XFRM_MODE_BEET) += xfrm4_mode_beet.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 685ba53df2d1..602d40f43687 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1423,7 +1423,7 @@ out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 89a8cac4726a..51b27ae09fbd 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1263,7 +1263,7 @@ void __init arp_init(void) /* * ax25 -> ASCII conversion */ -static char *ax2asc2(ax25_address *a, char *buf) +static void ax2asc2(ax25_address *a, char *buf) { char c, *s; int n; @@ -1285,10 +1285,10 @@ static char *ax2asc2(ax25_address *a, char *buf) *s++ = n + '0'; *s++ = '\0'; - if (*buf == '\0' || *buf == '-') - return "*"; - - return buf; + if (*buf == '\0' || *buf == '-') { + buf[0] = '*'; + buf[1] = '\0'; + } } #endif /* CONFIG_AX25 */ @@ -1322,7 +1322,7 @@ static void arp_format_neigh_entry(struct seq_file *seq, } #endif sprintf(tbuf, "%pI4", n->primary_key); - seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", + seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n", tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); read_unlock(&n->lock); } diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c new file mode 100644 index 000000000000..1de442632406 --- /dev/null +++ b/net/ipv4/esp4_offload.c @@ -0,0 +1,106 @@ +/* + * IPV4 GSO/GRO offload support + * Linux INET implementation + * + * Copyright (C) 2016 secunet Security Networks AG + * Author: Steffen Klassert <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * ESP GRO support + */ + +#include <linux/skbuff.h> +#include <linux/init.h> +#include <net/protocol.h> +#include <crypto/aead.h> +#include <crypto/authenc.h> +#include <linux/err.h> +#include <linux/module.h> +#include <net/ip.h> +#include <net/xfrm.h> +#include <net/esp.h> +#include <linux/scatterlist.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <net/udp.h> + +static struct sk_buff **esp4_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + int offset = skb_gro_offset(skb); + struct xfrm_offload *xo; + struct xfrm_state *x; + __be32 seq; + __be32 spi; + int err; + + skb_pull(skb, offset); + + if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) + goto out; + + err = secpath_set(skb); + if (err) + goto out; + + if (skb->sp->len == XFRM_MAX_DEPTH) + goto out; + + x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, + (xfrm_address_t *)&ip_hdr(skb)->daddr, + spi, IPPROTO_ESP, AF_INET); + if (!x) + goto out; + + skb->sp->xvec[skb->sp->len++] = x; + skb->sp->olen++; + + xo = xfrm_offload(skb); + if (!xo) { + xfrm_state_put(x); + goto out; + } + xo->flags |= XFRM_GRO; + + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; + XFRM_SPI_SKB_CB(skb)->family = AF_INET; + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); + XFRM_SPI_SKB_CB(skb)->seq = seq; + + /* We don't need to handle errors from xfrm_input, it does all + * the error handling and frees the resources on error. */ + xfrm_input(skb, IPPROTO_ESP, spi, -2); + + return ERR_PTR(-EINPROGRESS); +out: + skb_push(skb, offset); + NAPI_GRO_CB(skb)->same_flow = 0; + NAPI_GRO_CB(skb)->flush = 1; + + return NULL; +} + +static const struct net_offload esp4_offload = { + .callbacks = { + .gro_receive = esp4_gro_receive, + }, +}; + +static int __init esp4_offload_init(void) +{ + return inet_add_offload(&esp4_offload, IPPROTO_ESP); +} + +static void __exit esp4_offload_exit(void) +{ + inet_del_offload(&esp4_offload, IPPROTO_ESP); +} + +module_init(esp4_offload_init); +module_exit(esp4_offload_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Steffen Klassert <[email protected]>"); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d44a6989e76d..da385ae997a3 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1288,7 +1288,7 @@ new_segment: } else { skb_fill_page_desc(skb, i, pfrag->page, pfrag->offset, copy); - get_page(pfrag->page); + page_ref_inc(pfrag->page); } pfrag->offset += copy; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 61f272a99a49..22548b5f05cb 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -83,7 +83,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) tcp_skb_pcount(skb)); } -/* SND.NXT, if window was not shrunk. +/* SND.NXT, if window was not shrunk or the amount of shrunk was less than one + * window scaling factor due to loss of precision. * If window has been shrunk, what should we make? It is not clear at all. * Using SND.UNA we will fail to open window, SND.NXT is out of window. 
:-( * Anything in between SND.UNA...SND.UNA+SND.WND also can be already @@ -93,7 +94,9 @@ static inline __u32 tcp_acceptable_seq(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); - if (!before(tcp_wnd_end(tp), tp->snd_nxt)) + if (!before(tcp_wnd_end(tp), tp->snd_nxt) || + (tp->rx_opt.wscale_ok && + ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) return tp->snd_nxt; else return tcp_wnd_end(tp); diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index f6c50af24a64..3d063eb37848 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, (fwmark > 0 && skb->mark == fwmark)) && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { - spin_lock(&tcp_probe.lock); + spin_lock_bh(&tcp_probe.lock); /* If log fills, just silently drop */ if (tcp_probe_avail() > 1) { struct tcp_log *p = tcp_probe.log + tcp_probe.head; @@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); } tcp_probe.lastcwnd = tp->snd_cwnd; - spin_unlock(&tcp_probe.lock); + spin_unlock_bh(&tcp_probe.lock); wake_up(&tcp_probe.wait); } diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index 62e1e72db461..1fc684111ce6 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -40,6 +40,7 @@ drop: int xfrm4_transport_finish(struct sk_buff *skb, int async) { + struct xfrm_offload *xo = xfrm_offload(skb); struct iphdr *iph = ip_hdr(skb); iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol; @@ -53,6 +54,11 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async) iph->tot_len = htons(skb->len); ip_send_check(iph); + if (xo && (xo->flags & XFRM_GRO)) { + skb_mac_header_rebuild(skb); + return 0; + } + NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, dev_net(skb->dev), NULL, skb, skb->dev, NULL, xfrm4_rcv_encap_finish); diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c index fd840c7d75ea..4acc0508c5eb 100644 --- a/net/ipv4/xfrm4_mode_transport.c +++ b/net/ipv4/xfrm4_mode_transport.c @@ -43,6 +43,7 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) { int ihl = skb->data - skb_transport_header(skb); + struct xfrm_offload *xo = xfrm_offload(skb); if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), @@ -50,7 +51,8 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) skb->network_header = skb->transport_header; } ip_hdr(skb)->tot_len = htons(skb->len + ihl); - skb_reset_transport_header(skb); + if (!xo || !(xo->flags & XFRM_GRO)) + skb_reset_transport_header(skb); return 0; } diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 6a7ff6957535..71b4ecc195c7 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -17,8 +17,6 @@ #include <net/ip.h> #include <net/l3mdev.h> -static struct xfrm_policy_afinfo xfrm4_policy_afinfo; - static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4, int tos, int oif, const xfrm_address_t *saddr, @@ -219,7 +217,7 @@ static inline int xfrm4_garbage_collect(struct dst_ops *ops) { struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops); - xfrm4_policy_afinfo.garbage_collect(net); + xfrm_garbage_collect_deferred(net); return (dst_entries_get_slow(ops) > ops->gc_thresh * 2); } @@ -271,8 +269,7 @@ static struct dst_ops xfrm4_dst_ops_template = 
{ .gc_thresh = INT_MAX, }; -static struct xfrm_policy_afinfo xfrm4_policy_afinfo = { - .family = AF_INET, +static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = { .dst_ops = &xfrm4_dst_ops_template, .dst_lookup = xfrm4_dst_lookup, .get_saddr = xfrm4_get_saddr, @@ -376,7 +373,7 @@ static struct pernet_operations __net_initdata xfrm4_net_ops = { static void __init xfrm4_policy_init(void) { - xfrm_policy_register_afinfo(&xfrm4_policy_afinfo); + xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET); } void __init xfrm4_init(void) diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c index dccefa9d84cf..8dd0e6ab8606 100644 --- a/net/ipv4/xfrm4_protocol.c +++ b/net/ipv4/xfrm4_protocol.c @@ -188,9 +188,8 @@ static const struct net_protocol ipcomp4_protocol = { .netns_ok = 1, }; -static struct xfrm_input_afinfo xfrm4_input_afinfo = { +static const struct xfrm_input_afinfo xfrm4_input_afinfo = { .family = AF_INET, - .owner = THIS_MODULE, .callback = xfrm4_rcv_cb, }; diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 3c7c76b2a7ba..e2afe677a9d9 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -75,6 +75,19 @@ config INET6_ESP If unsure, say Y. +config INET6_ESP_OFFLOAD + tristate "IPv6: ESP transformation offload" + depends on INET6_ESP + select XFRM_OFFLOAD + default n + ---help--- + Support for ESP transformation offload. This makes sense + only if this system really does IPsec and want to do it + with high throughput. A typical desktop system does not + need it, even if it does IPsec. + + If unsure, say N. + config INET6_IPCOMP tristate "IPv6: IPComp transformation" select INET6_XFRM_TUNNEL diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index a9e9fec387ce..217e9ff0e24b 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -30,6 +30,7 @@ ipv6-objs += $(ipv6-y) obj-$(CONFIG_INET6_AH) += ah6.o obj-$(CONFIG_INET6_ESP) += esp6.o +obj-$(CONFIG_INET6_ESP_OFFLOAD) += esp6_offload.o obj-$(CONFIG_INET6_IPCOMP) += ipcomp6.o obj-$(CONFIG_INET6_XFRM_TUNNEL) += xfrm6_tunnel.o obj-$(CONFIG_INET6_TUNNEL) += tunnel6.o diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index a3eaafd87100..eec27f87efac 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -167,18 +167,22 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, if (np->sndflow) fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; - addr_type = ipv6_addr_type(&usin->sin6_addr); - - if (addr_type == IPV6_ADDR_ANY) { + if (ipv6_addr_any(&usin->sin6_addr)) { /* * connect to self */ - usin->sin6_addr.s6_addr[15] = 0x01; + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), + &usin->sin6_addr); + else + usin->sin6_addr = in6addr_loopback; } + addr_type = ipv6_addr_type(&usin->sin6_addr); + daddr = &usin->sin6_addr; - if (addr_type == IPV6_ADDR_MAPPED) { + if (addr_type & IPV6_ADDR_MAPPED) { struct sockaddr_in sin; if (__ipv6_only_sock(sk)) { diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c new file mode 100644 index 000000000000..d914eb93204a --- /dev/null +++ b/net/ipv6/esp6_offload.c @@ -0,0 +1,108 @@ +/* + * IPV6 GSO/GRO offload support + * Linux INET implementation + * + * Copyright (C) 2016 secunet Security Networks AG + * Author: Steffen Klassert <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * ESP GRO support + */ + +#include <linux/skbuff.h> +#include <linux/init.h> +#include <net/protocol.h> +#include <crypto/aead.h> +#include <crypto/authenc.h> +#include <linux/err.h> +#include <linux/module.h> +#include <net/ip.h> +#include <net/xfrm.h> +#include <net/esp.h> +#include <linux/scatterlist.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <net/ip6_route.h> +#include <net/ipv6.h> +#include <linux/icmpv6.h> + +static struct sk_buff **esp6_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + int offset = skb_gro_offset(skb); + struct xfrm_offload *xo; + struct xfrm_state *x; + __be32 seq; + __be32 spi; + int err; + + skb_pull(skb, offset); + + if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) + goto out; + + err = secpath_set(skb); + if (err) + goto out; + + if (skb->sp->len == XFRM_MAX_DEPTH) + goto out; + + x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, + (xfrm_address_t *)&ipv6_hdr(skb)->daddr, + spi, IPPROTO_ESP, AF_INET6); + if (!x) + goto out; + + skb->sp->xvec[skb->sp->len++] = x; + skb->sp->olen++; + + xo = xfrm_offload(skb); + if (!xo) { + xfrm_state_put(x); + goto out; + } + xo->flags |= XFRM_GRO; + + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; + XFRM_SPI_SKB_CB(skb)->family = AF_INET6; + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); + XFRM_SPI_SKB_CB(skb)->seq = seq; + + /* We don't need to handle errors from xfrm_input, it does all + * the error handling and frees the resources on error. */ + xfrm_input(skb, IPPROTO_ESP, spi, -2); + + return ERR_PTR(-EINPROGRESS); +out: + skb_push(skb, offset); + NAPI_GRO_CB(skb)->same_flow = 0; + NAPI_GRO_CB(skb)->flush = 1; + + return NULL; +} + +static const struct net_offload esp6_offload = { + .callbacks = { + .gro_receive = esp6_gro_receive, + }, +}; + +static int __init esp6_offload_init(void) +{ + return inet6_add_offload(&esp6_offload, IPPROTO_ESP); +} + +static void __exit esp6_offload_exit(void) +{ + inet6_del_offload(&esp6_offload, IPPROTO_ESP); +} + +module_init(esp6_offload_init); +module_exit(esp6_offload_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Steffen Klassert <[email protected]>"); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index fc7b4017ba24..0838e6d01d2e 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -253,7 +253,7 @@ out_unlock: rcu_read_unlock(); out: - NAPI_GRO_CB(skb)->flush |= flush; + skb_gro_flush_final(skb, pp, flush); return pp; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a75871c62328..528b3c1f3fde 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1022,6 +1022,11 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, } } #endif + if (ipv6_addr_v4mapped(&fl6->saddr) && + !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) { + err = -EAFNOSUPPORT; + goto out_err_release; + } return 0; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index b5d27212db2f..21c719965b6b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -149,8 +149,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, * connect() to INADDR_ANY means loopback (BSD'ism). 
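The tcp_v6_connect() hunk that follows makes this loopback substitution match the socket's own binding: if the socket is bound to a v4-mapped address, connecting to in6addr_any should yield the v4-mapped loopback ::ffff:127.0.0.1 rather than ::1. A standalone sketch of that address selection, runnable in userspace with only netinet/in.h helpers:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>

    /* Pick the loopback to substitute for in6addr_any, based on how the
     * socket is already bound: v4-mapped sockets get a v4-mapped loopback. */
    static struct in6_addr pick_loopback(const struct in6_addr *bound)
    {
        struct in6_addr dst;

        if (IN6_IS_ADDR_V4MAPPED(bound)) {
            memset(&dst, 0, sizeof(dst));
            dst.s6_addr[10] = 0xff;
            dst.s6_addr[11] = 0xff;
            dst.s6_addr[12] = 127;      /* ::ffff:127.0.0.1 */
            dst.s6_addr[15] = 1;
        } else {
            dst = in6addr_loopback;     /* ::1 */
        }
        return dst;
    }

    int main(void)
    {
        struct in6_addr v4mapped, out;
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "::ffff:192.0.2.1", &v4mapped);
        out = pick_loopback(&v4mapped);
        printf("%s\n", inet_ntop(AF_INET6, &out, buf, sizeof(buf)));

        out = pick_loopback(&in6addr_any);
        printf("%s\n", inet_ntop(AF_INET6, &out, buf, sizeof(buf)));
        return 0;
    }
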
*/ - if (ipv6_addr_any(&usin->sin6_addr)) - usin->sin6_addr.s6_addr[15] = 0x1; + if (ipv6_addr_any(&usin->sin6_addr)) { + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), + &usin->sin6_addr); + else + usin->sin6_addr = in6addr_loopback; + } addr_type = ipv6_addr_type(&usin->sin6_addr); @@ -189,7 +194,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, * TCP over IPv4 */ - if (addr_type == IPV6_ADDR_MAPPED) { + if (addr_type & IPV6_ADDR_MAPPED) { u32 exthdrlen = icsk->icsk_ext_hdr_len; struct sockaddr_in sin; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index df71ba05f41d..4e4c401e3bc6 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1046,6 +1046,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; daddr = &sin6->sin6_addr; + if (ipv6_addr_any(daddr) && + ipv6_addr_v4mapped(&np->saddr)) + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), + daddr); break; case AF_INET: goto do_udp_sendmsg; diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index b5789562aded..08a807b29298 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -33,6 +33,8 @@ EXPORT_SYMBOL(xfrm6_rcv_spi); int xfrm6_transport_finish(struct sk_buff *skb, int async) { + struct xfrm_offload *xo = xfrm_offload(skb); + skb_network_header(skb)[IP6CB(skb)->nhoff] = XFRM_MODE_SKB_CB(skb)->protocol; @@ -44,6 +46,11 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async) ipv6_hdr(skb)->payload_len = htons(skb->len); __skb_push(skb, skb->data - skb_network_header(skb)); + if (xo && (xo->flags & XFRM_GRO)) { + skb_mac_header_rebuild(skb); + return -1; + } + NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, dev_net(skb->dev), NULL, skb, skb->dev, NULL, ip6_rcv_finish); @@ -69,18 +76,9 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, struct xfrm_state *x = NULL; int i = 0; - /* Allocate new secpath or COW existing one. 
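The xfrm6_input_addr() hunk that follows replaces this open-coded "allocate or copy-on-write the secpath" block with the new secpath_set() helper, so the GRO and non-GRO input paths share one implementation. A hedged userspace sketch of that COW-on-shared-refcount idiom (all names invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct path {
        int refcnt;
        int len;
    };

    /* Ensure *slot points at a path we own exclusively: allocate one if
     * missing, duplicate it if it is shared - the essence of secpath_set(). */
    static int path_set(struct path **slot)
    {
        struct path *old = *slot, *copy;

        if (old && old->refcnt == 1)
            return 0;                     /* already exclusive, reuse it */

        copy = malloc(sizeof(*copy));
        if (!copy)
            return -1;
        if (old) {
            *copy = *old;                 /* copy-on-write */
            old->refcnt--;                /* drop our share of the old one */
        } else {
            memset(copy, 0, sizeof(*copy));
        }
        copy->refcnt = 1;
        *slot = copy;
        return 0;
    }

    int main(void)
    {
        struct path shared = { .refcnt = 2, .len = 3 };
        struct path *p = &shared;

        if (path_set(&p) == 0)
            printf("len=%d refcnt=%d (shared dropped to %d)\n",
                   p->len, p->refcnt, shared.refcnt);
        if (p != &shared)
            free(p);
        return 0;
    }
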
*/ - if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { - struct sec_path *sp; - - sp = secpath_dup(skb->sp); - if (!sp) { - XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR); - goto drop; - } - if (skb->sp) - secpath_put(skb->sp); - skb->sp = sp; + if (secpath_set(skb)) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR); + goto drop; } if (1 + skb->sp->len == XFRM_MAX_DEPTH) { diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index 4e344105b3fd..4439ee44c8b0 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ b/net/ipv6/xfrm6_mode_transport.c @@ -47,6 +47,7 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) { int ihl = skb->data - skb_transport_header(skb); + struct xfrm_offload *xo = xfrm_offload(skb); if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), @@ -55,7 +56,8 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) } ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - sizeof(struct ipv6hdr)); - skb_reset_transport_header(skb); + if (!xo || !(xo->flags & XFRM_GRO)) + skb_reset_transport_header(skb); return 0; } diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index e0f71c01d728..79651bc71bf0 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -25,8 +25,6 @@ #include <net/mip6.h> #endif -static struct xfrm_policy_afinfo xfrm6_policy_afinfo; - static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t *daddr) @@ -220,7 +218,7 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops) { struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops); - xfrm6_policy_afinfo.garbage_collect(net); + xfrm_garbage_collect_deferred(net); return dst_entries_get_fast(ops) > ops->gc_thresh * 2; } @@ -291,8 +289,7 @@ static struct dst_ops xfrm6_dst_ops_template = { .gc_thresh = INT_MAX, }; -static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { - .family = AF_INET6, +static const struct xfrm_policy_afinfo xfrm6_policy_afinfo = { .dst_ops = &xfrm6_dst_ops_template, .dst_lookup = xfrm6_dst_lookup, .get_saddr = xfrm6_get_saddr, @@ -305,7 +302,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { static int __init xfrm6_policy_init(void) { - return xfrm_policy_register_afinfo(&xfrm6_policy_afinfo); + return xfrm_policy_register_afinfo(&xfrm6_policy_afinfo, AF_INET6); } static void xfrm6_policy_fini(void) diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c index 54d13f8dbbae..b2dc8ce49378 100644 --- a/net/ipv6/xfrm6_protocol.c +++ b/net/ipv6/xfrm6_protocol.c @@ -162,9 +162,8 @@ static const struct inet6_protocol ipcomp6_protocol = { .flags = INET6_PROTO_NOPOLICY, }; -static struct xfrm_input_afinfo xfrm6_input_afinfo = { +static const struct xfrm_input_afinfo xfrm6_input_afinfo = { .family = AF_INET6, - .owner = THIS_MODULE, .callback = xfrm6_rcv_cb, }; diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c index acbe61c7e683..160dc89335e2 100644 --- a/net/irda/irqueue.c +++ b/net/irda/irqueue.c @@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new); * for deallocating this structure if it's complex. If not the user can * just supply kfree, which should take care of the job. 
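The irqueue.c rewrite that follows stops holding hashbin->hb_spinlock across the user-supplied free_func callback; the old code papered over the resulting lock-nesting problem with spin_lock_irqsave_nested() and a fake hashbin_lock_depth. Dropping and retaking the lock around the callback is the general cure. A minimal pthread analogue of drain-with-lock-dropped-around-callback (illustrative names):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    /* Drain the list, but never hold our lock while running the caller's
     * destructor: free_func may take locks of its own (or even this one). */
    static void list_delete(void (*free_func)(struct node *))
    {
        pthread_mutex_lock(&lock);
        for (;;) {
            struct node *n = head;

            if (!n)
                break;
            head = n->next;

            pthread_mutex_unlock(&lock);   /* drop across the callback */
            free_func(n);
            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
    }

    static void free_node(struct node *n)
    {
        free(n);   /* could legitimately take other locks here */
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->next = head;
            head = n;
        }
        list_delete(free_node);
        printf("drained: %s\n", head ? "no" : "yes");
        return 0;
    }
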
*/ -#ifdef CONFIG_LOCKDEP -static int hashbin_lock_depth = 0; -#endif int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) { irda_queue_t* queue; @@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;); /* Synchronize */ - if ( hashbin->hb_type & HB_LOCK ) { - spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags, - hashbin_lock_depth++); - } + if (hashbin->hb_type & HB_LOCK) + spin_lock_irqsave(&hashbin->hb_spinlock, flags); /* * Free the entries in the hashbin, TODO: use hashbin_clear when * it has been shown to work */ for (i = 0; i < HASHBIN_SIZE; i ++ ) { - queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); - while (queue ) { - if (free_func) - (*free_func)(queue); - queue = dequeue_first( - (irda_queue_t**) &hashbin->hb_queue[i]); + while (1) { + queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); + + if (!queue) + break; + + if (free_func) { + if (hashbin->hb_type & HB_LOCK) + spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); + free_func(queue); + if (hashbin->hb_type & HB_LOCK) + spin_lock_irqsave(&hashbin->hb_spinlock, flags); + } } } @@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) hashbin->magic = ~HB_MAGIC; /* Release lock */ - if ( hashbin->hb_type & HB_LOCK) { + if (hashbin->hb_type & HB_LOCK) spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); -#ifdef CONFIG_LOCKDEP - hashbin_lock_depth--; -#endif - } /* * Free the hashbin structure diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 64f0e8531af0..a646f3481240 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1044,8 +1044,10 @@ wait_for_memory: } else { /* Message not complete, save state */ partial_message: - kcm->seq_skb = head; - kcm_tx_msg(head)->last_skb = skb; + if (head) { + kcm->seq_skb = head; + kcm_tx_msg(head)->last_skb = skb; + } } KCM_STATS_ADD(kcm->stats.tx_bytes, copied); diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 3e821daf9dd4..8bc5a1bd2d45 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) * another trick required to cope with how the PROCOM state * machine works. 
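The llc_conn_handler() and llc_sap_rcv() hunks that follow stop assigning skb->sk bare: the skb is orphaned first, a sock_hold() reference is taken, and sock_efree is installed as the destructor so that reference is dropped exactly when the skb is freed. The shape of that "pin the owner for the lifetime of the object" pattern, as a self-contained sketch (invented names, not kernel API):

    #include <stdio.h>

    struct sock { int refcnt; };

    struct skb {
        struct sock *sk;
        void (*destructor)(struct skb *);
    };

    static void sock_hold(struct sock *sk) { sk->refcnt++; }
    static void sock_put(struct sock *sk)  { sk->refcnt--; }

    /* Destructor mirroring sock_efree(): drop the reference taken when
     * the skb was attached to the socket. */
    static void sk_release(struct skb *skb)
    {
        sock_put(skb->sk);
    }

    static void attach(struct skb *skb, struct sock *sk)
    {
        sock_hold(sk);              /* the skb now pins the socket */
        skb->sk = sk;
        skb->destructor = sk_release;
    }

    static void free_skb(struct skb *skb)
    {
        if (skb->destructor)
            skb->destructor(skb);   /* reference released exactly here */
    }

    int main(void)
    {
        struct sock sk = { .refcnt = 1 };
        struct skb skb = { 0 };

        attach(&skb, &sk);
        printf("held: %d\n", sk.refcnt);        /* 2 */
        free_skb(&skb);
        printf("after free: %d\n", sk.refcnt);  /* 1 */
        return 0;
    }
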
-acme */ + skb_orphan(skb); + sock_hold(sk); skb->sk = sk; + skb->destructor = sock_efree; } if (!sock_owned_by_user(sk)) llc_conn_rcv(sk, skb); diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index d0e1e804ebd7..5404d0d195cc 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb, ev->type = LLC_SAP_EV_TYPE_PDU; ev->reason = 0; + skb_orphan(skb); + sock_hold(sk); skb->sk = sk; + skb->destructor = sock_efree; llc_sap_state_process(sap, skb); } diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 64d3bf269a26..3818686182b2 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -7,6 +7,7 @@ #include <linux/if_arp.h> #include <linux/ipv6.h> #include <linux/mpls.h> +#include <linux/netconf.h> #include <linux/vmalloc.h> #include <linux/percpu.h> #include <net/ip.h> @@ -960,15 +961,215 @@ static size_t mpls_get_stats_af_size(const struct net_device *dev) return nla_total_size_64bit(sizeof(struct mpls_link_stats)); } +static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev, + u32 portid, u32 seq, int event, + unsigned int flags, int type) +{ + struct nlmsghdr *nlh; + struct netconfmsg *ncm; + bool all = false; + + nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg), + flags); + if (!nlh) + return -EMSGSIZE; + + if (type == NETCONFA_ALL) + all = true; + + ncm = nlmsg_data(nlh); + ncm->ncm_family = AF_MPLS; + + if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0) + goto nla_put_failure; + + if ((all || type == NETCONFA_INPUT) && + nla_put_s32(skb, NETCONFA_INPUT, + mdev->input_enabled) < 0) + goto nla_put_failure; + + nlmsg_end(skb, nlh); + return 0; + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int mpls_netconf_msgsize_devconf(int type) +{ + int size = NLMSG_ALIGN(sizeof(struct netconfmsg)) + + nla_total_size(4); /* NETCONFA_IFINDEX */ + bool all = false; + + if (type == NETCONFA_ALL) + all = true; + + if (all || type == NETCONFA_INPUT) + size += nla_total_size(4); + + return size; +} + +static void mpls_netconf_notify_devconf(struct net *net, int type, + struct mpls_dev *mdev) +{ + struct sk_buff *skb; + int err = -ENOBUFS; + + skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL); + if (!skb) + goto errout; + + err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, RTM_NEWNETCONF, + 0, type); + if (err < 0) { + /* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */ + WARN_ON(err == -EMSGSIZE); + kfree_skb(skb); + goto errout; + } + + rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL); + return; +errout: + if (err < 0) + rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err); +} + +static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = { + [NETCONFA_IFINDEX] = { .len = sizeof(int) }, +}; + +static int mpls_netconf_get_devconf(struct sk_buff *in_skb, + struct nlmsghdr *nlh) +{ + struct net *net = sock_net(in_skb->sk); + struct nlattr *tb[NETCONFA_MAX + 1]; + struct netconfmsg *ncm; + struct net_device *dev; + struct mpls_dev *mdev; + struct sk_buff *skb; + int ifindex; + int err; + + err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, + devconf_mpls_policy); + if (err < 0) + goto errout; + + err = -EINVAL; + if (!tb[NETCONFA_IFINDEX]) + goto errout; + + ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]); + dev = __dev_get_by_index(net, ifindex); + if (!dev) + goto errout; + + mdev = mpls_dev_get(dev); + if (!mdev) + goto errout; + + err = -ENOBUFS; + skb = 
nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL); + if (!skb) + goto errout; + + err = mpls_netconf_fill_devconf(skb, mdev, + NETLINK_CB(in_skb).portid, + nlh->nlmsg_seq, RTM_NEWNETCONF, 0, + NETCONFA_ALL); + if (err < 0) { + /* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */ + WARN_ON(err == -EMSGSIZE); + kfree_skb(skb); + goto errout; + } + err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); +errout: + return err; +} + +static int mpls_netconf_dump_devconf(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct hlist_head *head; + struct net_device *dev; + struct mpls_dev *mdev; + int idx, s_idx; + int h, s_h; + + s_h = cb->args[0]; + s_idx = idx = cb->args[1]; + + for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { + idx = 0; + head = &net->dev_index_head[h]; + rcu_read_lock(); + cb->seq = net->dev_base_seq; + hlist_for_each_entry_rcu(dev, head, index_hlist) { + if (idx < s_idx) + goto cont; + mdev = mpls_dev_get(dev); + if (!mdev) + goto cont; + if (mpls_netconf_fill_devconf(skb, mdev, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNETCONF, + NLM_F_MULTI, + NETCONFA_ALL) < 0) { + rcu_read_unlock(); + goto done; + } + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); +cont: + idx++; + } + rcu_read_unlock(); + } +done: + cb->args[0] = h; + cb->args[1] = idx; + + return skb->len; +} + #define MPLS_PERDEV_SYSCTL_OFFSET(field) \ (&((struct mpls_dev *)0)->field) +static int mpls_conf_proc(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int oval = *(int *)ctl->data; + int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (write) { + struct mpls_dev *mdev = ctl->extra1; + int i = (int *)ctl->data - (int *)mdev; + struct net *net = ctl->extra2; + int val = *(int *)ctl->data; + + if (i == offsetof(struct mpls_dev, input_enabled) && + val != oval) { + mpls_netconf_notify_devconf(net, + NETCONFA_INPUT, + mdev); + } + } + + return ret; +} + static const struct ctl_table mpls_dev_table[] = { { .procname = "input", .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = mpls_conf_proc, .data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled), }, { } @@ -978,6 +1179,7 @@ static int mpls_dev_sysctl_register(struct net_device *dev, struct mpls_dev *mdev) { char path[sizeof("net/mpls/conf/") + IFNAMSIZ]; + struct net *net = dev_net(dev); struct ctl_table *table; int i; @@ -988,8 +1190,11 @@ static int mpls_dev_sysctl_register(struct net_device *dev, /* Table data contains only offsets relative to the base of * the mdev at this point, so make them absolute. 
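The loop that follows turns the per-device sysctl template from relative offsets into absolute addresses: the static table stores offsetof-style offsets into struct mpls_dev, and registration adds the mdev base (and now also stashes mdev and net in extra1/extra2 for the new mpls_conf_proc handler). The offset-rebasing trick in isolation, as plain runnable C:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mdev { int input_enabled; int other; };

    struct ctl { const char *name; void *data; };

    /* The template stores offsets, not pointers, so one static copy can
     * serve every device instance. */
    static const struct ctl template[] = {
        { "input", (void *)offsetof(struct mdev, input_enabled) },
        { "other", (void *)offsetof(struct mdev, other) },
    };

    int main(void)
    {
        struct mdev dev = { .input_enabled = 1, .other = 2 };
        struct ctl table[2];

        for (size_t i = 0; i < 2; i++) {
            table[i] = template[i];
            /* rebase: offset relative to mdev -> absolute address */
            table[i].data = (char *)&dev + (uintptr_t)template[i].data;
        }

        for (size_t i = 0; i < 2; i++)
            printf("%s = %d\n", table[i].name, *(int *)table[i].data);
        return 0;
    }
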
*/ - for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) + for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) { table[i].data = (char *)mdev + (uintptr_t)table[i].data; + table[i].extra1 = mdev; + table[i].extra2 = net; + } snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name); @@ -1041,6 +1246,7 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev) if (err) goto free; + mdev->dev = dev; rcu_assign_pointer(dev->mpls_ptr, mdev); return mdev; @@ -1861,6 +2067,8 @@ static int __init mpls_init(void) rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL); rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL); rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL); + rtnl_register(PF_MPLS, RTM_GETNETCONF, mpls_netconf_get_devconf, + mpls_netconf_dump_devconf, NULL); err = 0; out: return err; diff --git a/net/mpls/internal.h b/net/mpls/internal.h index d97243034605..76360d8b9579 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -16,7 +16,7 @@ struct mpls_pcpu_stats { struct mpls_dev { int input_enabled; - + struct net_device *dev; struct mpls_pcpu_stats __percpu *stats; struct ctl_table_header *sysctl; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index c2d452eab0c5..85cd59526670 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -339,9 +339,7 @@ static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct) /* Initialize labels for a new, yet to be committed conntrack entry. Note that * since the new connection is not yet confirmed, and thus no-one else has - * access to it's labels, we simply write them over. Also, we refrain from - * triggering events, as receiving change events before the create event would - * be confusing. + * access to it's labels, we simply write them over. */ static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key, const struct ovs_key_ct_labels *labels, @@ -374,6 +372,11 @@ static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key, & mask->ct_labels_32[i]); } + /* Labels are included in the IPCTNL_MSG_CT_NEW event only if the + * IPCT_LABEL bit it set in the event cache. 
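The nf_conntrack_event_cache(IPCT_LABEL, ct) call that follows just sets a bit in a per-connection event mask; the netlink code later tests that mask when building the NEW event. An event cache of this shape is simply a bitmask accumulator, e.g. (illustrative subset of event IDs, not the real enum):

    #include <stdio.h>

    enum { IPCT_NEW = 0, IPCT_LABEL = 1 };  /* illustrative subset */

    struct conn { unsigned long cache; };

    static void event_cache(int event, struct conn *ct)
    {
        ct->cache |= 1UL << event;   /* record now, deliver later in one message */
    }

    int main(void)
    {
        struct conn ct = { 0 };

        event_cache(IPCT_NEW, &ct);
        event_cache(IPCT_LABEL, &ct);

        /* Only include labels in the NEW message if the bit was cached. */
        if (ct.cache & (1UL << IPCT_LABEL))
            printf("NEW event will carry labels\n");
        return 0;
    }
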
+ */ + nf_conntrack_event_cache(IPCT_LABEL, ct); + memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN); return 0; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index a984f6f9ab4e..2bd0d1949312 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1505,6 +1505,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po) f->arr[f->num_members] = sk; smp_wmb(); f->num_members++; + if (f->num_members == 1) + dev_add_pack(&f->prot_hook); spin_unlock(&f->lock); } @@ -1521,6 +1523,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) BUG_ON(i >= f->num_members); f->arr[i] = f->arr[f->num_members - 1]; f->num_members--; + if (f->num_members == 0) + __dev_remove_pack(&f->prot_hook); spin_unlock(&f->lock); } @@ -1627,6 +1631,7 @@ static void fanout_release_data(struct packet_fanout *f) static int fanout_add(struct sock *sk, u16 id, u16 type_flags) { + struct packet_rollover *rollover = NULL; struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f, *match; u8 type = type_flags & 0xff; @@ -1649,23 +1654,28 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) return -EINVAL; } + mutex_lock(&fanout_mutex); + + err = -EINVAL; if (!po->running) - return -EINVAL; + goto out; + err = -EALREADY; if (po->fanout) - return -EALREADY; + goto out; if (type == PACKET_FANOUT_ROLLOVER || (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { - po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL); - if (!po->rollover) - return -ENOMEM; - atomic_long_set(&po->rollover->num, 0); - atomic_long_set(&po->rollover->num_huge, 0); - atomic_long_set(&po->rollover->num_failed, 0); + err = -ENOMEM; + rollover = kzalloc(sizeof(*rollover), GFP_KERNEL); + if (!rollover) + goto out; + atomic_long_set(&rollover->num, 0); + atomic_long_set(&rollover->num_huge, 0); + atomic_long_set(&rollover->num_failed, 0); + po->rollover = rollover; } - mutex_lock(&fanout_mutex); match = NULL; list_for_each_entry(f, &fanout_list, list) { if (f->id == id && @@ -1695,7 +1705,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; match->prot_hook.id_match = match_fanout_group; - dev_add_pack(&match->prot_hook); list_add(&match->list, &fanout_list); } err = -EINVAL; @@ -1712,36 +1721,40 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) } } out: - mutex_unlock(&fanout_mutex); - if (err) { - kfree(po->rollover); + if (err && rollover) { + kfree(rollover); po->rollover = NULL; } + mutex_unlock(&fanout_mutex); return err; } -static void fanout_release(struct sock *sk) +/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes + * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout. 
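As the comment being added here spells out, fanout_release() now only unlinks the fanout under fanout_mutex and hands it back; packet_release() frees it after synchronize_net(), so no reader in an RCU section can still hold it. A compressed sketch of that unlink-then-defer shape, with synchronize_net() stubbed out and a single-entry list for brevity (names invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fanout { int sk_ref; struct fanout *next; };

    static pthread_mutex_t fanout_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct fanout *fanout_list;

    /* Unlink under the mutex only; return the dead object to the caller. */
    static struct fanout *fanout_release(struct fanout *f)
    {
        struct fanout *dead = NULL;

        pthread_mutex_lock(&fanout_mutex);
        if (--f->sk_ref == 0) {
            fanout_list = f->next;   /* simplified single-entry unlink */
            dead = f;
        }
        pthread_mutex_unlock(&fanout_mutex);
        return dead;
    }

    static void synchronize_readers(void)
    {
        /* stand-in for synchronize_net(): wait out all concurrent readers */
    }

    int main(void)
    {
        struct fanout *f = calloc(1, sizeof(*f));

        f->sk_ref = 1;
        fanout_list = f;

        struct fanout *dead = fanout_release(f);
        if (dead) {
            synchronize_readers();   /* grace period first... */
            free(dead);              /* ...then it is safe to free */
        }
        printf("list empty: %s\n", fanout_list ? "no" : "yes");
        return 0;
    }
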
+ * It is the responsibility of the caller to call fanout_release_data() and + * free the returned packet_fanout (after synchronize_net()) + */ +static struct packet_fanout *fanout_release(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); struct packet_fanout *f; + mutex_lock(&fanout_mutex); f = po->fanout; - if (!f) - return; + if (f) { + po->fanout = NULL; - mutex_lock(&fanout_mutex); - po->fanout = NULL; + if (atomic_dec_and_test(&f->sk_ref)) + list_del(&f->list); + else + f = NULL; - if (atomic_dec_and_test(&f->sk_ref)) { - list_del(&f->list); - dev_remove_pack(&f->prot_hook); - fanout_release_data(f); - kfree(f); + if (po->rollover) + kfree_rcu(po->rollover, rcu); } mutex_unlock(&fanout_mutex); - if (po->rollover) - kfree_rcu(po->rollover, rcu); + return f; } static bool packet_extra_vlan_len_allowed(const struct net_device *dev, @@ -2928,6 +2941,7 @@ static int packet_release(struct socket *sock) { struct sock *sk = sock->sk; struct packet_sock *po; + struct packet_fanout *f; struct net *net; union tpacket_req_u req_u; @@ -2967,9 +2981,14 @@ static int packet_release(struct socket *sock) packet_set_ring(sk, &req_u, 1, 1); } - fanout_release(sk); + f = fanout_release(sk); synchronize_net(); + + if (f) { + fanout_release_data(f); + kfree(f); + } /* * Now the socket is dead. No more input will appear. */ @@ -3921,7 +3940,6 @@ static int packet_notifier(struct notifier_block *this, } if (msg == NETDEV_UNREGISTER) { packet_cached_dev_reset(po); - fanout_release(sk); po->ifindex = -1; if (po->prot_hook.dev) dev_put(po->prot_hook.dev); diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 5e72de10c484..6ab39dbcca01 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -770,7 +770,6 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op) work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos); if (work_alloc != 1) { - rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_stats_inc(s_ib_tx_ring_full); ret = -ENOMEM; goto out; diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile index 8fc6ea347182..b9da4d6b914f 100644 --- a/net/rxrpc/Makefile +++ b/net/rxrpc/Makefile @@ -2,7 +2,9 @@ # Makefile for Linux kernel RxRPC # -af-rxrpc-y := \ +obj-$(CONFIG_AF_RXRPC) += rxrpc.o + +rxrpc-y := \ af_rxrpc.o \ call_accept.o \ call_event.o \ @@ -26,8 +28,6 @@ af-rxrpc-y := \ skbuff.o \ utils.o -af-rxrpc-$(CONFIG_PROC_FS) += proc.o -af-rxrpc-$(CONFIG_RXKAD) += rxkad.o -af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o - -obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o +rxrpc-$(CONFIG_PROC_FS) += proc.o +rxrpc-$(CONFIG_RXKAD) += rxkad.o +rxrpc-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index d9c97018317d..80f688436dd7 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -148,6 +148,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, struct net_device *dev = tp->q->dev_queue->dev; struct tc_cls_bpf_offload bpf_offload = {}; struct tc_to_netdev offload; + int err; offload.type = TC_SETUP_CLSBPF; offload.cls_bpf = &bpf_offload; @@ -159,8 +160,13 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, bpf_offload.exts_integrated = prog->exts_integrated; bpf_offload.gen_flags = prog->gen_flags; - return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, - tp->protocol, &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, + tp->protocol, &offload); + + if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE)) + prog->gen_flags |= TCA_CLS_FLAGS_IN_HW; + + return err; 
} static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog, @@ -511,6 +517,9 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, return ret; } + if (!tc_in_hw(prog->gen_flags)) + prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW; + if (oldprog) { list_replace_rcu(&oldprog->link, &prog->link); tcf_unbind_filter(tp, &oldprog->res); diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 0826c8ec3a76..9d0c99d2e9fb 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -273,6 +273,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, tc); + if (!err) + f->flags |= TCA_CLS_FLAGS_IN_HW; if (tc_skip_sw(f->flags)) return err; @@ -912,6 +914,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, goto errout; } + if (!tc_in_hw(fnew->flags)) + fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW; + if (fold) { if (!tc_skip_sw(fold->flags)) rhashtable_remove_fast(&head->ht, &fold->ht_node, @@ -1229,7 +1234,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) goto nla_put_failure; - nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags); + if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) + goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts)) goto nla_put_failure; diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index f2141cb55562..224eb2c14346 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -56,6 +56,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, struct net_device *dev = tp->q->dev_queue->dev; struct tc_to_netdev offload; struct tc_cls_matchall_offload mall_offload = {0}; + int err; offload.type = TC_SETUP_MATCHALL; offload.cls_mall = &mall_offload; @@ -63,8 +64,12 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, offload.cls_mall->exts = &head->exts; offload.cls_mall->cookie = cookie; - return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, - &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, + &offload); + if (!err) + head->flags |= TCA_CLS_FLAGS_IN_HW; + + return err; } static void mall_destroy_hw_filter(struct tcf_proto *tp, @@ -194,6 +199,9 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, } } + if (!tc_in_hw(new->flags)) + new->flags |= TCA_CLS_FLAGS_NOT_IN_HW; + *arg = (unsigned long) head; rcu_assign_pointer(tp->root, new); if (head) @@ -244,6 +252,9 @@ static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid)) goto nla_put_failure; + if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags)) + goto nla_put_failure; + if (tcf_exts_dump(skb, &head->exts)) goto nla_put_failure; diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index a6ec3e4b57ab..4dbe0c680fe6 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -523,6 +523,10 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &offload); + + if (!err) + n->flags |= TCA_CLS_FLAGS_IN_HW; + if (tc_skip_sw(flags)) return err; @@ -895,6 +899,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return err; } + if (!tc_in_hw(new->flags)) + new->flags |= TCA_CLS_FLAGS_NOT_IN_HW; + u32_replace_knode(tp, tp_c, new); tcf_unbind_filter(tp, &n->res); call_rcu(&n->rcu, u32_delete_key_rcu); @@ 
-1014,6 +1021,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, if (err) goto errhw; + if (!tc_in_hw(n->flags)) + n->flags |= TCA_CLS_FLAGS_NOT_IN_HW; + ins = &ht->ht[TC_U32_HASH(handle)]; for (pins = rtnl_dereference(*ins); pins; ins = &pins->next, pins = rtnl_dereference(*ins)) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index a13c15e8f087..bcf49cd22786 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -440,7 +440,6 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab) EXPORT_SYMBOL(qdisc_put_rtab); static LIST_HEAD(qdisc_stab_list); -static DEFINE_SPINLOCK(qdisc_stab_lock); static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = { [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) }, @@ -474,20 +473,15 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) if (tsize != s->tsize || (!tab && tsize > 0)) return ERR_PTR(-EINVAL); - spin_lock(&qdisc_stab_lock); - list_for_each_entry(stab, &qdisc_stab_list, list) { if (memcmp(&stab->szopts, s, sizeof(*s))) continue; if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16))) continue; stab->refcnt++; - spin_unlock(&qdisc_stab_lock); return stab; } - spin_unlock(&qdisc_stab_lock); - stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); if (!stab) return ERR_PTR(-ENOMEM); @@ -497,9 +491,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt) if (tsize > 0) memcpy(stab->data, tab, tsize * sizeof(u16)); - spin_lock(&qdisc_stab_lock); list_add_tail(&stab->list, &qdisc_stab_list); - spin_unlock(&qdisc_stab_lock); return stab; } @@ -514,14 +506,10 @@ void qdisc_put_stab(struct qdisc_size_table *tab) if (!tab) return; - spin_lock(&qdisc_stab_lock); - if (--tab->refcnt == 0) { list_del(&tab->list); call_rcu_bh(&tab->rcu, stab_kfree_rcu); } - - spin_unlock(&qdisc_stab_lock); } EXPORT_SYMBOL(qdisc_put_stab); diff --git a/net/sctp/input.c b/net/sctp/input.c index 704ad19c1565..fc458968fe4b 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -872,6 +872,8 @@ void sctp_transport_hashtable_destroy(void) int sctp_hash_transport(struct sctp_transport *t) { + struct sctp_transport *transport; + struct rhlist_head *tmp, *list; struct sctp_hash_cmp_arg arg; int err; @@ -882,8 +884,19 @@ int sctp_hash_transport(struct sctp_transport *t) arg.paddr = &t->ipaddr; arg.lport = htons(t->asoc->base.bind_addr.port); + list = rhltable_lookup(&sctp_transport_hashtable, &arg, + sctp_hash_params); + + rhl_for_each_entry_rcu(transport, tmp, list, node) + if (transport->asoc->ep == t->asoc->ep) { + err = -EEXIST; + goto out; + } + err = rhltable_insert_key(&sctp_transport_hashtable, &arg, &t->node, sctp_hash_params); + +out: if (err) pr_err_once("insert transport fail, errno %d\n", err); diff --git a/net/sctp/output.c b/net/sctp/output.c index 814eac047467..85406d5f8f41 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -704,18 +704,15 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, * unacknowledged. 
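* (The force_delay test added below is driven by MSG_MORE: sctp_sendmsg() sets datamsg->force_delay when the user signalled that more data follows, so bundling is still delayed even with nodelay set or nothing in flight.)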
*/ - if (sctp_sk(asoc->base.sk)->nodelay) - /* Nagle disabled */ + if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && + !chunk->msg->force_delay) + /* Nothing unacked */ return SCTP_XMIT_OK; if (!sctp_packet_empty(packet)) /* Append to packet */ return SCTP_XMIT_OK; - if (inflight == 0) - /* Nothing unacked */ - return SCTP_XMIT_OK; - if (!sctp_state(asoc, ESTABLISHED)) return SCTP_XMIT_OK; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 7f8dbf2c6cee..969a30c7bb54 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3617,7 +3617,7 @@ struct sctp_chunk *sctp_make_strreset_req( __u16 stream_len = stream_num * 2; struct sctp_strreset_inreq inreq; struct sctp_chunk *retval; - __u16 outlen, inlen, i; + __u16 outlen, inlen; outlen = (sizeof(outreq) + stream_len) * out; inlen = (sizeof(inreq) + stream_len) * in; @@ -3626,9 +3626,6 @@ struct sctp_chunk *sctp_make_strreset_req( if (!retval) return NULL; - for (i = 0; i < stream_num; i++) - stream_list[i] = htons(stream_list[i]); - if (outlen) { outreq.param_hdr.type = SCTP_PARAM_RESET_OUT_REQUEST; outreq.param_hdr.length = htons(outlen); @@ -3653,9 +3650,6 @@ struct sctp_chunk *sctp_make_strreset_req( sctp_addto_chunk(retval, stream_len, stream_list); } - for (i = 0; i < stream_num; i++) - stream_list[i] = ntohs(stream_list[i]); - return retval; } @@ -3733,3 +3727,136 @@ struct sctp_chunk *sctp_make_strreset_addstrm( return retval; } + +/* RE-CONFIG 4.4 (RESP) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Parameter Type = 16 | Parameter Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Re-configuration Response Sequence Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Result | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct sctp_chunk *sctp_make_strreset_resp( + const struct sctp_association *asoc, + __u32 result, __u32 sn) +{ + struct sctp_strreset_resp resp; + __u16 length = sizeof(resp); + struct sctp_chunk *retval; + + retval = sctp_make_reconf(asoc, length); + if (!retval) + return NULL; + + resp.param_hdr.type = SCTP_PARAM_RESET_RESPONSE; + resp.param_hdr.length = htons(length); + resp.response_seq = htonl(sn); + resp.result = htonl(result); + + sctp_addto_chunk(retval, sizeof(resp), &resp); + + return retval; +} + +/* RE-CONFIG 4.4 OPTIONAL (TSNRESP) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Parameter Type = 16 | Parameter Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Re-configuration Response Sequence Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Result | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Sender's Next TSN (optional) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Receiver's Next TSN (optional) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct sctp_chunk *sctp_make_strreset_tsnresp( + struct sctp_association *asoc, + __u32 result, __u32 sn, + __u32 sender_tsn, __u32 receiver_tsn) +{ + struct sctp_strreset_resptsn tsnresp; + __u16 length = sizeof(tsnresp); + struct sctp_chunk *retval; + + retval = sctp_make_reconf(asoc, length); + if (!retval) + return NULL; + + tsnresp.param_hdr.type = 
SCTP_PARAM_RESET_RESPONSE; + tsnresp.param_hdr.length = htons(length); + + tsnresp.response_seq = htonl(sn); + tsnresp.result = htonl(result); + tsnresp.senders_next_tsn = htonl(sender_tsn); + tsnresp.receivers_next_tsn = htonl(receiver_tsn); + + sctp_addto_chunk(retval, sizeof(tsnresp), &tsnresp); + + return retval; +} + +bool sctp_verify_reconf(const struct sctp_association *asoc, + struct sctp_chunk *chunk, + struct sctp_paramhdr **errp) +{ + struct sctp_reconf_chunk *hdr; + union sctp_params param; + __u16 last = 0, cnt = 0; + + hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; + sctp_walk_params(param, hdr, params) { + __u16 length = ntohs(param.p->length); + + *errp = param.p; + if (cnt++ > 2) + return false; + switch (param.p->type) { + case SCTP_PARAM_RESET_OUT_REQUEST: + if (length < sizeof(struct sctp_strreset_outreq) || + (last && last != SCTP_PARAM_RESET_RESPONSE && + last != SCTP_PARAM_RESET_IN_REQUEST)) + return false; + break; + case SCTP_PARAM_RESET_IN_REQUEST: + if (length < sizeof(struct sctp_strreset_inreq) || + (last && last != SCTP_PARAM_RESET_OUT_REQUEST)) + return false; + break; + case SCTP_PARAM_RESET_RESPONSE: + if ((length != sizeof(struct sctp_strreset_resp) && + length != sizeof(struct sctp_strreset_resptsn)) || + (last && last != SCTP_PARAM_RESET_RESPONSE && + last != SCTP_PARAM_RESET_OUT_REQUEST)) + return false; + break; + case SCTP_PARAM_RESET_TSN_REQUEST: + if (length != + sizeof(struct sctp_strreset_tsnreq) || last) + return false; + break; + case SCTP_PARAM_RESET_ADD_IN_STREAMS: + if (length != sizeof(struct sctp_strreset_addstrm) || + (last && last != SCTP_PARAM_RESET_ADD_OUT_STREAMS)) + return false; + break; + case SCTP_PARAM_RESET_ADD_OUT_STREAMS: + if (length != sizeof(struct sctp_strreset_addstrm) || + (last && last != SCTP_PARAM_RESET_ADD_IN_STREAMS)) + return false; + break; + default: + return false; + } + + last = param.p->type; + } + + return true; +} diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 51abcc90fe75..25384fa82ba9 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -872,6 +872,10 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, if (!sctp_style(sk, UDP)) sk->sk_state_change(sk); } + + if (sctp_state(asoc, SHUTDOWN_PENDING) && + !sctp_outq_is_empty(&asoc->outqueue)) + sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC); } /* Helper function to delete an association. */ diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index d8798ddda726..e03bb1aab4d0 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3834,6 +3834,60 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, return SCTP_DISPOSITION_DISCARD; } +/* RE-CONFIG Section 5.2 Upon reception of an RECONF Chunk. */ +sctp_disposition_t sctp_sf_do_reconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_paramhdr *err_param = NULL; + struct sctp_chunk *chunk = arg; + struct sctp_reconf_chunk *hdr; + union sctp_params param; + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + + /* Make sure that the RECONF chunk has a valid length. 
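+ * It must cover at least the fixed reconf chunk header; parameter + * lengths and ordering are then validated by sctp_verify_reconf().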
*/ + if (!sctp_chunk_length_valid(chunk, sizeof(*hdr))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + if (!sctp_verify_reconf(asoc, chunk, &err_param)) + return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, + (void *)err_param, commands); + + hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; + sctp_walk_params(param, hdr, params) { + struct sctp_chunk *reply = NULL; + struct sctp_ulpevent *ev = NULL; + + if (param.p->type == SCTP_PARAM_RESET_OUT_REQUEST) + reply = sctp_process_strreset_outreq( + (struct sctp_association *)asoc, param, &ev); + else if (param.p->type == SCTP_PARAM_RESET_IN_REQUEST) + reply = sctp_process_strreset_inreq( + (struct sctp_association *)asoc, param, &ev); + /* More handlers for other parameter types will be added here; for + * now, any other type is simply ignored. + */ + + if (ev) + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(ev)); + + if (reply) + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(reply)); + } + + return SCTP_DISPOSITION_CONSUME; +} + /* * PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP * diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index b5438b4f6c1e..419b18ebb056 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -482,6 +482,32 @@ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUN TYPE_SCTP_FWD_TSN, }; /*state_fn_t prsctp_chunk_event_table[][] */ +#define TYPE_SCTP_RECONF { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_reconf), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_reconf), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_RECONF */ + +/* The primary index for this table is the chunk type. + * The secondary index for this table is the state. + */ +static const sctp_sm_table_entry_t reconf_chunk_event_table[SCTP_NUM_RECONF_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_RECONF, +}; /*state_fn_t reconf_chunk_event_table[][] */ + #define TYPE_SCTP_AUTH { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ @@ -964,6 +990,10 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net, return &addip_chunk_event_table[1][state]; } + if (net->sctp.reconf_enable) + if (cid == SCTP_CID_RECONF) + return &reconf_chunk_event_table[0][state]; + if (net->sctp.auth_enable) { if (cid == SCTP_CID_AUTH) return &auth_chunk_event_table[0][state]; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 75f35cea4371..b5321486fbed 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1964,6 +1964,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) err = PTR_ERR(datamsg); goto out_free; } + datamsg->force_delay = !!(msg->msg_flags & MSG_MORE); /* Now send the (possibly) fragmented message. 
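* Every chunk of the datamsg shares msg->force_delay, so MSG_MORE applies to each fragment of the message alike.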
*/ list_for_each_entry(chunk, &datamsg->chunks, frag_list) { diff --git a/net/sctp/stream.c b/net/sctp/stream.c index eb02490245ba..1c6cc04fa3a4 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -135,7 +135,14 @@ int sctp_send_reset_streams(struct sctp_association *asoc, if (str_list[i] >= stream->incnt) goto out; + for (i = 0; i < str_nums; i++) + str_list[i] = htons(str_list[i]); + chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in); + + for (i = 0; i < str_nums; i++) + str_list[i] = ntohs(str_list[i]); + if (!chunk) { retval = -ENOMEM; goto out; @@ -294,3 +301,179 @@ int sctp_send_add_streams(struct sctp_association *asoc, out: return retval; } + +static sctp_paramhdr_t *sctp_chunk_lookup_strreset_param( + struct sctp_association *asoc, __u32 resp_seq) +{ + struct sctp_chunk *chunk = asoc->strreset_chunk; + struct sctp_reconf_chunk *hdr; + union sctp_params param; + + if (ntohl(resp_seq) != asoc->strreset_outseq || !chunk) + return NULL; + + hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; + sctp_walk_params(param, hdr, params) { + /* sctp_strreset_tsnreq is actually the basic structure + * of all stream reconf params, so it's safe to use it + * to access request_seq. + */ + struct sctp_strreset_tsnreq *req = param.v; + + if (req->request_seq == resp_seq) + return param.v; + } + + return NULL; +} + +struct sctp_chunk *sctp_process_strreset_outreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp) +{ + struct sctp_strreset_outreq *outreq = param.v; + struct sctp_stream *stream = asoc->stream; + __u16 i, nums, flags = 0, *str_p = NULL; + __u32 result = SCTP_STRRESET_DENIED; + __u32 request_seq; + + request_seq = ntohl(outreq->request_seq); + + if (ntohl(outreq->send_reset_at_tsn) > + sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)) { + result = SCTP_STRRESET_IN_PROGRESS; + goto out; + } + + if (request_seq > asoc->strreset_inseq) { + result = SCTP_STRRESET_ERR_BAD_SEQNO; + goto out; + } else if (request_seq == asoc->strreset_inseq) { + asoc->strreset_inseq++; + } + + /* Check strreset_enable only after inseq has been bumped: from a + * DENIED response alone the sender cannot tell that the peer has + * stream reset disabled, and this also matches the BSD behaviour. 
+ */ + if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) + goto out; + + if (asoc->strreset_chunk) { + sctp_paramhdr_t *param_hdr; + struct sctp_transport *t; + + param_hdr = sctp_chunk_lookup_strreset_param( + asoc, outreq->response_seq); + if (!param_hdr || param_hdr->type != + SCTP_PARAM_RESET_IN_REQUEST) { + /* handled the same as when a request is still outstanding */ + result = SCTP_STRRESET_ERR_IN_PROGRESS; + goto out; + } + + asoc->strreset_outstanding--; + asoc->strreset_outseq++; + + if (!asoc->strreset_outstanding) { + t = asoc->strreset_chunk->transport; + if (del_timer(&t->reconf_timer)) + sctp_transport_put(t); + + sctp_chunk_put(asoc->strreset_chunk); + asoc->strreset_chunk = NULL; + } + + flags = SCTP_STREAM_RESET_INCOMING_SSN; + } + + nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2; + if (nums) { + str_p = outreq->list_of_streams; + for (i = 0; i < nums; i++) { + if (ntohs(str_p[i]) >= stream->incnt) { + result = SCTP_STRRESET_ERR_WRONG_SSN; + goto out; + } + } + + for (i = 0; i < nums; i++) + stream->in[ntohs(str_p[i])].ssn = 0; + } else { + for (i = 0; i < stream->incnt; i++) + stream->in[i].ssn = 0; + } + + result = SCTP_STRRESET_PERFORMED; + + *evp = sctp_ulpevent_make_stream_reset_event(asoc, + flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p, + GFP_ATOMIC); + +out: + return sctp_make_strreset_resp(asoc, result, request_seq); +} + +struct sctp_chunk *sctp_process_strreset_inreq( + struct sctp_association *asoc, + union sctp_params param, + struct sctp_ulpevent **evp) +{ + struct sctp_strreset_inreq *inreq = param.v; + struct sctp_stream *stream = asoc->stream; + __u32 result = SCTP_STRRESET_DENIED; + struct sctp_chunk *chunk = NULL; + __u16 i, nums, *str_p; + __u32 request_seq; + + request_seq = ntohl(inreq->request_seq); + if (request_seq > asoc->strreset_inseq) { + result = SCTP_STRRESET_ERR_BAD_SEQNO; + goto out; + } else if (request_seq == asoc->strreset_inseq) { + asoc->strreset_inseq++; + } + + if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) + goto out; + + if (asoc->strreset_outstanding) { + result = SCTP_STRRESET_ERR_IN_PROGRESS; + goto out; + } + + nums = (ntohs(param.p->length) - sizeof(*inreq)) / 2; + str_p = inreq->list_of_streams; + for (i = 0; i < nums; i++) { + if (ntohs(str_p[i]) >= stream->outcnt) { + result = SCTP_STRRESET_ERR_WRONG_SSN; + goto out; + } + } + + chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0); + if (!chunk) + goto out; + + if (nums) + for (i = 0; i < nums; i++) + stream->out[ntohs(str_p[i])].state = + SCTP_STREAM_CLOSED; + else + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_CLOSED; + + asoc->strreset_chunk = chunk; + asoc->strreset_outstanding = 1; + sctp_chunk_hold(asoc->strreset_chunk); + + *evp = sctp_ulpevent_make_stream_reset_event(asoc, + SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC); + +out: + if (!chunk) + chunk = sctp_make_strreset_resp(asoc, result, request_seq); + + return chunk; +} diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index bea00058ce35..c8881bc542a0 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -854,6 +854,35 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event( return event; } +struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( + const struct sctp_association *asoc, __u16 flags, __u16 stream_num, + __u16 *stream_list, gfp_t gfp) +{ + struct sctp_stream_reset_event *sreset; + struct sctp_ulpevent *event; + struct sk_buff *skb; + int length, i; + + length = sizeof(struct sctp_stream_reset_event) + 2 * 
stream_num; + event = sctp_ulpevent_new(length, MSG_NOTIFICATION, gfp); + if (!event) + return NULL; + + skb = sctp_event2skb(event); + sreset = (struct sctp_stream_reset_event *)skb_put(skb, length); + + sreset->strreset_type = SCTP_STREAM_RESET_EVENT; + sreset->strreset_flags = flags; + sreset->strreset_length = length; + sctp_ulpevent_set_owner(event, asoc); + sreset->strreset_assoc_id = sctp_assoc2id(asoc); + + for (i = 0; i < stream_num; i++) + sreset->strreset_stream_list[i] = ntohs(stream_list[i]); + + return event; +} + /* Return the notification type, assuming this is a notification * event. */ diff --git a/net/tipc/net.c b/net/tipc/net.c index 28bf4feeb81c..ab8a2d5d1e32 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -110,6 +110,10 @@ int tipc_net_start(struct net *net, u32 addr) char addr_string[16]; tn->own_addr = addr; + + /* Ensure that the new address is visible before we reinit. */ + smp_mb(); + tipc_named_reinit(net); tipc_sk_reinit(net); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 103d1fd058c0..6b09a778cc71 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -430,8 +430,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock, INIT_LIST_HEAD(&tsk->cong_links); msg = &tsk->phdr; tn = net_generic(sock_net(sk), tipc_net_id); - tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, - NAMED_H_SIZE, 0); /* Finish initializing socket data structures */ sock->ops = ops; @@ -441,6 +439,13 @@ static int tipc_sk_create(struct net *net, struct socket *sock, pr_warn("Socket create failed; port number exhausted\n"); return -EINVAL; } + + /* Ensure tsk is visible before we read own_addr. */ + smp_mb(); + + tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, + NAMED_H_SIZE, 0); + msg_set_origport(msg, tsk->portid); setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk); sk->sk_shutdown = 0; @@ -2234,24 +2239,27 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, void tipc_sk_reinit(struct net *net) { struct tipc_net *tn = net_generic(net, tipc_net_id); - const struct bucket_table *tbl; - struct rhash_head *pos; + struct rhashtable_iter iter; struct tipc_sock *tsk; struct tipc_msg *msg; - int i; - rcu_read_lock(); - tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); - for (i = 0; i < tbl->size; i++) { - rht_for_each_entry_rcu(tsk, pos, tbl, i, node) { + rhashtable_walk_enter(&tn->sk_rht, &iter); + + do { + tsk = ERR_PTR(rhashtable_walk_start(&iter)); + if (tsk) + continue; + + while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { spin_lock_bh(&tsk->sk.sk_lock.slock); msg = &tsk->phdr; msg_set_prevnode(msg, tn->own_addr); msg_set_orignode(msg, tn->own_addr); spin_unlock_bh(&tsk->sk.sk_lock.slock); } - } - rcu_read_unlock(); + + rhashtable_walk_stop(&iter); + } while (tsk == ERR_PTR(-EAGAIN)); } static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig index c06d3997c6e7..286ed25c1a69 100644 --- a/net/xfrm/Kconfig +++ b/net/xfrm/Kconfig @@ -6,6 +6,10 @@ config XFRM depends on NET select GRO_CELLS +config XFRM_OFFLOAD + bool + depends on XFRM + config XFRM_ALGO tristate select XFRM diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 3213fe8027be..46bdb4fbed0b 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -19,19 +19,18 @@ static struct kmem_cache *secpath_cachep __read_mostly; static DEFINE_SPINLOCK(xfrm_input_afinfo_lock); -static struct xfrm_input_afinfo __rcu *xfrm_input_afinfo[NPROTO]; 
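+/* Only IPv4 and IPv6 ever register input handlers here, so the table is now indexed by address family and sized AF_INET6 + 1 rather than NPROTO. */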
+static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1]; static struct gro_cells gro_cells; static struct net_device xfrm_napi_dev; -int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo) +int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo) { int err = 0; - if (unlikely(afinfo == NULL)) - return -EINVAL; - if (unlikely(afinfo->family >= NPROTO)) + if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo))) return -EAFNOSUPPORT; + spin_lock_bh(&xfrm_input_afinfo_lock); if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL)) err = -EEXIST; @@ -42,14 +41,10 @@ int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_input_register_afinfo); -int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo) +int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo) { int err = 0; - if (unlikely(afinfo == NULL)) - return -EINVAL; - if (unlikely(afinfo->family >= NPROTO)) - return -EAFNOSUPPORT; spin_lock_bh(&xfrm_input_afinfo_lock); if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) { if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo)) @@ -63,12 +58,13 @@ int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_input_unregister_afinfo); -static struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family) +static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family) { - struct xfrm_input_afinfo *afinfo; + const struct xfrm_input_afinfo *afinfo; - if (unlikely(family >= NPROTO)) + if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo))) return NULL; + rcu_read_lock(); afinfo = rcu_dereference(xfrm_input_afinfo[family]); if (unlikely(!afinfo)) @@ -76,22 +72,17 @@ static struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family) return afinfo; } -static void xfrm_input_put_afinfo(struct xfrm_input_afinfo *afinfo) -{ - rcu_read_unlock(); -} - static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol, int err) { int ret; - struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family); + const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family); if (!afinfo) return -EAFNOSUPPORT; ret = afinfo->callback(skb, protocol, err); - xfrm_input_put_afinfo(afinfo); + rcu_read_unlock(); return ret; } @@ -114,6 +105,8 @@ struct sec_path *secpath_dup(struct sec_path *src) return NULL; sp->len = 0; + sp->olen = 0; + if (src) { int i; @@ -126,6 +119,24 @@ struct sec_path *secpath_dup(struct sec_path *src) } EXPORT_SYMBOL(secpath_dup); +int secpath_set(struct sk_buff *skb) +{ + struct sec_path *sp; + + /* Allocate new secpath or COW existing one. */ + if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { + sp = secpath_dup(skb->sp); + if (!sp) + return -ENOMEM; + + if (skb->sp) + secpath_put(skb->sp); + skb->sp = sp; + } + return 0; +} +EXPORT_SYMBOL(secpath_set); + /* Fetch spi and seq from ipsec header */ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) @@ -161,6 +172,7 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq) *seq = *(__be32 *)(skb_transport_header(skb) + offset_seq); return 0; } +EXPORT_SYMBOL(xfrm_parse_spi); int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) { @@ -195,14 +207,23 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) unsigned int family; int decaps = 0; int async = 0; + struct xfrm_offload *xo; + bool xfrm_gro = false; - /* A negative encap_type indicates async resumption. 
*/ if (encap_type < 0) { - async = 1; x = xfrm_input_state(skb); - seq = XFRM_SKB_CB(skb)->seq.input.low; family = x->outer_mode->afinfo->family; - goto resume; + + /* An encap_type of -1 indicates async resumption. */ + if (encap_type == -1) { + async = 1; + seq = XFRM_SKB_CB(skb)->seq.input.low; + goto resume; + } + /* encap_type < -1 indicates a GRO call. */ + encap_type = 0; + seq = XFRM_SPI_SKB_CB(skb)->seq; + goto lock; } daddr = (xfrm_address_t *)(skb_network_header(skb) + @@ -221,18 +242,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) break; } - /* Allocate new secpath or COW existing one. */ - if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { - struct sec_path *sp; - - sp = secpath_dup(skb->sp); - if (!sp) { - XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR); - goto drop; - } - if (skb->sp) - secpath_put(skb->sp); - skb->sp = sp; + err = secpath_set(skb); + if (err) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR); + goto drop; } seq = 0; @@ -256,6 +269,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) skb->sp->xvec[skb->sp->len++] = x; +lock: spin_lock(&x->lock); if (unlikely(x->km.state != XFRM_STATE_VALID)) { @@ -377,7 +391,18 @@ resume: gro_cells_receive(&gro_cells, skb); return 0; } else { - return x->inner_mode->afinfo->transport_finish(skb, async); + xo = xfrm_offload(skb); + if (xo) + xfrm_gro = xo->flags & XFRM_GRO; + + err = x->inner_mode->afinfo->transport_finish(skb, async); + if (xfrm_gro) { + skb_dst_drop(skb); + gro_cells_receive(&gro_cells, skb); + return err; + } + + return err; } drop_unlock: diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 0a0f63d3cc96..5f3e87866438 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -45,7 +45,7 @@ struct xfrm_flo { }; static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); -static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] +static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1] __read_mostly; static struct kmem_cache *xfrm_dst_cache __read_mostly; @@ -103,11 +103,11 @@ bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl return false; } -static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) +static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) { - struct xfrm_policy_afinfo *afinfo; + const struct xfrm_policy_afinfo *afinfo; - if (unlikely(family >= NPROTO)) + if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo))) return NULL; rcu_read_lock(); afinfo = rcu_dereference(xfrm_policy_afinfo[family]); @@ -116,18 +116,13 @@ static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) return afinfo; } -static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) -{ - rcu_read_unlock(); -} - static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t *daddr, int family) { - struct xfrm_policy_afinfo *afinfo; + const struct xfrm_policy_afinfo *afinfo; struct dst_entry *dst; afinfo = xfrm_policy_get_afinfo(family); @@ -136,7 +131,7 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return dst; } @@ -1431,12 +1426,12 @@ xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local, xfrm_address_t *remote, unsigned short family) { int err; - struct xfrm_policy_afinfo *afinfo = 
xfrm_policy_get_afinfo(family); + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); if (unlikely(afinfo == NULL)) return -EINVAL; err = afinfo->get_saddr(net, oif, local, remote); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return err; } @@ -1538,21 +1533,15 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, } -/* Check that the bundle accepts the flow and its components are - * still valid. - */ - -static inline int xfrm_get_tos(const struct flowi *fl, int family) +static int xfrm_get_tos(const struct flowi *fl, int family) { - struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); - int tos; + const struct xfrm_policy_afinfo *afinfo; + int tos = 0; - if (!afinfo) - return -EINVAL; - - tos = afinfo->get_tos(fl); + afinfo = xfrm_policy_get_afinfo(family); + tos = afinfo ? afinfo->get_tos(fl) : 0; - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return tos; } @@ -1609,7 +1598,7 @@ static const struct flow_cache_ops xfrm_bundle_fc_ops = { static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) { - struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); struct dst_ops *dst_ops; struct xfrm_dst *xdst; @@ -1638,7 +1627,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) } else xdst = ERR_PTR(-ENOBUFS); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return xdst; } @@ -1646,7 +1635,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len) { - struct xfrm_policy_afinfo *afinfo = + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(dst->ops->family); int err; @@ -1655,7 +1644,7 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, err = afinfo->init_path(path, dst, nfheader_len); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return err; } @@ -1663,7 +1652,7 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, const struct flowi *fl) { - struct xfrm_policy_afinfo *afinfo = + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(xdst->u.dst.ops->family); int err; @@ -1672,7 +1661,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, err = afinfo->fill_dst(xdst, dev, fl); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return err; } @@ -1705,9 +1694,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, xfrm_flowi_addr_get(fl, &saddr, &daddr, family); tos = xfrm_get_tos(fl, family); - err = tos; - if (tos < 0) - goto put_states; dst_hold(dst); @@ -2215,7 +2201,7 @@ error: static struct dst_entry *make_blackhole(struct net *net, u16 family, struct dst_entry *dst_orig) { - struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); struct dst_entry *ret; if (!afinfo) { @@ -2224,7 +2210,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family, } else { ret = afinfo->blackhole_route(net, dst_orig); } - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return ret; } @@ -2466,7 +2452,7 @@ xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int star int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned 
int family, int reverse) { - struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); + const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); int err; if (unlikely(afinfo == NULL)) @@ -2474,7 +2460,7 @@ int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, afinfo->decode_session(skb, fl, reverse); err = security_xfrm_decode_session(skb, &fl->flowi_secid); - xfrm_policy_put_afinfo(afinfo); + rcu_read_unlock(); return err; } EXPORT_SYMBOL(__xfrm_decode_session); @@ -2742,10 +2728,11 @@ void xfrm_garbage_collect(struct net *net) } EXPORT_SYMBOL(xfrm_garbage_collect); -static void xfrm_garbage_collect_deferred(struct net *net) +void xfrm_garbage_collect_deferred(struct net *net) { flow_cache_flush_deferred(net); } +EXPORT_SYMBOL(xfrm_garbage_collect_deferred); static void xfrm_init_pmtu(struct dst_entry *dst) { @@ -2873,15 +2860,15 @@ static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr) path->ops->confirm_neigh(path, daddr); } -int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) +int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family) { int err = 0; - if (unlikely(afinfo == NULL)) - return -EINVAL; - if (unlikely(afinfo->family >= NPROTO)) + + if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo))) return -EAFNOSUPPORT; + spin_lock(&xfrm_policy_afinfo_lock); - if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) + if (unlikely(xfrm_policy_afinfo[family] != NULL)) err = -EEXIST; else { struct dst_ops *dst_ops = afinfo->dst_ops; @@ -2901,9 +2888,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->neigh_lookup = xfrm_neigh_lookup; if (likely(!dst_ops->confirm_neigh)) dst_ops->confirm_neigh = xfrm_confirm_neigh; - if (likely(afinfo->garbage_collect == NULL)) - afinfo->garbage_collect = xfrm_garbage_collect_deferred; - rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo); + rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo); } spin_unlock(&xfrm_policy_afinfo_lock); @@ -2911,34 +2896,24 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_policy_register_afinfo); -int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) +void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo) { - int err = 0; - if (unlikely(afinfo == NULL)) - return -EINVAL; - if (unlikely(afinfo->family >= NPROTO)) - return -EAFNOSUPPORT; - spin_lock(&xfrm_policy_afinfo_lock); - if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { - if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) - err = -EINVAL; - else - RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family], - NULL); + struct dst_ops *dst_ops = afinfo->dst_ops; + int i; + + for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) { + if (xfrm_policy_afinfo[i] != afinfo) + continue; + RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL); + break; } - spin_unlock(&xfrm_policy_afinfo_lock); - if (!err) { - struct dst_ops *dst_ops = afinfo->dst_ops; - synchronize_rcu(); + synchronize_rcu(); - dst_ops->kmem_cachep = NULL; - dst_ops->check = NULL; - dst_ops->negative_advice = NULL; - dst_ops->link_failure = NULL; - afinfo->garbage_collect = NULL; - } - return err; + dst_ops->kmem_cachep = NULL; + dst_ops->check = NULL; + dst_ops->negative_advice = NULL; + dst_ops->link_failure = NULL; } EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c index 504058631ffc..4bfcaf93fcf3 100644 --- 
a/samples/bpf/test_cgrp2_attach.c +++ b/samples/bpf/test_cgrp2_attach.c @@ -104,7 +104,7 @@ static int attach_filter(int cg_fd, int type, int verdict) return EXIT_FAILURE; } - ret = bpf_prog_attach(prog_fd, cg_fd, type); + ret = bpf_prog_attach(prog_fd, cg_fd, type, 0); if (ret < 0) { printf("Failed to attach prog to cgroup: '%s'\n", strerror(errno)); diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c index 6e69be37f87f..3049b1f26267 100644 --- a/samples/bpf/test_cgrp2_attach2.c +++ b/samples/bpf/test_cgrp2_attach2.c @@ -79,11 +79,12 @@ int main(int argc, char **argv) if (join_cgroup(FOO)) goto err; - if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS)) { + if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) { log_err("Attaching prog to /foo"); goto err; } + printf("Attached DROP prog. This ping in cgroup /foo should fail...\n"); assert(system(PING_CMD) != 0); /* Create cgroup /foo/bar, get fd, and join it */ @@ -94,24 +95,27 @@ int main(int argc, char **argv) if (join_cgroup(BAR)) goto err; + printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n"); assert(system(PING_CMD) != 0); - if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) { + if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) { log_err("Attaching prog to /foo/bar"); goto err; } + printf("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n"); assert(system(PING_CMD) == 0); - if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { log_err("Detaching program from /foo/bar"); goto err; } + printf("Detached PASS from /foo/bar while DROP is attached to /foo.\n" + "This ping in cgroup /foo/bar should fail...\n"); assert(system(PING_CMD) != 0); - if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) { + if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) { log_err("Attaching prog to /foo/bar"); goto err; } @@ -121,8 +125,60 @@ int main(int argc, char **argv) goto err; } + printf("Attached PASS from /foo/bar and detached DROP from /foo.\n" + "This ping in cgroup /foo/bar should pass...\n"); assert(system(PING_CMD) == 0); + if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) { + log_err("Attaching prog to /foo/bar"); + goto err; + } + + if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { + errno = 0; + log_err("Unexpected success attaching prog to /foo/bar"); + goto err; + } + + if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { + log_err("Detaching program from /foo/bar"); + goto err; + } + + if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) { + errno = 0; + log_err("Unexpected success in double detach from /foo"); + goto err; + } + + if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { + log_err("Attaching non-overridable prog to /foo"); + goto err; + } + + if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) { + errno = 0; + log_err("Unexpected success attaching non-overridable prog to /foo/bar"); + goto err; + } + + if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) { + errno = 0; + log_err("Unexpected success attaching overridable prog to /foo/bar"); + goto err; + } + + if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) { + errno = 0; + log_err("Unexpected success attaching overridable prog to /foo"); + goto err; + } + + if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) { + log_err("Attaching different non-overridable prog to /foo"); + goto err; + } + goto out; err: @@ -132,5 +188,9 @@ out: close(foo); 
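/* both the success and the error paths funnel through here; rc selects the PASS/FAIL verdict printed below */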
close(bar); cleanup_cgroup_environment(); + if (!rc) + printf("PASS\n"); + else + printf("FAIL\n"); return rc; } diff --git a/samples/bpf/test_cgrp2_sock.c b/samples/bpf/test_cgrp2_sock.c index 0791b949cbe4..c3cfb23e23b5 100644 --- a/samples/bpf/test_cgrp2_sock.c +++ b/samples/bpf/test_cgrp2_sock.c @@ -75,7 +75,7 @@ int main(int argc, char **argv) return EXIT_FAILURE; } - ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE); + ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE, 0); if (ret < 0) { printf("Failed to attach prog to cgroup: '%s'\n", strerror(errno)); diff --git a/samples/bpf/test_cgrp2_sock2.c b/samples/bpf/test_cgrp2_sock2.c index 455ef0d06e93..db036077b644 100644 --- a/samples/bpf/test_cgrp2_sock2.c +++ b/samples/bpf/test_cgrp2_sock2.c @@ -55,7 +55,7 @@ int main(int argc, char **argv) } ret = bpf_prog_attach(prog_fd[filter_id], cg_fd, - BPF_CGROUP_INET_SOCK_CREATE); + BPF_CGROUP_INET_SOCK_CREATE, 0); if (ret < 0) { printf("Failed to attach prog to cgroup: '%s'\n", strerror(errno)); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e07fd5a324e6..0539a0ceef38 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -123,6 +123,12 @@ enum bpf_attach_type { #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE +/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command + * for the given target_fd cgroup, descendant cgroups will be able to + * override the effective bpf program inherited from this cgroup. + */ +#define BPF_F_ALLOW_OVERRIDE (1U << 0) + #define BPF_PSEUDO_MAP_FD 1 /* flags for BPF_MAP_UPDATE_ELEM command */ @@ -178,6 +184,7 @@ union bpf_attr { __u32 target_fd; /* container object to attach to */ __u32 attach_bpf_fd; /* eBPF program to attach */ __u32 attach_type; + __u32 attach_flags; }; } __attribute__((aligned(8))); diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 2de9c386989a..d48b70ceb25a 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -168,7 +168,8 @@ int bpf_obj_get(const char *pathname) return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr)); } -int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type) +int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, + unsigned int flags) { union bpf_attr attr; @@ -176,6 +177,7 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type) attr.target_fd = target_fd; attr.attach_bpf_fd = prog_fd; attr.attach_type = type; + attr.attach_flags = flags; return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)); } diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 88f07c15423a..9f838aa6d26e 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -41,7 +41,8 @@ int bpf_map_delete_elem(int fd, const void *key); int bpf_map_get_next_key(int fd, const void *key, void *next_key); int bpf_obj_pin(int fd, const char *pathname); int bpf_obj_get(const char *pathname); -int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type); +int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, + unsigned int flags); int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 9ff0db4e2d0c..933aeec46f4a 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -1199,7 +1199,7 @@ static int ui_init(void) BUG_ON(1); } - perf_hpp__register_sort_field(fmt); + perf_hpp__prepend_sort_field(fmt); return 0; } diff --git 
a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 37388397b5bc..18cfcdc90356 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, list_add_tail(&format->sort_list, &list->sorts); } +void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, + struct perf_hpp_fmt *format) +{ + list_add(&format->sort_list, &list->sorts); +} + void perf_hpp__column_unregister(struct perf_hpp_fmt *format) { list_del(&format->list); @@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list) perf_hpp_list__for_each_sort_list(list, fmt) { struct perf_hpp_fmt *pos; + /* skip sort-only fields ("sort_compute" in perf diff) */ + if (!fmt->entry && !fmt->color) + continue; + perf_hpp_list__for_each_format(list, pos) { if (fmt_equal(fmt, pos)) goto next; diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 42922512c1c6..8b610dd9e2f6 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -437,7 +437,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor) } call->ip = cursor_node->ip; call->ms.sym = cursor_node->sym; - call->ms.map = cursor_node->map; + call->ms.map = map__get(cursor_node->map); if (cursor_node->branch) { call->branch_count = 1; @@ -477,6 +477,7 @@ add_child(struct callchain_node *parent, list_for_each_entry_safe(call, tmp, &new->val, list) { list_del(&call->list); + map__zput(call->ms.map); free(call); } free(new); @@ -761,6 +762,7 @@ merge_chain_branch(struct callchain_cursor *cursor, list->ms.map, list->ms.sym, false, NULL, 0, 0); list_del(&list->list); + map__zput(list->ms.map); free(list); } @@ -811,7 +813,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor, } node->ip = ip; - node->map = map; + map__zput(node->map); + node->map = map__get(map); node->sym = sym; node->branch = branch; node->nr_loop_iter = nr_loop_iter; @@ -1142,11 +1145,13 @@ static void free_callchain_node(struct callchain_node *node) list_for_each_entry_safe(list, tmp, &node->parent_val, list) { list_del(&list->list); + map__zput(list->ms.map); free(list); } list_for_each_entry_safe(list, tmp, &node->val, list) { list_del(&list->list); + map__zput(list->ms.map); free(list); } @@ -1210,6 +1215,7 @@ int callchain_node__make_parent_list(struct callchain_node *node) goto out; *new = *chain; new->has_children = false; + map__get(new->ms.map); list_add_tail(&new->list, &head); } parent = parent->parent; @@ -1230,6 +1236,7 @@ int callchain_node__make_parent_list(struct callchain_node *node) out: list_for_each_entry_safe(chain, new, &head, list) { list_del(&chain->list); + map__zput(chain->ms.map); free(chain); } return -ENOMEM; diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 35c8e379530f..4f4b60f1558a 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -5,6 +5,7 @@ #include <linux/list.h> #include <linux/rbtree.h> #include "event.h" +#include "map.h" #include "symbol.h" #define HELP_PAD "\t\t\t\t" @@ -184,8 +185,13 @@ int callchain_merge(struct callchain_cursor *cursor, */ static inline void callchain_cursor_reset(struct callchain_cursor *cursor) { + struct callchain_cursor_node *node; + cursor->nr = 0; cursor->last = &cursor->first; + + for (node = cursor->first; node != NULL; node = node->next) + map__zput(node->map); } int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip, diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 
6770a9645609..7d1b7d33e644 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -1,6 +1,7 @@ #include "util.h" #include "build-id.h" #include "hist.h" +#include "map.h" #include "session.h" #include "sort.h" #include "evlist.h" @@ -1019,6 +1020,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, int max_stack_depth, void *arg) { int err, err2; + struct map *alm = NULL; + + if (al && al->map) + alm = map__get(al->map); err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, iter->evsel, al, max_stack_depth); @@ -1058,6 +1063,8 @@ out: if (!err) err = err2; + map__put(alm); + return err; } diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index d4b6514eeef5..28c216e3d5b7 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -283,6 +283,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list, struct perf_hpp_fmt *format); void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, struct perf_hpp_fmt *format); +void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, + struct perf_hpp_fmt *format); static inline void perf_hpp__column_register(struct perf_hpp_fmt *format) { @@ -294,6 +296,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format) { perf_hpp_list__register_sort_field(&perf_hpp_list, format); } +static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format) +{ + perf_hpp_list__prepend_sort_field(&perf_hpp_list, format); +} + #define perf_hpp_list__for_each_format(_list, format) \ list_for_each_entry(format, &(_list)->fields, list)
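The sample and library changes above all revolve around the new attach_flags argument to BPF_PROG_ATTACH. As a standalone illustration (a minimal sketch, not part of the patch: the helper name is ours, and prog_fd and cg_fd are assumed to come from bpf_prog_load() and an open() on the cgroup directory), an overridable attach with the updated libbpf helper looks like this:

	#include <linux/bpf.h>
	#include "bpf.h"	/* tools/lib/bpf */

	/* Attach prog_fd to the cgroup behind cg_fd.  BPF_F_ALLOW_OVERRIDE
	 * lets descendant cgroups attach their own program in its place;
	 * passing 0 instead makes the attachment exclusive, so a later
	 * attach in a child cgroup fails (as test_cgrp2_attach2 verifies).
	 */
	static int attach_egress_overridable(int prog_fd, int cg_fd)
	{
		return bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS,
				       BPF_F_ALLOW_OVERRIDE);
	}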